prithivMLmods commited on
Commit
9e4971c
·
verified ·
1 Parent(s): 4a5d7ca

update app

Browse files
Files changed (1) hide show
  1. app.py +928 -0
app.py ADDED
@@ -0,0 +1,928 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import shutil
4
+ import time
5
+ import uuid
6
+ import unicodedata
7
+ from io import BytesIO
8
+ from threading import Timer
9
+ from typing import Any, Dict, List, Optional
10
+ from datetime import datetime
11
+
12
+ import gradio as gr
13
+ import torch
14
+ from dotenv import load_dotenv
15
+ from e2b_desktop import Sandbox
16
+ from gradio_modal import Modal
17
+ from huggingface_hub import login, upload_folder
18
+ from PIL import Image, ImageDraw
19
+
20
+ # Smolagents imports
21
+ from smolagents import CodeAgent, tool, AgentImage
22
+ from smolagents.memory import ActionStep, TaskStep
23
+ from smolagents.models import ChatMessage, Model, MessageRole
24
+ from smolagents.gradio_ui import GradioUI, stream_to_gradio
25
+ from smolagents.monitoring import LogLevel
26
+
27
+ # Transformers for Fara Model
28
+ from transformers import (
29
+ Qwen2_5_VLForConditionalGeneration,
30
+ AutoProcessor,
31
+ )
32
+ from qwen_vl_utils import process_vision_info
33
+
34
load_dotenv(override=True)

# -----------------------------------------------------------------------------
# CONFIGURATION & CONSTANTS
# -----------------------------------------------------------------------------

# E2B API key; required to spawn remote desktop sandboxes.
E2B_API_KEY = os.getenv("E2B_API_KEY")
# Hugging Face token (either env var name); log in so gated model downloads
# and dataset uploads work.
HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
if HF_TOKEN:
    login(token=HF_TOKEN)

# In-memory registries of live sandboxes, keyed by browser session UUID.
SANDBOXES = {}
SANDBOX_METADATA = {}
SANDBOX_TIMEOUT = 600  # seconds before a sandbox is considered expired
WIDTH = 1024  # sandbox screen width (px)
HEIGHT = 768  # sandbox screen height (px)
TMP_DIR = "./tmp/"  # local scratch dir for per-run screenshots/metadata
if not os.path.exists(TMP_DIR):
    os.makedirs(TMP_DIR)

# -----------------------------------------------------------------------------
# MODEL INITIALIZATION (Fara-7B / Qwen2.5-VL)
# -----------------------------------------------------------------------------

print("Loading Fara Model... This may take a moment.")
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_ID_F = "microsoft/Fara-7B"  # Ensure this ID is accessible or point to local path

try:
    processor_f = AutoProcessor.from_pretrained(MODEL_ID_F, trust_remote_code=True)
    model_f = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        MODEL_ID_F,
        trust_remote_code=True,
        # bf16 on GPU, fp32 on CPU (bf16 CPU support varies).
        torch_dtype=torch.bfloat16 if DEVICE == "cuda" else torch.float32,
        device_map="auto" if DEVICE == "cuda" else None,
    )
    if DEVICE == "cpu":
        model_f.to(DEVICE)

    model_f.eval()
    print(f"Fara Model loaded successfully on {DEVICE}")
except Exception as e:
    print(f"Error loading Fara Model: {e}")
    print("Please ensure you have access to the model and enough GPU memory.")
    # Fallback to prevent crash during import, though app won't work without model
    model_f = None
    processor_f = None
81
+
82
class FaraLocalModel(Model):
    """
    Wrapper for the local Fara (Qwen2.5-VL) model to work with SmolAgents.
    """
    def __init__(self, model, processor, **kwargs):
        # model: loaded Qwen2_5_VLForConditionalGeneration instance.
        # processor: matching AutoProcessor (chat template + tokenizer).
        super().__init__(**kwargs)
        self.model = model
        self.processor = processor

    def __call__(
        self,
        messages: List[Dict[str, Any]],
        stop_sequences: Optional[List[str]] = None,
        **kwargs,
    ) -> ChatMessage:
        """Run one multimodal chat completion and return the assistant reply.

        Raises:
            ValueError: if the global model failed to load at import time.
        """
        if self.model is None:
            raise ValueError("Fara Model is not loaded.")

        # Convert SmolAgents messages to Qwen/Transformers format
        # SmolAgents uses a specific dict structure for content.
        # We need to normalize it for process_vision_info / apply_chat_template

        formatted_messages = []

        for msg in messages:
            role = msg["role"]
            content = msg["content"]

            new_content = []

            if isinstance(content, str):
                new_content.append({"type": "text", "text": content})
            elif isinstance(content, list):
                for item in content:
                    if isinstance(item, str):
                        new_content.append({"type": "text", "text": item})
                    elif isinstance(item, dict):
                        if "type" in item:
                            if item["type"] == "image":
                                # Handle path or url
                                val = item.get("image") or item.get("url") or item.get("path")
                                new_content.append({"type": "image", "image": val})
                            else:
                                new_content.append(item)
                        # NOTE(review): dict items without a "type" key are
                        # silently dropped here — confirm this is intended.

            formatted_messages.append({"role": role, "content": new_content})

        # Process Inputs
        text = self.processor.apply_chat_template(
            formatted_messages, tokenize=False, add_generation_prompt=True
        )

        image_inputs, video_inputs = process_vision_info(formatted_messages)

        inputs = self.processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )

        inputs = inputs.to(self.model.device)

        # Generate
        with torch.no_grad():
            generated_ids = self.model.generate(
                **inputs,
                max_new_tokens=kwargs.get("max_tokens", 1024),
                stop_strings=stop_sequences,
                tokenizer=self.processor.tokenizer,  # Specific for stop_strings in modern transformers
            )

        # Decode: slice off the prompt tokens, keep only newly generated ones.
        generated_ids_trimmed = [
            out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]

        return ChatMessage(
            role=MessageRole.ASSISTANT,
            content=output_text,
        )
167
+
168
+ # -----------------------------------------------------------------------------
169
+ # E2B AGENT & TOOLS
170
+ # -----------------------------------------------------------------------------
171
+
172
# System prompt for the desktop agent. <<current_date>> is substituted at
# import time below; <<resolution_x>>/<<resolution_y>> are substituted per
# agent in E2BVisionAgent.__init__ once the sandbox screen size is known.
# Fixes: opening tag was "<action process>" (mismatched with the
# "</action_process>" closer) and the prompt contained typos ("Akways",
# "extremelly", "this way can inspect") that degrade the instruction text.
E2B_SYSTEM_PROMPT_TEMPLATE = """You are a desktop automation assistant that can control a remote desktop environment. The current date is <<current_date>>.

<action_process>
You will be given a task to solve in several steps. At each step you will perform an action.
After each action, you'll receive an updated screenshot.
Then you will proceed as follows, with these sections: don't skip any!

Short term goal: ...
What I see: ...
Reflection: ...
Action:
```python
click(254, 308)
```<end_code>

Always format your action ('Action:' part) as Python code blocks as shown above.
</action_process>

<tools>
On top of performing computations in the Python code snippets that you create, you only have access to these tools to interact with the desktop, no additional ones:
{%- for tool in tools.values() %}
- {{ tool.name }}: {{ tool.description }}
    Takes inputs: {{tool.inputs}}
    Returns an output of type: {{tool.output_type}}
{%- endfor %}
</tools>

<click_guidelines>
Look at elements on the screen to determine what to click or interact with.
The desktop has a resolution of <<resolution_x>>x<<resolution_y>> pixels, take it into account to decide clicking coordinates. NEVER USE HYPOTHETIC OR ASSUMED COORDINATES, USE TRUE COORDINATES that you can see from the screenshot.
Use precise coordinates based on the current screenshot for mouse movements and clicks.
Whenever you click, MAKE SURE to click in the middle of the button, text, link or any other clickable element. Not under, not on the side. IN THE MIDDLE, else you risk to miss it.
In menus it is always better to click in the middle of the text rather than in the tiny icon. Calculate extremely well the coordinates. A mistake here can make the full task fail.
Sometimes you may have missed a click, so never assume that you're on the right page, always make sure that your previous action worked.
In the screenshot you will see a green crosshair displayed over the position of your last click: this way you can inspect if the mouse pointer is off of the targeted element, pay special attention to it.
</click_guidelines>

<general_guidelines>
Always analyze the latest screenshot carefully before performing actions.
You can wait for appropriate loading times using the wait() tool. But don't wait forever, sometimes you've just misclicked and the process didn't launch.
Execute one action at a time: don't try to pack a click and typing in one action.
On each step, look at the last screenshot and action to validate if previous steps worked and decide the next action. If you repeated an action already without effect, it means that this action is useless: don't repeat it and try something else.
Use click to move through menus on the desktop and scroll for web and specific applications.
Always analyze the latest screenshot carefully before performing actions.
Desktop menus usually expand with more options, the tiny triangle next to some text in a menu means that menu expands. For example in Office in the Applications menu expands showing presentation or writing applications.
NEVER CLICK THE WEB BROWSER ICON TO OPEN THE WEB BROWSER: use open_url directly.
In browser, ignore any sign-in popups while they don't interfere with the elements you want to interact with.
</general_guidelines>
""".replace("<<current_date>>", datetime.now().strftime("%A, %d-%B-%Y"))
221
+
222
+
223
def draw_marker_on_image(image_copy, click_coordinates):
    """Overlay a green crosshair at the last click position and return the image.

    Draws a small cross centred on (x, y) plus a surrounding circle so the
    click location stands out in screenshots fed back to the model.
    """
    cx, cy = click_coordinates
    canvas = ImageDraw.Draw(image_copy)
    arm, stroke = 10, 3
    # Horizontal and vertical arms of the cross.
    canvas.line((cx - arm, cy, cx + arm, cy), fill="green", width=stroke)
    canvas.line((cx, cy - arm, cx, cy + arm), fill="green", width=stroke)
    # Circle with twice the arm length as radius, for better visibility.
    bounding_box = (cx - arm * 2, cy - arm * 2, cx + arm * 2, cy + arm * 2)
    canvas.ellipse(bounding_box, outline="green", width=stroke)
    return image_copy
242
+
243
def get_agent_summary_erase_images(agent):
    """Strip image payloads from the agent's memory, then return its message log.

    Screenshots are dropped so the serialized summary stays small; only the
    textual conversation is returned via write_memory_to_messages().
    """
    for step in agent.memory.steps:
        if hasattr(step, "observations_images"):
            step.observations_images = None
        if hasattr(step, "task_images"):
            step.task_images = None
    return agent.write_memory_to_messages()
250
+
251
+
252
class E2BVisionAgent(CodeAgent):
    """Agent for e2b desktop automation with Vision capabilities.

    Extends smolagents' CodeAgent with desktop-control tools that drive a
    remote E2B sandbox, plus a per-step callback that captures a screenshot
    (annotated with the last click position) for the vision model.
    """

    def __init__(
        self,
        model: Model,
        data_dir: str,
        desktop: Sandbox,
        tools: List[tool] = None,
        max_steps: int = 200,
        verbosity_level: LogLevel = 2,
        planning_interval: int = None,
        use_v1_prompt: bool = False,
        **kwargs,
    ):
        """
        Args:
            model: chat model used for planning/acting.
            data_dir: local directory where step screenshots are written.
            desktop: live E2B desktop sandbox to control.
            tools: optional extra tools; the desktop tools are always added.
            max_steps: maximum number of agent steps.
            verbosity_level: smolagents log level.
            planning_interval: steps between planning phases (None disables).
            use_v1_prompt: stored for compatibility; not read in this class.
        """
        self.desktop = desktop
        self.data_dir = data_dir
        self.planning_interval = planning_interval
        # Fix: initialize the click marker explicitly — take_screenshot_callback
        # reads it (via getattr) and it was previously only created by the
        # click tools.
        self.click_coordinates = None
        # Query the sandbox for its actual screen size.
        self.width, self.height = self.desktop.get_screen_size()
        print(f"Screen size: {self.width}x{self.height}")

        # Set up temp directory
        os.makedirs(self.data_dir, exist_ok=True)
        print(f"Screenshots and steps will be saved to: {self.data_dir}")

        self.use_v1_prompt = use_v1_prompt
        # Initialize base agent
        super().__init__(
            tools=tools or [],
            model=model,
            max_steps=max_steps,
            verbosity_level=verbosity_level,
            planning_interval=self.planning_interval,
            **kwargs,
        )
        # Inject the real sandbox resolution into the system prompt template.
        self.prompt_templates["system_prompt"] = E2B_SYSTEM_PROMPT_TEMPLATE.replace(
            "<<resolution_x>>", str(self.width)
        ).replace("<<resolution_y>>", str(self.height))

        # Add screen info to state
        self.state["screen_width"] = self.width
        self.state["screen_height"] = self.height

        # Add default tools
        self.logger.log("Setting up agent tools...")
        self._setup_desktop_tools()
        self.step_callbacks.append(self.take_screenshot_callback)

    def _setup_desktop_tools(self):
        """Register all desktop tools.

        Fix: every tool argument now has an `Args:` entry in its docstring.
        smolagents' @tool decorator parses the docstring and raises at
        registration time when an argument description is missing — wait,
        open_url, drag_and_drop and find_on_page_ctrl_f previously lacked
        theirs.
        """

        @tool
        def click(x: int, y: int) -> str:
            """
            Performs a left-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.left_click()
            self.click_coordinates = [x, y]
            self.logger.log(f"Clicked at coordinates ({x}, {y})")
            return f"Clicked at coordinates ({x}, {y})"

        @tool
        def right_click(x: int, y: int) -> str:
            """
            Performs a right-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.right_click()
            self.click_coordinates = [x, y]
            self.logger.log(f"Right-clicked at coordinates ({x}, {y})")
            return f"Right-clicked at coordinates ({x}, {y})"

        @tool
        def double_click(x: int, y: int) -> str:
            """
            Performs a double-click at the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.desktop.double_click()
            self.click_coordinates = [x, y]
            self.logger.log(f"Double-clicked at coordinates ({x}, {y})")
            return f"Double-clicked at coordinates ({x}, {y})"

        @tool
        def move_mouse(x: int, y: int) -> str:
            """
            Moves the mouse cursor to the specified coordinates
            Args:
                x: The x coordinate (horizontal position)
                y: The y coordinate (vertical position)
            """
            self.desktop.move_mouse(x, y)
            self.logger.log(f"Moved mouse to coordinates ({x}, {y})")
            return f"Moved mouse to coordinates ({x}, {y})"

        def normalize_text(text):
            # Strip combining accents so the sandbox keyboard can type the text.
            return "".join(
                c
                for c in unicodedata.normalize("NFD", text)
                if not unicodedata.combining(c)
            )

        @tool
        def type_text(text: str) -> str:
            """
            Types the specified text at the current cursor position.
            Args:
                text: The text to type
            """
            clean_text = normalize_text(text)
            self.desktop.write(clean_text, delay_in_ms=75)
            self.logger.log(f"Typed text: '{clean_text}'")
            return f"Typed text: '{clean_text}'"

        @tool
        def press_key(key: str) -> str:
            """
            Presses a keyboard key
            Args:
                key: The key to press (e.g. "enter", "space", "backspace", etc.).
            """
            self.desktop.press(key)
            self.logger.log(f"Pressed key: {key}")
            return f"Pressed key: {key}"

        @tool
        def go_back() -> str:
            """
            Goes back to the previous page in the browser.
            """
            self.desktop.press(["alt", "left"])
            self.logger.log("Went back one page")
            return "Went back one page"

        @tool
        def drag_and_drop(x1: int, y1: int, x2: int, y2: int) -> str:
            """
            Clicks [x1, y1], drags mouse to [x2, y2], then release click.
            Args:
                x1: The x coordinate of the drag start point
                y1: The y coordinate of the drag start point
                x2: The x coordinate of the drop point
                y2: The y coordinate of the drop point
            """
            self.desktop.drag([x1, y1], [x2, y2])
            message = f"Dragged and dropped from [{x1}, {y1}] to [{x2}, {y2}]"
            self.logger.log(message)
            return message

        @tool
        def scroll(x: int, y: int, direction: str = "down", amount: int = 2) -> str:
            """
            Moves the mouse to selected coordinates, then uses the scroll button.
            Args:
                x: The x coordinate
                y: The y coordinate
                direction: "up" or "down"
                amount: The amount to scroll.
            """
            self.desktop.move_mouse(x, y)
            self.desktop.scroll(direction=direction, amount=amount)
            message = f"Scrolled {direction} by {amount}"
            self.logger.log(message)
            return message

        @tool
        def wait(seconds: float) -> str:
            """
            Waits for the specified number of seconds.
            Args:
                seconds: Number of seconds to wait.
            """
            time.sleep(seconds)
            self.logger.log(f"Waited for {seconds} seconds")
            return f"Waited for {seconds} seconds"

        @tool
        def open_url(url: str) -> str:
            """
            Directly opens a browser with the specified url.
            Args:
                url: The url to open; "https://" is prepended if no scheme given.
            """
            if not url.startswith(("http://", "https://")):
                url = "https://" + url

            self.desktop.open(url)
            time.sleep(2)  # Give the browser time to launch/navigate.
            self.logger.log(f"Opening URL: {url}")
            return f"Opened URL: {url}"

        @tool
        def find_on_page_ctrl_f(search_string: str) -> str:
            """
            Scroll the browser viewport to the first occurrence of the search string (Ctrl+F).
            Args:
                search_string: The text to search for on the current page.
            """
            self.desktop.press(["ctrl", "f"])
            time.sleep(0.3)
            clean_text = normalize_text(search_string)
            self.desktop.write(clean_text, delay_in_ms=75)
            time.sleep(0.3)
            self.desktop.press("enter")
            time.sleep(0.3)
            self.desktop.press("esc")
            output_message = f"Scrolled to the first occurrence of '{clean_text}'"
            self.logger.log(output_message)
            return output_message

        # Register the tools
        self.tools["click"] = click
        self.tools["right_click"] = right_click
        self.tools["double_click"] = double_click
        self.tools["move_mouse"] = move_mouse
        self.tools["type_text"] = type_text
        self.tools["press_key"] = press_key
        self.tools["scroll"] = scroll
        self.tools["wait"] = wait
        self.tools["open_url"] = open_url
        self.tools["go_back"] = go_back
        self.tools["drag_and_drop"] = drag_and_drop
        self.tools["find_on_page_ctrl_f"] = find_on_page_ctrl_f

    def take_screenshot_callback(self, memory_step: ActionStep, agent=None) -> None:
        """Callback that takes a screenshot + memory snapshot after a step completes"""
        self.logger.log("Analyzing screen content...")

        current_step = memory_step.step_number

        time.sleep(2.5)  # Let things happen on the desktop
        screenshot_bytes = self.desktop.screenshot(format="bytes")
        image = Image.open(BytesIO(screenshot_bytes))

        # Persist the raw screenshot with a step-numbered filename.
        screenshot_path = os.path.join(self.data_dir, f"step_{current_step:03d}.png")
        image.save(screenshot_path)

        image_copy = image.copy()

        if getattr(self, "click_coordinates", None):
            image_copy = draw_marker_on_image(image_copy, self.click_coordinates)

        # NOTE(review): this references the raw (unmarked) file on disk while
        # the marked copy only goes into memory below — confirm intended.
        self.last_marked_screenshot = AgentImage(screenshot_path)
        print(f"Saved screenshot for step {current_step} to {screenshot_path}")

        # Optimization: remove previous raw images from memory to save context/speed
        for previous_memory_step in agent.memory.steps:
            if (
                isinstance(previous_memory_step, ActionStep)
                and previous_memory_step.step_number <= current_step - 1
            ):
                previous_memory_step.observations_images = None
            elif isinstance(previous_memory_step, TaskStep):
                previous_memory_step.task_images = None

        # Add the marker-edited image to the current memory step
        memory_step.observations_images = [image_copy]
        self.click_coordinates = None  # Reset click marker
511
+
512
+
513
+ # -----------------------------------------------------------------------------
514
+ # SANDBOX MANAGEMENT & HELPERS
515
+ # -----------------------------------------------------------------------------
516
+
517
def upload_to_hf_and_remove(folder_path):
    """Upload a local run folder to the logs dataset repo, then delete it locally.

    Returns the upload URL on success, or None on any failure (errors are
    printed rather than raised so the app keeps running).
    """
    repo_id = "smolagents/computer-agent-logs"
    try:
        folder_name = os.path.basename(os.path.normpath(folder_path))
        print(f"Uploading {folder_path} to {repo_id}/{folder_name}...")
        uploaded_url = upload_folder(
            folder_path=folder_path,
            repo_id=repo_id,
            repo_type="dataset",
            path_in_repo=folder_name,
            ignore_patterns=[".git/*", ".gitignore"],
        )
        print(f"Upload complete. Removing local folder {folder_path}...")
        shutil.rmtree(folder_path)
        return uploaded_url
    except Exception as e:
        print(f"Error during upload or cleanup: {str(e)}")
        # Don't raise, just log, to keep app running
        return None
536
+
537
def cleanup_sandboxes():
    """Kill and forget sandboxes idle for longer than SANDBOX_TIMEOUT seconds."""
    now = time.time()
    # Collect expired sessions first so we don't mutate dicts while iterating.
    expired_sessions = [
        sid
        for sid, meta in SANDBOX_METADATA.items()
        if now - meta["last_accessed"] > SANDBOX_TIMEOUT
    ]

    for session_id in expired_sessions:
        if session_id not in SANDBOXES:
            continue
        try:
            data_dir = os.path.join(TMP_DIR, session_id)
            if os.path.exists(data_dir):
                shutil.rmtree(data_dir)  # Just local cleanup for this demo

            SANDBOXES[session_id].kill()
            del SANDBOXES[session_id]
            del SANDBOX_METADATA[session_id]
            print(f"Cleaned up sandbox for session {session_id}")
        except Exception as e:
            print(f"Error cleaning up sandbox {session_id}: {str(e)}")
558
+
559
def get_or_create_sandbox(session_uuid):
    """Return a live E2B desktop sandbox for this session, creating one if needed.

    NOTE(review): reuse eligibility is checked against "created_at" here while
    cleanup_sandboxes() expires on "last_accessed" — confirm this asymmetry
    (hard lifetime vs idle timeout) is intentional.
    """
    current_time = time.time()

    if (
        session_uuid in SANDBOXES
        and session_uuid in SANDBOX_METADATA
        and current_time - SANDBOX_METADATA[session_uuid]["created_at"]
        < SANDBOX_TIMEOUT
    ):
        print(f"Reusing Sandbox for {session_uuid}")
        SANDBOX_METADATA[session_uuid]["last_accessed"] = current_time
        return SANDBOXES[session_uuid]
    else:
        print("No sandbox found, creating a new one")

    # Kill any expired sandbox still registered under this session id.
    if session_uuid in SANDBOXES:
        try:
            SANDBOXES[session_uuid].kill()
        except Exception:
            pass

    print(f"Creating new sandbox for session {session_uuid}")
    desktop = Sandbox(
        api_key=E2B_API_KEY,
        resolution=(WIDTH, HEIGHT),
        dpi=96,
        timeout=SANDBOX_TIMEOUT,
        template="k0wmnzir0zuzye6dndlw",
    )
    # Start the screen stream; auth required so only holders of the key view it.
    desktop.stream.start(require_auth=True)
    # Suppress Firefox first-run/import/default-browser prompts inside the VM.
    setup_cmd = """sudo mkdir -p /usr/lib/firefox-esr/distribution && echo '{"policies":{"OverrideFirstRunPage":"","OverridePostUpdatePage":"","DisableProfileImport":true,"DontCheckDefaultBrowser":true}}' | sudo tee /usr/lib/firefox-esr/distribution/policies.json > /dev/null"""
    desktop.commands.run(setup_cmd)

    SANDBOXES[session_uuid] = desktop
    SANDBOX_METADATA[session_uuid] = {
        "created_at": current_time,
        "last_accessed": current_time,
    }
    return desktop
598
+
599
def save_final_status(folder, status: str, summary, error_message=None) -> None:
    """Persist the run outcome to <folder>/metadata.json (best effort).

    Args:
        folder: directory in which metadata.json is written.
        status: e.g. "completed" or "failed".
        summary: serialized agent memory, or None.
        error_message: failure description, or None on success.
    """
    payload = {"status": status, "summary": summary, "error_message": error_message}
    try:
        # default=str keeps non-JSON-native objects from breaking the dump.
        with open(os.path.join(folder, "metadata.json"), "w") as output_file:
            output_file.write(json.dumps(payload, default=str))
    except Exception as e:
        print(f"Failed to save metadata: {e}")
610
+
611
def create_agent(data_dir, desktop):
    """Build an E2BVisionAgent wired to the local Fara model and a sandbox.

    Args:
        data_dir: directory for this run's screenshots/metadata.
        desktop: live E2B sandbox the agent will control.

    Raises:
        RuntimeError: if the global Fara model failed to load at import time.
    """
    # Instantiate the local model wrapper
    if model_f is None:
        raise RuntimeError("Fara model was not loaded successfully.")

    model = FaraLocalModel(model=model_f, processor=processor_f)

    return E2BVisionAgent(
        model=model,
        data_dir=data_dir,
        desktop=desktop,
        max_steps=200,
        verbosity_level=2,
        use_v1_prompt=True,
    )
626
+
627
def generate_interaction_id(session_uuid):
    """Build a per-run identifier: '<session_uuid>_<unix-seconds>'."""
    timestamp = int(time.time())
    return f"{session_uuid}_{timestamp}"
629
+
630
+ # -----------------------------------------------------------------------------
631
+ # GRADIO UI & INTERACTION
632
+ # -----------------------------------------------------------------------------
633
+
634
# App-wide CSS. The <<WIDTH>>/<<HEIGHT>> placeholders are substituted right
# after the literal with the sandbox resolution plus border padding.
custom_css = """
.modal-container { margin: var(--size-16) auto!important; }
.sandbox-container { position: relative; width: 910px; height: 800px; overflow: hidden; margin: auto; }
.sandbox-frame { display: none; position: absolute; top: 0; left: 0; width: 910px; height: 800px; pointer-events:none; }
.sandbox-iframe, .bsod-image { position: absolute; width: <<WIDTH>>px; height: <<HEIGHT>>px; border: 4px solid #444444; transform-origin: 0 0; }
.primary-color-label label span { font-weight: bold; color: var(--color-accent); }
.status-bar { display: flex; flex-direction: row; align-items: center; z-index: 100; }
.status-indicator { width: 15px; height: 15px; border-radius: 50%; }
.status-text { font-size: 16px; font-weight: bold; padding-left: 8px; text-shadow: none; }
.status-interactive { background-color: #2ecc71; animation: blink 2s infinite; }
.status-view-only { background-color: #e74c3c; }
.status-error { background-color: #e74c3c; animation: blink-error 1s infinite; }
@keyframes blink-error { 0% { background-color: rgba(231, 76, 60, 1); } 50% { background-color: rgba(231, 76, 60, 0.4); } 100% { background-color: rgba(231, 76, 60, 1); } }
@keyframes blink { 0% { background-color: rgba(46, 204, 113, 1); } 50% { background-color: rgba(46, 204, 113, 0.4); } 100% { background-color: rgba(46, 204, 113, 1); } }
#chatbot { height:1000px!important; }
#chatbot .role { max-width:95% }
.logo-container { display: flex; flex-direction: column; align-items: flex-start; gap: 5px; }
.logo-item { display: flex; align-items: center; padding: 0 30px; gap: 10px; text-decoration: none!important; color: #f59e0b; font-size:17px; }
""".replace("<<WIDTH>>", str(WIDTH + 15)).replace("<<HEIGHT>>", str(HEIGHT + 10))
653
+
654
# HTML shell around the live sandbox stream; {stream_url}, {status_class} and
# {status_text} are filled via str.format in update_html().
# Fixes: the title heading was closed with a second "<h1>" instead of "</h1>",
# and the BSOD overlay <img> lacked the id="bsod-image" that custom_js looks
# up via document.getElementById — without it the error overlay never shows.
sandbox_html_template = """
<style>
@import url('https://fonts.googleapis.com/css2?family=Oxanium:wght@200..800&display=swap');
</style>
<h1 style="color:var(--color-accent);margin:0;">Fara CUA - <i>Powered by <a href="https://github.com/huggingface/smolagents">smolagents</a></i></h1>
<div class="sandbox-container" style="margin:0;">
    <div class="status-bar">
        <div class="status-indicator {status_class}"></div>
        <div class="status-text">{status_text}</div>
    </div>
    <iframe id="sandbox-iframe"
        src="{stream_url}"
        class="sandbox-iframe"
        style="display: block;"
        allowfullscreen>
    </iframe>
    <img id="bsod-image" src="https://huggingface.co/datasets/mfarre/servedfiles/resolve/main/blue_screen_of_death.gif" class="bsod-image" style="display: none;"/>
    <img src="https://huggingface.co/datasets/m-ric/images/resolve/main/HUD_thom.png" class="sandbox-frame" />
</div>
""".replace("<<WIDTH>>", str(WIDTH + 15)).replace("<<HEIGHT>>", str(HEIGHT + 10))
674
+
675
# Front-end script injected at app load:
#  - forces the dark theme,
#  - polls the hidden #sandbox-creation-time element every 5s and swaps the
#    stream iframe for a "blue screen" overlay once the sandbox times out,
#  - resets that overlay when the user clicks the "Let's go!" button.
# NOTE(review): showBSOD/resetBSOD look up document.getElementById('bsod-image'),
# so the overlay <img> in sandbox_html_template must carry that id (not just
# the class) — verify, otherwise the overlay never appears.
custom_js = """function() {
    document.body.classList.add('dark');
    // Function to check if sandbox is timing out
    const checkSandboxTimeout = function() {
        const timeElement = document.getElementById('sandbox-creation-time');
        if (timeElement) {
            const creationTime = parseFloat(timeElement.getAttribute('data-time'));
            const timeoutValue = parseFloat(timeElement.getAttribute('data-timeout'));
            const currentTime = Math.floor(Date.now() / 1000);
            if (currentTime - creationTime >= timeoutValue) {
                showBSOD('Error');
                return;
            }
        }
        setTimeout(checkSandboxTimeout, 5000);
    };
    const showBSOD = function(statusText = 'Error') {
        const iframe = document.getElementById('sandbox-iframe');
        const bsod = document.getElementById('bsod-image');
        if (iframe && bsod) {
            iframe.style.display = 'none';
            bsod.style.display = 'block';
            document.querySelector('.status-indicator').className = 'status-indicator status-error';
            document.querySelector('.status-text').innerText = statusText;
        }
    };
    const resetBSOD = function() {
        const iframe = document.getElementById('sandbox-iframe');
        const bsod = document.getElementById('bsod-image');
        if (iframe && bsod && bsod.style.display === 'block') {
            iframe.style.display = 'block';
            bsod.style.display = 'none';
        }
    };
    document.addEventListener('click', function(e) {
        if (e.target.tagName === 'BUTTON' && e.target.innerText === "Let's go!") {
            resetBSOD();
        }
    });
    checkSandboxTimeout();
    const params = new URLSearchParams(window.location.search);
    if (!params.has('__theme')) {
        params.set('__theme', 'dark');
        window.location.search = params.toString();
    }
}"""
721
+
722
def update_html(interactive_mode: bool, session_uuid):
    """Render the sandbox iframe HTML for a session.

    Args:
        interactive_mode: True -> user-controllable stream; False -> view-only.
        session_uuid: browser session id used to look up / create the sandbox.

    Returns:
        HTML string: the streamed desktop plus a hidden element that custom_js
        reads to detect sandbox timeout client-side.
    """
    desktop = get_or_create_sandbox(session_uuid)
    auth_key = desktop.stream.get_auth_key()
    base_url = desktop.stream.get_url(auth_key=auth_key)
    # View-only mode appends the e2b stream's view_only query flag.
    stream_url = base_url if interactive_mode else f"{base_url}&view_only=true"

    status_class = "status-interactive" if interactive_mode else "status-view-only"
    status_text = "Interactive" if interactive_mode else "Agent running..."
    creation_time = (
        SANDBOX_METADATA[session_uuid]["created_at"]
        if session_uuid in SANDBOX_METADATA
        else time.time()
    )

    sandbox_html_content = sandbox_html_template.format(
        stream_url=stream_url,
        status_class=status_class,
        status_text=status_text,
    )
    # Hidden element consumed by custom_js's timeout poller.
    sandbox_html_content += f'<div id="sandbox-creation-time" style="display:none;" data-time="{creation_time}" data-timeout="{SANDBOX_TIMEOUT}"></div>'
    return sandbox_html_content
743
+
744
def initialize_session(interactive_mode, browser_uuid):
    """Ensure the browser session has a UUID; return (sandbox HTML, uuid).

    A fresh UUID is minted only when the browser hasn't stored one yet.
    """
    session_id = browser_uuid if browser_uuid else str(uuid.uuid4())
    return update_html(interactive_mode, session_id), session_id
750
+
751
class EnrichedGradioUI(GradioUI):
    # Overrides GradioUI.interact_with_agent to run the agent against an E2B
    # sandbox, streaming chat messages (and marked screenshots) to the UI.
    def interact_with_agent(
        self,
        task_input,
        stored_messages,
        session_state,
        session_uuid,
        consent_storage,
        request: gr.Request,
    ):
        """Generator: yields the updated chat history after each agent event.

        Args:
            task_input: user task text.
            stored_messages: running list of gr.ChatMessage shown in the chatbot.
            session_state: per-session dict; the agent is (re)stored under "agent".
            session_uuid: browser session id used to locate/create the sandbox.
            consent_storage: if truthy, run summary/metadata is written to disk.
            request: Gradio request object (part of the event signature).
        """
        interaction_id = generate_interaction_id(session_uuid)
        desktop = get_or_create_sandbox(session_uuid)

        data_dir = os.path.join(TMP_DIR, interaction_id)
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)

        # Re-create agent to ensure fresh context with the Fara model
        session_state["agent"] = create_agent(data_dir=data_dir, desktop=desktop)

        try:
            stored_messages.append(gr.ChatMessage(role="user", content=task_input))
            yield stored_messages

            # Seed the task with the current screen so the model sees the desktop.
            screenshot_bytes = session_state["agent"].desktop.screenshot(format="bytes")
            initial_screenshot = Image.open(BytesIO(screenshot_bytes))

            for msg in stream_to_gradio(
                session_state["agent"],
                task=task_input,
                task_images=[initial_screenshot],
                reset_agent_memory=False,
            ):
                # "-----" delimits steps; insert the latest marked screenshot
                # before it so each step shows what the agent saw.
                if (
                    hasattr(session_state["agent"], "last_marked_screenshot")
                    and msg.content == "-----"
                ):
                    stored_messages.append(
                        gr.ChatMessage(
                            role="assistant",
                            content={
                                "path": session_state["agent"].last_marked_screenshot.to_string(),
                                "mime_type": "image/png",
                            },
                        )
                    )
                stored_messages.append(msg)
                yield stored_messages

            if consent_storage:
                summary = get_agent_summary_erase_images(session_state["agent"])
                save_final_status(data_dir, "completed", summary=summary)
            yield stored_messages

        except Exception as e:
            error_message = f"Error in interaction: {str(e)}"
            print(error_message)
            stored_messages.append(
                gr.ChatMessage(role="assistant", content="Run failed:\n" + error_message)
            )
            if consent_storage:
                save_final_status(data_dir, "failed", summary=None, error_message=error_message)
            yield stored_messages
814
+
815
# -----------------------------------------------------------------------------
# MAIN APP CONSTRUCTION
# -----------------------------------------------------------------------------

# Global Gradio theme: Oxanium font, amber primary / blue secondary hues.
theme = gr.themes.Default(
    font=["Oxanium", "sans-serif"], primary_hue="amber", secondary_hue="blue"
)
822
+
823
with gr.Blocks(theme=theme, css=custom_css, js=custom_js) as demo:
    # Browser-scoped session id, populated from localStorage on load.
    session_uuid_state = gr.State(None)

    with gr.Row():
        # Live stream of the remote sandbox desktop (URL filled in on load).
        sandbox_html = gr.HTML(
            value=sandbox_html_template.format(
                stream_url="",
                status_class="status-interactive",
                status_text="Interactive",
            ),
            label="Output",
        )
    with gr.Sidebar(position="left"):
        # One-shot welcome dialog shown on first render.
        with Modal(visible=True) as modal:
            gr.Markdown("""### Welcome to Fara CUA Demo 🖥️
This agent uses **microsoft/Fara-7B** (running locally) and **smolagents** to control a remote computer.

👉 Type a task, click 'Let's go!', and watch the agent work.
""")
        task_input = gr.Textbox(
            value="Find me pictures of cute puppies",
            label="Enter your task below:",
            elem_classes="primary-color-label",
        )

        run_btn = gr.Button("Let's go!", variant="primary")

        # Simple controls
        stop_btn = gr.Button("Stop the agent!", variant="secondary")
        consent_storage = gr.Checkbox(label="Store logs locally?", value=True)

        gr.Examples(
            examples=[
                "Use Google Maps to find the Hugging Face HQ in Paris",
                "Go to Wikipedia and find what happened on April 4th",
                "Find out the travel time by train from Bern to Basel on Google Maps",
            ],
            inputs=task_input,
        )

        # Per-session mutable state: agent instance and chat transcript.
        session_state = gr.State({})
        stored_messages = gr.State([])

        chatbot_display = gr.Chatbot(
            elem_id="chatbot",
            label="Agent's execution logs",
            type="messages",
            avatar_images=(None, "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"),
            resizable=True,
        )

    # Dummy agent init for UI wrapper (actual agent created in interaction loop)
    # We pass a dummy CodeAgent just to initialize the UI class
    agent_ui = EnrichedGradioUI(CodeAgent(tools=[], model=Model(), name="init"))

    # Hidden flag; its load-time update triggers session initialization below.
    is_interactive = gr.Checkbox(value=True, visible=False)
+
880
def clear_and_set_view_only(task_input, session_uuid):
    """Lock the sandbox stream to view-only mode when a run starts.

    `task_input` is unused; it is accepted only because the click handler
    passes it positionally alongside the session id.
    """
    view_only_html = update_html(False, session_uuid)
    return view_only_html
882
+
883
def set_interactive(session_uuid):
    """Hand interactive control of the sandbox back to the user."""
    interactive_html = update_html(True, session_uuid)
    return interactive_html
885
+
886
def interrupt_agent(session_state):
    """Request the running agent (if any) to stop.

    Returns the label to display on the stop button: "Stopped" when an
    interrupt was just issued, "Stop" otherwise (no agent, or one that is
    already interrupting).
    """
    if "agent" not in session_state:
        return "Stop"
    agent = session_state["agent"]
    if agent.interrupt_switch:
        return "Stop"
    agent.interrupt()
    return "Stopped"
891
+
892
# Event Wiring
# A run is three chained steps: 1) lock the stream to view-only,
# 2) stream the agent's execution into the chatbot, 3) restore
# interactive control once the generator finishes.
run_event = (
    run_btn.click(
        fn=clear_and_set_view_only,
        inputs=[task_input, session_uuid_state],
        outputs=[sandbox_html],
    )
    .then(
        agent_ui.interact_with_agent,
        inputs=[
            task_input,
            stored_messages,
            session_state,
            session_uuid_state,
            consent_storage,
        ],
        outputs=[chatbot_display],
    )
    .then(fn=set_interactive, inputs=[session_uuid_state], outputs=[sandbox_html])
)

stop_btn.click(fn=interrupt_agent, inputs=[session_state], outputs=[])

# Initialization on load
# NOTE(review): the js callback reads (or mints and stores) a per-browser
# uuid in localStorage; presumably its return value is what reaches
# initialize_session as `browser_uuid` — verify against Gradio's `js`
# parameter semantics for this version.
demo.load(
    fn=lambda: True,
    outputs=[is_interactive],
).then(
    fn=initialize_session,
    js="() => localStorage.getItem('gradio-session-uuid') || (() => { const id = self.crypto.randomUUID(); localStorage.setItem('gradio-session-uuid', id); return id })()",
    inputs=[is_interactive],
    outputs=[sandbox_html, session_uuid_state],
)
925
+
926
if __name__ == "__main__":
    # threading.Timer fires exactly once, 60s from now; assumes
    # cleanup_sandboxes reschedules itself to keep reaping expired
    # sandboxes periodically — TODO confirm its definition.
    Timer(60, cleanup_sandboxes).start()
    demo.launch()