Spaces:
Runtime error
Runtime error
Upload 5 files
Browse files- src/agent/__init__.py +6 -0
- src/agent/custom_agent.py +480 -480
- src/agent/custom_massage_manager.py +121 -0
- src/agent/custom_prompts.py +205 -0
- src/agent/custom_views.py +59 -0
src/agent/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 2025/1/1
|
| 3 |
+
# @Author : wenshao
|
| 4 |
+
# @Email : wenshaoguo1026@gmail.com
|
| 5 |
+
# @Project : browser-use-webui
|
| 6 |
+
# @FileName: __init__.py.py
|
src/agent/custom_agent.py
CHANGED
|
@@ -1,480 +1,480 @@
|
|
| 1 |
-
# -*- coding: utf-8 -*-
|
| 2 |
-
# @Time : 2025/1/2
|
| 3 |
-
# @Author : wenshao
|
| 4 |
-
# @ProjectName: browser-use-webui
|
| 5 |
-
# @FileName: custom_agent.py
|
| 6 |
-
|
| 7 |
-
import json
|
| 8 |
-
import logging
|
| 9 |
-
import pdb
|
| 10 |
-
import traceback
|
| 11 |
-
from typing import Optional, Type
|
| 12 |
-
from PIL import Image, ImageDraw, ImageFont
|
| 13 |
-
import os
|
| 14 |
-
import base64
|
| 15 |
-
import io
|
| 16 |
-
|
| 17 |
-
from browser_use.agent.prompts import SystemPrompt
|
| 18 |
-
from browser_use.agent.service import Agent
|
| 19 |
-
from browser_use.agent.views import (
|
| 20 |
-
ActionResult,
|
| 21 |
-
AgentHistoryList,
|
| 22 |
-
AgentOutput,
|
| 23 |
-
AgentHistory,
|
| 24 |
-
)
|
| 25 |
-
from browser_use.browser.browser import Browser
|
| 26 |
-
from browser_use.browser.context import BrowserContext
|
| 27 |
-
from browser_use.browser.views import BrowserStateHistory
|
| 28 |
-
from browser_use.controller.service import Controller
|
| 29 |
-
from browser_use.telemetry.views import (
|
| 30 |
-
AgentEndTelemetryEvent,
|
| 31 |
-
AgentRunTelemetryEvent,
|
| 32 |
-
AgentStepErrorTelemetryEvent,
|
| 33 |
-
)
|
| 34 |
-
from browser_use.utils import time_execution_async
|
| 35 |
-
from langchain_core.language_models.chat_models import BaseChatModel
|
| 36 |
-
from langchain_core.messages import (
|
| 37 |
-
BaseMessage,
|
| 38 |
-
)
|
| 39 |
-
from src.utils.agent_state import AgentState
|
| 40 |
-
|
| 41 |
-
from .custom_massage_manager import CustomMassageManager
|
| 42 |
-
from .custom_views import CustomAgentOutput, CustomAgentStepInfo
|
| 43 |
-
|
| 44 |
-
logger = logging.getLogger(__name__)
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
class CustomAgent(Agent):
|
| 48 |
-
def __init__(
|
| 49 |
-
self,
|
| 50 |
-
task: str,
|
| 51 |
-
llm: BaseChatModel,
|
| 52 |
-
add_infos: str = "",
|
| 53 |
-
browser: Browser | None = None,
|
| 54 |
-
browser_context: BrowserContext | None = None,
|
| 55 |
-
controller: Controller = Controller(),
|
| 56 |
-
use_vision: bool = True,
|
| 57 |
-
save_conversation_path: Optional[str] = None,
|
| 58 |
-
max_failures: int = 5,
|
| 59 |
-
retry_delay: int = 10,
|
| 60 |
-
system_prompt_class: Type[SystemPrompt] = SystemPrompt,
|
| 61 |
-
max_input_tokens: int = 128000,
|
| 62 |
-
validate_output: bool = False,
|
| 63 |
-
include_attributes: list[str] = [
|
| 64 |
-
"title",
|
| 65 |
-
"type",
|
| 66 |
-
"name",
|
| 67 |
-
"role",
|
| 68 |
-
"tabindex",
|
| 69 |
-
"aria-label",
|
| 70 |
-
"placeholder",
|
| 71 |
-
"value",
|
| 72 |
-
"alt",
|
| 73 |
-
"aria-expanded",
|
| 74 |
-
],
|
| 75 |
-
max_error_length: int = 400,
|
| 76 |
-
max_actions_per_step: int = 10,
|
| 77 |
-
tool_call_in_content: bool = True,
|
| 78 |
-
agent_state: AgentState = None,
|
| 79 |
-
):
|
| 80 |
-
super().__init__(
|
| 81 |
-
task=task,
|
| 82 |
-
llm=llm,
|
| 83 |
-
browser=browser,
|
| 84 |
-
browser_context=browser_context,
|
| 85 |
-
controller=controller,
|
| 86 |
-
use_vision=use_vision,
|
| 87 |
-
save_conversation_path=save_conversation_path,
|
| 88 |
-
max_failures=max_failures,
|
| 89 |
-
retry_delay=retry_delay,
|
| 90 |
-
system_prompt_class=system_prompt_class,
|
| 91 |
-
max_input_tokens=max_input_tokens,
|
| 92 |
-
validate_output=validate_output,
|
| 93 |
-
include_attributes=include_attributes,
|
| 94 |
-
max_error_length=max_error_length,
|
| 95 |
-
max_actions_per_step=max_actions_per_step,
|
| 96 |
-
tool_call_in_content=tool_call_in_content,
|
| 97 |
-
)
|
| 98 |
-
self.add_infos = add_infos
|
| 99 |
-
self.agent_state = agent_state
|
| 100 |
-
self.message_manager = CustomMassageManager(
|
| 101 |
-
llm=self.llm,
|
| 102 |
-
task=self.task,
|
| 103 |
-
action_descriptions=self.controller.registry.get_prompt_description(),
|
| 104 |
-
system_prompt_class=self.system_prompt_class,
|
| 105 |
-
max_input_tokens=self.max_input_tokens,
|
| 106 |
-
include_attributes=self.include_attributes,
|
| 107 |
-
max_error_length=self.max_error_length,
|
| 108 |
-
max_actions_per_step=self.max_actions_per_step,
|
| 109 |
-
tool_call_in_content=tool_call_in_content,
|
| 110 |
-
)
|
| 111 |
-
|
| 112 |
-
def _setup_action_models(self) -> None:
|
| 113 |
-
"""Setup dynamic action models from controller's registry"""
|
| 114 |
-
# Get the dynamic action model from controller's registry
|
| 115 |
-
self.ActionModel = self.controller.registry.create_action_model()
|
| 116 |
-
# Create output model with the dynamic actions
|
| 117 |
-
self.AgentOutput = CustomAgentOutput.type_with_custom_actions(self.ActionModel)
|
| 118 |
-
|
| 119 |
-
def _log_response(self, response: CustomAgentOutput) -> None:
|
| 120 |
-
"""Log the model's response"""
|
| 121 |
-
if "Success" in response.current_state.prev_action_evaluation:
|
| 122 |
-
emoji = "✅"
|
| 123 |
-
elif "Failed" in response.current_state.prev_action_evaluation:
|
| 124 |
-
emoji = "❌"
|
| 125 |
-
else:
|
| 126 |
-
emoji = "🤷"
|
| 127 |
-
|
| 128 |
-
logger.info(f"{emoji} Eval: {response.current_state.prev_action_evaluation}")
|
| 129 |
-
logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
|
| 130 |
-
logger.info(f"⏳ Task Progress: {response.current_state.completed_contents}")
|
| 131 |
-
logger.info(f"🤔 Thought: {response.current_state.thought}")
|
| 132 |
-
logger.info(f"🎯 Summary: {response.current_state.summary}")
|
| 133 |
-
for i, action in enumerate(response.action):
|
| 134 |
-
logger.info(
|
| 135 |
-
f"🛠️ Action {i + 1}/{len(response.action)}: {action.model_dump_json(exclude_unset=True)}"
|
| 136 |
-
)
|
| 137 |
-
|
| 138 |
-
def update_step_info(
|
| 139 |
-
self, model_output: CustomAgentOutput, step_info: CustomAgentStepInfo = None
|
| 140 |
-
):
|
| 141 |
-
"""
|
| 142 |
-
update step info
|
| 143 |
-
"""
|
| 144 |
-
if step_info is None:
|
| 145 |
-
return
|
| 146 |
-
|
| 147 |
-
step_info.step_number += 1
|
| 148 |
-
important_contents = model_output.current_state.important_contents
|
| 149 |
-
if (
|
| 150 |
-
important_contents
|
| 151 |
-
and "None" not in important_contents
|
| 152 |
-
and important_contents not in step_info.memory
|
| 153 |
-
):
|
| 154 |
-
step_info.memory += important_contents + "\n"
|
| 155 |
-
|
| 156 |
-
completed_contents = model_output.current_state.completed_contents
|
| 157 |
-
if completed_contents and "None" not in completed_contents:
|
| 158 |
-
step_info.task_progress = completed_contents
|
| 159 |
-
|
| 160 |
-
@time_execution_async("--get_next_action")
|
| 161 |
-
async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
|
| 162 |
-
"""Get next action from LLM based on current state"""
|
| 163 |
-
try:
|
| 164 |
-
structured_llm = self.llm.with_structured_output(self.AgentOutput, include_raw=True)
|
| 165 |
-
response: dict[str, Any] = await structured_llm.ainvoke(input_messages) # type: ignore
|
| 166 |
-
|
| 167 |
-
parsed: AgentOutput = response['parsed']
|
| 168 |
-
# cut the number of actions to max_actions_per_step
|
| 169 |
-
parsed.action = parsed.action[: self.max_actions_per_step]
|
| 170 |
-
self._log_response(parsed)
|
| 171 |
-
self.n_steps += 1
|
| 172 |
-
|
| 173 |
-
return parsed
|
| 174 |
-
except Exception as e:
|
| 175 |
-
# If something goes wrong, try to invoke the LLM again without structured output,
|
| 176 |
-
# and Manually parse the response. Temporarily solution for DeepSeek
|
| 177 |
-
ret = self.llm.invoke(input_messages)
|
| 178 |
-
if isinstance(ret.content, list):
|
| 179 |
-
parsed_json = json.loads(ret.content[0].replace("```json", "").replace("```", ""))
|
| 180 |
-
else:
|
| 181 |
-
parsed_json = json.loads(ret.content.replace("```json", "").replace("```", ""))
|
| 182 |
-
parsed: AgentOutput = self.AgentOutput(**parsed_json)
|
| 183 |
-
if parsed is None:
|
| 184 |
-
raise ValueError(f'Could not parse response.')
|
| 185 |
-
|
| 186 |
-
# cut the number of actions to max_actions_per_step
|
| 187 |
-
parsed.action = parsed.action[: self.max_actions_per_step]
|
| 188 |
-
self._log_response(parsed)
|
| 189 |
-
self.n_steps += 1
|
| 190 |
-
|
| 191 |
-
return parsed
|
| 192 |
-
|
| 193 |
-
@time_execution_async("--step")
|
| 194 |
-
async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
|
| 195 |
-
"""Execute one step of the task"""
|
| 196 |
-
logger.info(f"\n📍 Step {self.n_steps}")
|
| 197 |
-
state = None
|
| 198 |
-
model_output = None
|
| 199 |
-
result: list[ActionResult] = []
|
| 200 |
-
|
| 201 |
-
try:
|
| 202 |
-
state = await self.browser_context.get_state(use_vision=self.use_vision)
|
| 203 |
-
self.message_manager.add_state_message(state, self._last_result, step_info)
|
| 204 |
-
input_messages = self.message_manager.get_messages()
|
| 205 |
-
model_output = await self.get_next_action(input_messages)
|
| 206 |
-
self.update_step_info(model_output, step_info)
|
| 207 |
-
logger.info(f"🧠 All Memory: {step_info.memory}")
|
| 208 |
-
self._save_conversation(input_messages, model_output)
|
| 209 |
-
self.message_manager._remove_last_state_message() # we dont want the whole state in the chat history
|
| 210 |
-
self.message_manager.add_model_output(model_output)
|
| 211 |
-
|
| 212 |
-
result: list[ActionResult] = await self.controller.multi_act(
|
| 213 |
-
model_output.action, self.browser_context
|
| 214 |
-
)
|
| 215 |
-
self._last_result = result
|
| 216 |
-
|
| 217 |
-
if len(result) > 0 and result[-1].is_done:
|
| 218 |
-
logger.info(f"📄 Result: {result[-1].extracted_content}")
|
| 219 |
-
|
| 220 |
-
self.consecutive_failures = 0
|
| 221 |
-
|
| 222 |
-
except Exception as e:
|
| 223 |
-
result = self._handle_step_error(e)
|
| 224 |
-
self._last_result = result
|
| 225 |
-
|
| 226 |
-
finally:
|
| 227 |
-
if not result:
|
| 228 |
-
return
|
| 229 |
-
for r in result:
|
| 230 |
-
if r.error:
|
| 231 |
-
self.telemetry.capture(
|
| 232 |
-
AgentStepErrorTelemetryEvent(
|
| 233 |
-
agent_id=self.agent_id,
|
| 234 |
-
error=r.error,
|
| 235 |
-
)
|
| 236 |
-
)
|
| 237 |
-
if state:
|
| 238 |
-
self._make_history_item(model_output, state, result)
|
| 239 |
-
def create_history_gif(
|
| 240 |
-
self,
|
| 241 |
-
output_path: str = 'agent_history.gif',
|
| 242 |
-
duration: int = 3000,
|
| 243 |
-
show_goals: bool = True,
|
| 244 |
-
show_task: bool = True,
|
| 245 |
-
show_logo: bool = False,
|
| 246 |
-
font_size: int = 40,
|
| 247 |
-
title_font_size: int = 56,
|
| 248 |
-
goal_font_size: int = 44,
|
| 249 |
-
margin: int = 40,
|
| 250 |
-
line_spacing: float = 1.5,
|
| 251 |
-
) -> None:
|
| 252 |
-
"""Create a GIF from the agent's history with overlaid task and goal text."""
|
| 253 |
-
if not self.history.history:
|
| 254 |
-
logger.warning('No history to create GIF from')
|
| 255 |
-
return
|
| 256 |
-
|
| 257 |
-
images = []
|
| 258 |
-
# if history is empty or first screenshot is None, we can't create a gif
|
| 259 |
-
if not self.history.history or not self.history.history[0].state.screenshot:
|
| 260 |
-
logger.warning('No history or first screenshot to create GIF from')
|
| 261 |
-
return
|
| 262 |
-
|
| 263 |
-
# Try to load nicer fonts
|
| 264 |
-
try:
|
| 265 |
-
# Try different font options in order of preference
|
| 266 |
-
font_options = ['Helvetica', 'Arial', 'DejaVuSans', 'Verdana']
|
| 267 |
-
font_loaded = False
|
| 268 |
-
|
| 269 |
-
for font_name in font_options:
|
| 270 |
-
try:
|
| 271 |
-
import platform
|
| 272 |
-
if platform.system() == "Windows":
|
| 273 |
-
# Need to specify the abs font path on Windows
|
| 274 |
-
font_name = os.path.join(os.getenv("WIN_FONT_DIR", "C:\\Windows\\Fonts"), font_name + ".ttf")
|
| 275 |
-
regular_font = ImageFont.truetype(font_name, font_size)
|
| 276 |
-
title_font = ImageFont.truetype(font_name, title_font_size)
|
| 277 |
-
goal_font = ImageFont.truetype(font_name, goal_font_size)
|
| 278 |
-
font_loaded = True
|
| 279 |
-
break
|
| 280 |
-
except OSError:
|
| 281 |
-
continue
|
| 282 |
-
|
| 283 |
-
if not font_loaded:
|
| 284 |
-
raise OSError('No preferred fonts found')
|
| 285 |
-
|
| 286 |
-
except OSError:
|
| 287 |
-
regular_font = ImageFont.load_default()
|
| 288 |
-
title_font = ImageFont.load_default()
|
| 289 |
-
|
| 290 |
-
goal_font = regular_font
|
| 291 |
-
|
| 292 |
-
# Load logo if requested
|
| 293 |
-
logo = None
|
| 294 |
-
if show_logo:
|
| 295 |
-
try:
|
| 296 |
-
logo = Image.open('./static/browser-use.png')
|
| 297 |
-
# Resize logo to be small (e.g., 40px height)
|
| 298 |
-
logo_height = 150
|
| 299 |
-
aspect_ratio = logo.width / logo.height
|
| 300 |
-
logo_width = int(logo_height * aspect_ratio)
|
| 301 |
-
logo = logo.resize((logo_width, logo_height), Image.Resampling.LANCZOS)
|
| 302 |
-
except Exception as e:
|
| 303 |
-
logger.warning(f'Could not load logo: {e}')
|
| 304 |
-
|
| 305 |
-
# Create task frame if requested
|
| 306 |
-
if show_task and self.task:
|
| 307 |
-
task_frame = self._create_task_frame(
|
| 308 |
-
self.task,
|
| 309 |
-
self.history.history[0].state.screenshot,
|
| 310 |
-
title_font,
|
| 311 |
-
regular_font,
|
| 312 |
-
logo,
|
| 313 |
-
line_spacing,
|
| 314 |
-
)
|
| 315 |
-
images.append(task_frame)
|
| 316 |
-
|
| 317 |
-
# Process each history item
|
| 318 |
-
for i, item in enumerate(self.history.history, 1):
|
| 319 |
-
if not item.state.screenshot:
|
| 320 |
-
continue
|
| 321 |
-
|
| 322 |
-
# Convert base64 screenshot to PIL Image
|
| 323 |
-
img_data = base64.b64decode(item.state.screenshot)
|
| 324 |
-
image = Image.open(io.BytesIO(img_data))
|
| 325 |
-
|
| 326 |
-
if show_goals and item.model_output:
|
| 327 |
-
image = self._add_overlay_to_image(
|
| 328 |
-
image=image,
|
| 329 |
-
step_number=i,
|
| 330 |
-
goal_text=item.model_output.current_state.thought,
|
| 331 |
-
regular_font=regular_font,
|
| 332 |
-
title_font=title_font,
|
| 333 |
-
margin=margin,
|
| 334 |
-
logo=logo,
|
| 335 |
-
)
|
| 336 |
-
|
| 337 |
-
images.append(image)
|
| 338 |
-
|
| 339 |
-
if images:
|
| 340 |
-
# Save the GIF
|
| 341 |
-
images[0].save(
|
| 342 |
-
output_path,
|
| 343 |
-
save_all=True,
|
| 344 |
-
append_images=images[1:],
|
| 345 |
-
duration=duration,
|
| 346 |
-
loop=0,
|
| 347 |
-
optimize=False,
|
| 348 |
-
)
|
| 349 |
-
logger.info(f'Created GIF at {output_path}')
|
| 350 |
-
else:
|
| 351 |
-
logger.warning('No images found in history to create GIF')
|
| 352 |
-
|
| 353 |
-
async def run(self, max_steps: int = 100) -> AgentHistoryList:
|
| 354 |
-
"""Execute the task with maximum number of steps"""
|
| 355 |
-
try:
|
| 356 |
-
logger.info(f"🚀 Starting task: {self.task}")
|
| 357 |
-
|
| 358 |
-
self.telemetry.capture(
|
| 359 |
-
AgentRunTelemetryEvent(
|
| 360 |
-
agent_id=self.agent_id,
|
| 361 |
-
task=self.task,
|
| 362 |
-
)
|
| 363 |
-
)
|
| 364 |
-
|
| 365 |
-
step_info = CustomAgentStepInfo(
|
| 366 |
-
task=self.task,
|
| 367 |
-
add_infos=self.add_infos,
|
| 368 |
-
step_number=1,
|
| 369 |
-
max_steps=max_steps,
|
| 370 |
-
memory="",
|
| 371 |
-
task_progress="",
|
| 372 |
-
)
|
| 373 |
-
|
| 374 |
-
for step in range(max_steps):
|
| 375 |
-
# 1) Check if stop requested
|
| 376 |
-
if self.agent_state and self.agent_state.is_stop_requested():
|
| 377 |
-
logger.info("🛑 Stop requested by user")
|
| 378 |
-
self._create_stop_history_item()
|
| 379 |
-
break
|
| 380 |
-
|
| 381 |
-
# 2) Store last valid state before step
|
| 382 |
-
if self.browser_context and self.agent_state:
|
| 383 |
-
state = await self.browser_context.get_state(use_vision=self.use_vision)
|
| 384 |
-
self.agent_state.set_last_valid_state(state)
|
| 385 |
-
|
| 386 |
-
if self._too_many_failures():
|
| 387 |
-
break
|
| 388 |
-
|
| 389 |
-
# 3) Do the step
|
| 390 |
-
await self.step(step_info)
|
| 391 |
-
|
| 392 |
-
if self.history.is_done():
|
| 393 |
-
if (
|
| 394 |
-
self.validate_output and step < max_steps - 1
|
| 395 |
-
): # if last step, we dont need to validate
|
| 396 |
-
if not await self._validate_output():
|
| 397 |
-
continue
|
| 398 |
-
|
| 399 |
-
logger.info("✅ Task completed successfully")
|
| 400 |
-
break
|
| 401 |
-
else:
|
| 402 |
-
logger.info("❌ Failed to complete task in maximum steps")
|
| 403 |
-
|
| 404 |
-
return self.history
|
| 405 |
-
|
| 406 |
-
finally:
|
| 407 |
-
self.telemetry.capture(
|
| 408 |
-
AgentEndTelemetryEvent(
|
| 409 |
-
agent_id=self.agent_id,
|
| 410 |
-
task=self.task,
|
| 411 |
-
success=self.history.is_done(),
|
| 412 |
-
steps=len(self.history.history),
|
| 413 |
-
)
|
| 414 |
-
)
|
| 415 |
-
if not self.injected_browser_context:
|
| 416 |
-
await self.browser_context.close()
|
| 417 |
-
|
| 418 |
-
if not self.injected_browser and self.browser:
|
| 419 |
-
await self.browser.close()
|
| 420 |
-
|
| 421 |
-
if self.generate_gif:
|
| 422 |
-
self.create_history_gif()
|
| 423 |
-
|
| 424 |
-
def _create_stop_history_item(self):
|
| 425 |
-
"""Create a history item for when the agent is stopped."""
|
| 426 |
-
try:
|
| 427 |
-
# Attempt to retrieve the last valid state from agent_state
|
| 428 |
-
state = None
|
| 429 |
-
if self.agent_state:
|
| 430 |
-
last_state = self.agent_state.get_last_valid_state()
|
| 431 |
-
if last_state:
|
| 432 |
-
# Convert to BrowserStateHistory
|
| 433 |
-
state = BrowserStateHistory(
|
| 434 |
-
url=getattr(last_state, 'url', ""),
|
| 435 |
-
title=getattr(last_state, 'title', ""),
|
| 436 |
-
tabs=getattr(last_state, 'tabs', []),
|
| 437 |
-
interacted_element=[None],
|
| 438 |
-
screenshot=getattr(last_state, 'screenshot', None)
|
| 439 |
-
)
|
| 440 |
-
else:
|
| 441 |
-
state = self._create_empty_state()
|
| 442 |
-
else:
|
| 443 |
-
state = self._create_empty_state()
|
| 444 |
-
|
| 445 |
-
# Create a final item in the agent history indicating done
|
| 446 |
-
stop_history = AgentHistory(
|
| 447 |
-
model_output=None,
|
| 448 |
-
state=state,
|
| 449 |
-
result=[ActionResult(extracted_content=None, error=None, is_done=True)]
|
| 450 |
-
)
|
| 451 |
-
self.history.history.append(stop_history)
|
| 452 |
-
|
| 453 |
-
except Exception as e:
|
| 454 |
-
logger.error(f"Error creating stop history item: {e}")
|
| 455 |
-
# Create empty state as fallback
|
| 456 |
-
state = self._create_empty_state()
|
| 457 |
-
stop_history = AgentHistory(
|
| 458 |
-
model_output=None,
|
| 459 |
-
state=state,
|
| 460 |
-
result=[ActionResult(extracted_content=None, error=None, is_done=True)]
|
| 461 |
-
)
|
| 462 |
-
self.history.history.append(stop_history)
|
| 463 |
-
|
| 464 |
-
def _convert_to_browser_state_history(self, browser_state):
|
| 465 |
-
return BrowserStateHistory(
|
| 466 |
-
url=getattr(browser_state, 'url', ""),
|
| 467 |
-
title=getattr(browser_state, 'title', ""),
|
| 468 |
-
tabs=getattr(browser_state, 'tabs', []),
|
| 469 |
-
interacted_element=[None],
|
| 470 |
-
screenshot=getattr(browser_state, 'screenshot', None)
|
| 471 |
-
)
|
| 472 |
-
|
| 473 |
-
def _create_empty_state(self):
|
| 474 |
-
return BrowserStateHistory(
|
| 475 |
-
url="",
|
| 476 |
-
title="",
|
| 477 |
-
tabs=[],
|
| 478 |
-
interacted_element=[None],
|
| 479 |
-
screenshot=None
|
| 480 |
-
)
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 2025/1/2
|
| 3 |
+
# @Author : wenshao
|
| 4 |
+
# @ProjectName: browser-use-webui
|
| 5 |
+
# @FileName: custom_agent.py
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import pdb
|
| 10 |
+
import traceback
|
| 11 |
+
from typing import Optional, Type
|
| 12 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 13 |
+
import os
|
| 14 |
+
import base64
|
| 15 |
+
import io
|
| 16 |
+
|
| 17 |
+
from browser_use.agent.prompts import SystemPrompt
|
| 18 |
+
from browser_use.agent.service import Agent
|
| 19 |
+
from browser_use.agent.views import (
|
| 20 |
+
ActionResult,
|
| 21 |
+
AgentHistoryList,
|
| 22 |
+
AgentOutput,
|
| 23 |
+
AgentHistory,
|
| 24 |
+
)
|
| 25 |
+
from browser_use.browser.browser import Browser
|
| 26 |
+
from browser_use.browser.context import BrowserContext
|
| 27 |
+
from browser_use.browser.views import BrowserStateHistory
|
| 28 |
+
from browser_use.controller.service import Controller
|
| 29 |
+
from browser_use.telemetry.views import (
|
| 30 |
+
AgentEndTelemetryEvent,
|
| 31 |
+
AgentRunTelemetryEvent,
|
| 32 |
+
AgentStepErrorTelemetryEvent,
|
| 33 |
+
)
|
| 34 |
+
from browser_use.utils import time_execution_async
|
| 35 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
| 36 |
+
from langchain_core.messages import (
|
| 37 |
+
BaseMessage,
|
| 38 |
+
)
|
| 39 |
+
from src.utils.agent_state import AgentState
|
| 40 |
+
|
| 41 |
+
from .custom_massage_manager import CustomMassageManager
|
| 42 |
+
from .custom_views import CustomAgentOutput, CustomAgentStepInfo
|
| 43 |
+
|
| 44 |
+
logger = logging.getLogger(__name__)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class CustomAgent(Agent):
|
| 48 |
+
def __init__(
|
| 49 |
+
self,
|
| 50 |
+
task: str,
|
| 51 |
+
llm: BaseChatModel,
|
| 52 |
+
add_infos: str = "",
|
| 53 |
+
browser: Browser | None = None,
|
| 54 |
+
browser_context: BrowserContext | None = None,
|
| 55 |
+
controller: Controller = Controller(),
|
| 56 |
+
use_vision: bool = True,
|
| 57 |
+
save_conversation_path: Optional[str] = None,
|
| 58 |
+
max_failures: int = 5,
|
| 59 |
+
retry_delay: int = 10,
|
| 60 |
+
system_prompt_class: Type[SystemPrompt] = SystemPrompt,
|
| 61 |
+
max_input_tokens: int = 128000,
|
| 62 |
+
validate_output: bool = False,
|
| 63 |
+
include_attributes: list[str] = [
|
| 64 |
+
"title",
|
| 65 |
+
"type",
|
| 66 |
+
"name",
|
| 67 |
+
"role",
|
| 68 |
+
"tabindex",
|
| 69 |
+
"aria-label",
|
| 70 |
+
"placeholder",
|
| 71 |
+
"value",
|
| 72 |
+
"alt",
|
| 73 |
+
"aria-expanded",
|
| 74 |
+
],
|
| 75 |
+
max_error_length: int = 400,
|
| 76 |
+
max_actions_per_step: int = 10,
|
| 77 |
+
tool_call_in_content: bool = True,
|
| 78 |
+
agent_state: AgentState = None,
|
| 79 |
+
):
|
| 80 |
+
super().__init__(
|
| 81 |
+
task=task,
|
| 82 |
+
llm=llm,
|
| 83 |
+
browser=browser,
|
| 84 |
+
browser_context=browser_context,
|
| 85 |
+
controller=controller,
|
| 86 |
+
use_vision=use_vision,
|
| 87 |
+
save_conversation_path=save_conversation_path,
|
| 88 |
+
max_failures=max_failures,
|
| 89 |
+
retry_delay=retry_delay,
|
| 90 |
+
system_prompt_class=system_prompt_class,
|
| 91 |
+
max_input_tokens=max_input_tokens,
|
| 92 |
+
validate_output=validate_output,
|
| 93 |
+
include_attributes=include_attributes,
|
| 94 |
+
max_error_length=max_error_length,
|
| 95 |
+
max_actions_per_step=max_actions_per_step,
|
| 96 |
+
tool_call_in_content=tool_call_in_content,
|
| 97 |
+
)
|
| 98 |
+
self.add_infos = add_infos
|
| 99 |
+
self.agent_state = agent_state
|
| 100 |
+
self.message_manager = CustomMassageManager(
|
| 101 |
+
llm=self.llm,
|
| 102 |
+
task=self.task,
|
| 103 |
+
action_descriptions=self.controller.registry.get_prompt_description(),
|
| 104 |
+
system_prompt_class=self.system_prompt_class,
|
| 105 |
+
max_input_tokens=self.max_input_tokens,
|
| 106 |
+
include_attributes=self.include_attributes,
|
| 107 |
+
max_error_length=self.max_error_length,
|
| 108 |
+
max_actions_per_step=self.max_actions_per_step,
|
| 109 |
+
tool_call_in_content=tool_call_in_content,
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
def _setup_action_models(self) -> None:
|
| 113 |
+
"""Setup dynamic action models from controller's registry"""
|
| 114 |
+
# Get the dynamic action model from controller's registry
|
| 115 |
+
self.ActionModel = self.controller.registry.create_action_model()
|
| 116 |
+
# Create output model with the dynamic actions
|
| 117 |
+
self.AgentOutput = CustomAgentOutput.type_with_custom_actions(self.ActionModel)
|
| 118 |
+
|
| 119 |
+
def _log_response(self, response: CustomAgentOutput) -> None:
|
| 120 |
+
"""Log the model's response"""
|
| 121 |
+
if "Success" in response.current_state.prev_action_evaluation:
|
| 122 |
+
emoji = "✅"
|
| 123 |
+
elif "Failed" in response.current_state.prev_action_evaluation:
|
| 124 |
+
emoji = "❌"
|
| 125 |
+
else:
|
| 126 |
+
emoji = "🤷"
|
| 127 |
+
|
| 128 |
+
logger.info(f"{emoji} Eval: {response.current_state.prev_action_evaluation}")
|
| 129 |
+
logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
|
| 130 |
+
logger.info(f"⏳ Task Progress: {response.current_state.completed_contents}")
|
| 131 |
+
logger.info(f"🤔 Thought: {response.current_state.thought}")
|
| 132 |
+
logger.info(f"🎯 Summary: {response.current_state.summary}")
|
| 133 |
+
for i, action in enumerate(response.action):
|
| 134 |
+
logger.info(
|
| 135 |
+
f"🛠️ Action {i + 1}/{len(response.action)}: {action.model_dump_json(exclude_unset=True)}"
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
def update_step_info(
|
| 139 |
+
self, model_output: CustomAgentOutput, step_info: CustomAgentStepInfo = None
|
| 140 |
+
):
|
| 141 |
+
"""
|
| 142 |
+
update step info
|
| 143 |
+
"""
|
| 144 |
+
if step_info is None:
|
| 145 |
+
return
|
| 146 |
+
|
| 147 |
+
step_info.step_number += 1
|
| 148 |
+
important_contents = model_output.current_state.important_contents
|
| 149 |
+
if (
|
| 150 |
+
important_contents
|
| 151 |
+
and "None" not in important_contents
|
| 152 |
+
and important_contents not in step_info.memory
|
| 153 |
+
):
|
| 154 |
+
step_info.memory += important_contents + "\n"
|
| 155 |
+
|
| 156 |
+
completed_contents = model_output.current_state.completed_contents
|
| 157 |
+
if completed_contents and "None" not in completed_contents:
|
| 158 |
+
step_info.task_progress = completed_contents
|
| 159 |
+
|
| 160 |
+
@time_execution_async("--get_next_action")
|
| 161 |
+
async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
|
| 162 |
+
"""Get next action from LLM based on current state"""
|
| 163 |
+
try:
|
| 164 |
+
structured_llm = self.llm.with_structured_output(self.AgentOutput, include_raw=True)
|
| 165 |
+
response: dict[str, Any] = await structured_llm.ainvoke(input_messages) # type: ignore
|
| 166 |
+
|
| 167 |
+
parsed: AgentOutput = response['parsed']
|
| 168 |
+
# cut the number of actions to max_actions_per_step
|
| 169 |
+
parsed.action = parsed.action[: self.max_actions_per_step]
|
| 170 |
+
self._log_response(parsed)
|
| 171 |
+
self.n_steps += 1
|
| 172 |
+
|
| 173 |
+
return parsed
|
| 174 |
+
except Exception as e:
|
| 175 |
+
# If something goes wrong, try to invoke the LLM again without structured output,
|
| 176 |
+
# and Manually parse the response. Temporarily solution for DeepSeek
|
| 177 |
+
ret = self.llm.invoke(input_messages)
|
| 178 |
+
if isinstance(ret.content, list):
|
| 179 |
+
parsed_json = json.loads(ret.content[0].replace("```json", "").replace("```", ""))
|
| 180 |
+
else:
|
| 181 |
+
parsed_json = json.loads(ret.content.replace("```json", "").replace("```", ""))
|
| 182 |
+
parsed: AgentOutput = self.AgentOutput(**parsed_json)
|
| 183 |
+
if parsed is None:
|
| 184 |
+
raise ValueError(f'Could not parse response.')
|
| 185 |
+
|
| 186 |
+
# cut the number of actions to max_actions_per_step
|
| 187 |
+
parsed.action = parsed.action[: self.max_actions_per_step]
|
| 188 |
+
self._log_response(parsed)
|
| 189 |
+
self.n_steps += 1
|
| 190 |
+
|
| 191 |
+
return parsed
|
| 192 |
+
|
| 193 |
+
@time_execution_async("--step")
|
| 194 |
+
async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
|
| 195 |
+
"""Execute one step of the task"""
|
| 196 |
+
logger.info(f"\n📍 Step {self.n_steps}")
|
| 197 |
+
state = None
|
| 198 |
+
model_output = None
|
| 199 |
+
result: list[ActionResult] = []
|
| 200 |
+
|
| 201 |
+
try:
|
| 202 |
+
state = await self.browser_context.get_state(use_vision=self.use_vision)
|
| 203 |
+
self.message_manager.add_state_message(state, self._last_result, step_info)
|
| 204 |
+
input_messages = self.message_manager.get_messages()
|
| 205 |
+
model_output = await self.get_next_action(input_messages)
|
| 206 |
+
self.update_step_info(model_output, step_info)
|
| 207 |
+
logger.info(f"🧠 All Memory: {step_info.memory}")
|
| 208 |
+
self._save_conversation(input_messages, model_output)
|
| 209 |
+
self.message_manager._remove_last_state_message() # we dont want the whole state in the chat history
|
| 210 |
+
self.message_manager.add_model_output(model_output)
|
| 211 |
+
|
| 212 |
+
result: list[ActionResult] = await self.controller.multi_act(
|
| 213 |
+
model_output.action, self.browser_context
|
| 214 |
+
)
|
| 215 |
+
self._last_result = result
|
| 216 |
+
|
| 217 |
+
if len(result) > 0 and result[-1].is_done:
|
| 218 |
+
logger.info(f"📄 Result: {result[-1].extracted_content}")
|
| 219 |
+
|
| 220 |
+
self.consecutive_failures = 0
|
| 221 |
+
|
| 222 |
+
except Exception as e:
|
| 223 |
+
result = self._handle_step_error(e)
|
| 224 |
+
self._last_result = result
|
| 225 |
+
|
| 226 |
+
finally:
|
| 227 |
+
if not result:
|
| 228 |
+
return
|
| 229 |
+
for r in result:
|
| 230 |
+
if r.error:
|
| 231 |
+
self.telemetry.capture(
|
| 232 |
+
AgentStepErrorTelemetryEvent(
|
| 233 |
+
agent_id=self.agent_id,
|
| 234 |
+
error=r.error,
|
| 235 |
+
)
|
| 236 |
+
)
|
| 237 |
+
if state:
|
| 238 |
+
self._make_history_item(model_output, state, result)
|
| 239 |
+
def create_history_gif(
    self,
    output_path: str = 'agent_history.gif',
    duration: int = 3000,
    show_goals: bool = True,
    show_task: bool = True,
    show_logo: bool = False,
    font_size: int = 40,
    title_font_size: int = 56,
    goal_font_size: int = 44,
    margin: int = 40,
    line_spacing: float = 1.5,
) -> None:
    """Create a GIF from the agent's history with overlaid task and goal text.

    Args:
        output_path: Destination path of the generated GIF.
        duration: Per-frame duration in milliseconds.
        show_goals: Overlay each step's goal ("thought") text on its frame.
        show_task: Prepend a title frame showing the task description.
        show_logo: Overlay a logo image on the frames.
        font_size: Point size for regular overlay text.
        title_font_size: Point size for the title frame text.
        goal_font_size: Point size for the goal overlay text.
        margin: Pixel margin used when placing overlays.
        line_spacing: Line-spacing multiplier for wrapped text.
    """
    # Fix: the original performed this check twice (the first check, on an
    # empty history alone, is subsumed by this combined one).
    if not self.history.history or not self.history.history[0].state.screenshot:
        logger.warning('No history or first screenshot to create GIF from')
        return

    images = []

    # Try to load nicer fonts, falling back to PIL's built-in bitmap font.
    try:
        # Try different font options in order of preference
        font_options = ['Helvetica', 'Arial', 'DejaVuSans', 'Verdana']
        font_loaded = False

        for font_name in font_options:
            try:
                import platform
                if platform.system() == "Windows":
                    # Need to specify the abs font path on Windows
                    font_name = os.path.join(os.getenv("WIN_FONT_DIR", "C:\\Windows\\Fonts"), font_name + ".ttf")
                regular_font = ImageFont.truetype(font_name, font_size)
                title_font = ImageFont.truetype(font_name, title_font_size)
                goal_font = ImageFont.truetype(font_name, goal_font_size)
                font_loaded = True
                break
            except OSError:
                continue

        if not font_loaded:
            raise OSError('No preferred fonts found')

    except OSError:
        regular_font = ImageFont.load_default()
        title_font = ImageFont.load_default()
        goal_font = regular_font

    # Load logo if requested
    logo = None
    if show_logo:
        try:
            logo = Image.open('./static/browser-use.png')
            # Fix: comment previously claimed 40px; the logo is scaled to a
            # 150px height, preserving aspect ratio.
            logo_height = 150
            aspect_ratio = logo.width / logo.height
            logo_width = int(logo_height * aspect_ratio)
            logo = logo.resize((logo_width, logo_height), Image.Resampling.LANCZOS)
        except Exception as e:
            logger.warning(f'Could not load logo: {e}')

    # Create task frame if requested
    if show_task and self.task:
        task_frame = self._create_task_frame(
            self.task,
            self.history.history[0].state.screenshot,
            title_font,
            regular_font,
            logo,
            line_spacing,
        )
        images.append(task_frame)

    # Process each history item (1-based step numbers for the overlay)
    for i, item in enumerate(self.history.history, 1):
        if not item.state.screenshot:
            continue

        # Convert base64 screenshot to PIL Image
        img_data = base64.b64decode(item.state.screenshot)
        image = Image.open(io.BytesIO(img_data))

        if show_goals and item.model_output:
            image = self._add_overlay_to_image(
                image=image,
                step_number=i,
                goal_text=item.model_output.current_state.thought,
                regular_font=regular_font,
                title_font=title_font,
                margin=margin,
                logo=logo,
            )

        images.append(image)

    if images:
        # Save the GIF (loop=0 means loop forever)
        images[0].save(
            output_path,
            save_all=True,
            append_images=images[1:],
            duration=duration,
            loop=0,
            optimize=False,
        )
        logger.info(f'Created GIF at {output_path}')
    else:
        logger.warning('No images found in history to create GIF')
| 353 |
+
async def run(self, max_steps: int = 100) -> AgentHistoryList:
    """Execute the task, stepping the agent until done, stopped, or max_steps."""
    try:
        logger.info(f"🚀 Starting task: {self.task}")

        self.telemetry.capture(
            AgentRunTelemetryEvent(
                agent_id=self.agent_id,
                task=self.task,
            )
        )

        step_info = CustomAgentStepInfo(
            task=self.task,
            add_infos=self.add_infos,
            step_number=1,
            max_steps=max_steps,
            memory="",
            task_progress="",
        )

        for step in range(max_steps):
            # 1) Honor a user-requested stop before doing any more work.
            if self.agent_state and self.agent_state.is_stop_requested():
                logger.info("🛑 Stop requested by user")
                self._create_stop_history_item()
                break

            # 2) Snapshot the current browser state so a later stop can still
            #    record something meaningful.
            if self.browser_context and self.agent_state:
                state = await self.browser_context.get_state(use_vision=self.use_vision)
                self.agent_state.set_last_valid_state(state)

            if self._too_many_failures():
                break

            # 3) Execute one agent step.
            await self.step(step_info)

            if self.history.is_done():
                # Validation is skipped on the very last step.
                if self.validate_output and step < max_steps - 1:
                    if not await self._validate_output():
                        continue

                logger.info("✅ Task completed successfully")
                break
        else:
            # for-loop exhausted without a break: ran out of steps
            logger.info("❌ Failed to complete task in maximum steps")

        return self.history

    finally:
        self.telemetry.capture(
            AgentEndTelemetryEvent(
                agent_id=self.agent_id,
                task=self.task,
                success=self.history.is_done(),
                steps=len(self.history.history),
            )
        )
        # Only tear down resources this agent created itself.
        if not self.injected_browser_context:
            await self.browser_context.close()

        if not self.injected_browser and self.browser:
            await self.browser.close()

        if self.generate_gif:
            self.create_history_gif()
| 424 |
+
def _create_stop_history_item(self):
    """Append a terminal history item marking the run as done after a user stop.

    Uses the last valid browser state captured in ``self.agent_state`` when
    available, otherwise an empty placeholder state.
    """
    # Fix: the original duplicated the AgentHistory construction in both the
    # try and except paths, and re-implemented _convert_to_browser_state_history
    # inline; resolve the state first, then build the item once.
    state = None
    try:
        if self.agent_state:
            last_state = self.agent_state.get_last_valid_state()
            if last_state:
                # Reuse the sibling helper instead of inlining the conversion.
                state = self._convert_to_browser_state_history(last_state)
    except Exception as e:
        logger.error(f"Error creating stop history item: {e}")
        state = None

    if state is None:
        # Fallback: empty state (also covers the error path above).
        state = self._create_empty_state()

    stop_history = AgentHistory(
        model_output=None,
        state=state,
        result=[ActionResult(extracted_content=None, error=None, is_done=True)],
    )
    self.history.history.append(stop_history)
| 464 |
+
def _convert_to_browser_state_history(self, browser_state):
    """Map a live browser state onto the serializable BrowserStateHistory.

    Missing attributes fall back to empty defaults; no element interaction
    is recorded (``interacted_element=[None]``).
    """
    return BrowserStateHistory(
        url=getattr(browser_state, 'url', ""),
        title=getattr(browser_state, 'title', ""),
        tabs=getattr(browser_state, 'tabs', []),
        interacted_element=[None],
        screenshot=getattr(browser_state, 'screenshot', None),
    )
| 473 |
+
def _create_empty_state(self):
    """Return a blank BrowserStateHistory used when no real state is available."""
    return BrowserStateHistory(
        url="",
        title="",
        tabs=[],
        interacted_element=[None],
        screenshot=None,
    )
|
src/agent/custom_massage_manager.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 2025/1/2
|
| 3 |
+
# @Author : wenshao
|
| 4 |
+
# @ProjectName: browser-use-webui
|
| 5 |
+
# @FileName: custom_massage_manager.py
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import logging
|
| 10 |
+
from typing import List, Optional, Type
|
| 11 |
+
|
| 12 |
+
from browser_use.agent.message_manager.service import MessageManager
|
| 13 |
+
from browser_use.agent.message_manager.views import MessageHistory
|
| 14 |
+
from browser_use.agent.prompts import SystemPrompt
|
| 15 |
+
from browser_use.agent.views import ActionResult, AgentStepInfo
|
| 16 |
+
from browser_use.browser.views import BrowserState
|
| 17 |
+
from langchain_core.language_models import BaseChatModel
|
| 18 |
+
from langchain_core.messages import (
|
| 19 |
+
HumanMessage,
|
| 20 |
+
AIMessage
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from .custom_prompts import CustomAgentMessagePrompt
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class CustomMassageManager(MessageManager):
    """MessageManager variant that moves the task description out of the
    permanent history and into the per-step state message.

    NOTE(review): the class name ("Massage") looks like a typo for "Message",
    but it is kept as-is for backward compatibility with importers.
    """

    def __init__(
        self,
        llm: BaseChatModel,
        task: str,
        action_descriptions: str,
        system_prompt_class: Type[SystemPrompt],
        max_input_tokens: int = 128000,
        estimated_tokens_per_character: int = 3,
        image_tokens: int = 800,
        include_attributes: Optional[List[str]] = None,
        max_error_length: int = 400,
        max_actions_per_step: int = 10,
        tool_call_in_content: bool = False,
    ):
        # Fix: `include_attributes` previously used a shared mutable default
        # ([]); normalize a None default instead.
        if include_attributes is None:
            include_attributes = []
        super().__init__(
            llm=llm,
            task=task,
            action_descriptions=action_descriptions,
            system_prompt_class=system_prompt_class,
            max_input_tokens=max_input_tokens,
            estimated_tokens_per_character=estimated_tokens_per_character,
            image_tokens=image_tokens,
            include_attributes=include_attributes,
            max_error_length=max_error_length,
            max_actions_per_step=max_actions_per_step,
            tool_call_in_content=tool_call_in_content,
        )

        # Custom: Move Task info to state_message — rebuild the history with
        # only the system prompt plus one example tool call.
        self.history = MessageHistory()
        self._add_message_with_tokens(self.system_prompt)
        tool_calls = [
            {
                'name': 'CustomAgentOutput',
                'args': {
                    'current_state': {
                        'prev_action_evaluation': 'Unknown - No previous actions to evaluate.',
                        'important_contents': '',
                        'completed_contents': '',
                        'thought': 'Now Google is open. Need to type OpenAI to search.',
                        'summary': 'Type OpenAI to search.',
                    },
                    'action': [],
                },
                'id': '',
                'type': 'tool_call',
            }
        ]
        if self.tool_call_in_content:
            # openai throws error if tool_calls are not responded -> move to content
            example_tool_call = AIMessage(
                content=f'{tool_calls}',
                tool_calls=[],
            )
        else:
            example_tool_call = AIMessage(
                content='',  # fix: was a pointless empty f-string f''
                tool_calls=tool_calls,
            )

        self._add_message_with_tokens(example_tool_call)

    def add_state_message(
        self,
        state: BrowserState,
        result: Optional[List[ActionResult]] = None,
        step_info: Optional[AgentStepInfo] = None,
    ) -> None:
        """Add browser state as a human message.

        Results flagged ``include_in_memory`` are appended to the permanent
        history directly; everything else rides along on the (transient)
        state message built by CustomAgentMessagePrompt.
        """
        # if keep in memory, add directly to history and add state without result
        if result:
            for r in result:
                if r.include_in_memory:
                    if r.extracted_content:
                        msg = HumanMessage(content=str(r.extracted_content))
                        self._add_message_with_tokens(msg)
                    if r.error:
                        # truncate long errors to the last max_error_length chars
                        msg = HumanMessage(
                            content=str(r.error)[-self.max_error_length:]
                        )
                        self._add_message_with_tokens(msg)
                    result = None  # if result in history, we dont want to add it again

        # otherwise add state message and result to next message (which will not stay in memory)
        state_message = CustomAgentMessagePrompt(
            state,
            result,
            include_attributes=self.include_attributes,
            max_error_length=self.max_error_length,
            step_info=step_info,
        ).get_user_message()
        self._add_message_with_tokens(state_message)
|
src/agent/custom_prompts.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 2025/1/2
|
| 3 |
+
# @Author : wenshao
|
| 4 |
+
# @ProjectName: browser-use-webui
|
| 5 |
+
# @FileName: custom_prompts.py
|
| 6 |
+
|
| 7 |
+
from typing import List, Optional
|
| 8 |
+
|
| 9 |
+
from browser_use.agent.prompts import SystemPrompt
|
| 10 |
+
from browser_use.agent.views import ActionResult
|
| 11 |
+
from browser_use.browser.views import BrowserState
|
| 12 |
+
from langchain_core.messages import HumanMessage, SystemMessage
|
| 13 |
+
|
| 14 |
+
from .custom_views import CustomAgentStepInfo
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CustomSystemPrompt(SystemPrompt):
    """System prompt that adds memory/task-progress fields to the agent's
    required JSON response format."""

    def important_rules(self) -> str:
        """
        Returns the important rules for the agent.
        """
        text = """
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
{
"current_state": {
"prev_action_evaluation": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Ignore the action result. The website is the ground truth. Also mention if something unexpected happened like new suggestions in an input field. Shortly state why/why not. Note that the result you output must be consistent with the reasoning you output afterwards. If you consider it to be 'Failed,' you should reflect on this during your thought.",
"important_contents": "Output important contents closely related to user\\'s instruction or task on the current page. If there is, please output the contents. If not, please output empty string ''.",
"completed_contents": "Update the input Task Progress. Completed contents is a general summary of the current contents that have been completed. Just summarize the contents that have been actually completed based on the current page and the history operations. Please list each completed item individually, such as: 1. Input username. 2. Input Password. 3. Click confirm button",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If the output of prev_action_evaluation is 'Failed', please reflect and output your reflection here. If you think you have entered the wrong page, consider to go back to the previous page in next action.",
"summary": "Please generate a brief natural language description for the operation in next actions based on your Thought."
},
"action": [
{
"action_name": {
// action-specific parameters
}
},
// ... more actions in sequence
]
}

2. ACTIONS: You can specify multiple actions to be executed in sequence.

Common action sequences:
- Form filling: [
{"input_text": {"index": 1, "text": "username"}},
{"input_text": {"index": 2, "text": "password"}},
{"click_element": {"index": 3}}
]
- Navigation and extraction: [
{"open_new_tab": {}},
{"go_to_url": {"url": "https://example.com"}},
{"extract_page_content": {}}
]


3. ELEMENT INTERACTION:
- Only use indexes that exist in the provided element list
- Each element has a unique index number (e.g., "33[:]<button>")
- Elements marked with "_[:]" are non-interactive (for context only)

4. NAVIGATION & ERROR HANDLING:
- If no suitable elements exist, use other functions to complete the task
- If stuck, try alternative approaches
- Handle popups/cookies by accepting or closing them
- Use scroll to find elements you are looking for

5. TASK COMPLETION:
- If you think all the requirements of user\\'s instruction have been completed and no further operation is required, output the done action to terminate the operation process.
- Don't hallucinate actions.
- If the task requires specific information - make sure to include everything in the done function. This is what the user will see.
- If you are running out of steps (current step), think about speeding it up, and ALWAYS use the done action as the last action.

6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
- Bounding boxes with labels correspond to element indexes
- Each bounding box and its label have the same color
- Most often the label is inside the bounding box, on the top right
- Visual context helps verify element locations and relationships
- sometimes labels overlap, so use the context to verify the correct element

7. Form filling:
- If you fill an input field and your action sequence is interrupted, most often a list with suggestions poped up under the field and you need to first select the right element from the suggestion list.

8. ACTION SEQUENCING:
- Actions are executed in the order they appear in the list
- Each action should logically follow from the previous one
- If the page changes after an action, the sequence is interrupted and you get the new state.
- If content only disappears the sequence continues.
- Only provide the action sequence until you think the page will change.
- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page like saving, extracting, checkboxes...
- only use multiple actions if it makes sense.
"""
        text += f" - use maximum {self.max_actions_per_step} actions per sequence"
        return text

    def input_format(self) -> str:
        """Describe the structure of the input the agent receives each step."""
        return """
INPUT STRUCTURE:
1. Task: The user\\'s instructions you need to complete.
2. Hints(Optional): Some hints to help you complete the user\\'s instructions.
3. Memory: Important contents are recorded during historical operations for use in subsequent operations.
4. Task Progress: Up to the current page, the content you have completed can be understood as the progress of the task.
5. Current URL: The webpage you're currently on
6. Available Tabs: List of open browser tabs
7. Interactive Elements: List in the format:
index[:]<element_type>element_text</element_type>
- index: Numeric identifier for interaction
- element_type: HTML element type (button, input, etc.)
- element_text: Visible text or element description

Example:
33[:]<button>Submit Form</button>
_[:] Non-interactive text


Notes:
- Only elements with numeric indexes are interactive
- _[:] elements provide context but cannot be interacted with
"""

    def get_system_message(self) -> SystemMessage:
        """
        Get the system prompt for the agent.

        Returns:
            SystemMessage: Formatted system prompt
            (fix: docstring previously claimed a plain str was returned)
        """
        time_str = self.current_date.strftime("%Y-%m-%d %H:%M")

        AGENT_PROMPT = f"""You are a precise browser automation agent that interacts with websites through structured commands. Your role is to:
1. Analyze the provided webpage elements and structure
2. Plan a sequence of actions to accomplish the given task
3. Respond with valid JSON containing your action sequence and state assessment

Current date and time: {time_str}

{self.input_format()}

{self.important_rules()}

Functions:
{self.default_action_description}

Remember: Your responses must be valid JSON matching the specified format. Each action in the sequence must be valid."""
        return SystemMessage(content=AGENT_PROMPT)
|
| 148 |
+
|
| 149 |
+
class CustomAgentMessagePrompt:
    """Builds the per-step human message: task, hints, memory, task progress,
    current page state, and the results/errors of the previous actions."""

    def __init__(
        self,
        state: BrowserState,
        result: Optional[List[ActionResult]] = None,
        include_attributes: Optional[List[str]] = None,
        max_error_length: int = 400,
        step_info: Optional[CustomAgentStepInfo] = None,
    ):
        self.state = state
        self.result = result
        self.max_error_length = max_error_length
        # Fix: `include_attributes` previously used a shared mutable default ([]).
        self.include_attributes = include_attributes if include_attributes is not None else []
        self.step_info = step_info

    def get_user_message(self) -> HumanMessage:
        """Render the state description; attach the screenshot for vision models.

        NOTE(review): assumes ``step_info`` is always provided by the caller —
        a None ``step_info`` would raise AttributeError here.
        """
        state_description = f"""
1. Task: {self.step_info.task}
2. Hints(Optional):
{self.step_info.add_infos}
3. Memory:
{self.step_info.memory}
4. Task Progress:
{self.step_info.task_progress}
5. Current url: {self.state.url}
6. Available tabs:
{self.state.tabs}
7. Interactive elements:
{self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)}
"""

        if self.result:
            for i, result in enumerate(self.result):
                if result.extracted_content:
                    state_description += f"\nResult of action {i + 1}/{len(self.result)}: {result.extracted_content}"
                if result.error:
                    # Fix: comment said "last 300 characters" but the slice uses
                    # max_error_length (default 400).
                    error = result.error[-self.max_error_length:]
                    state_description += (
                        f"\nError of action {i + 1}/{len(self.result)}: ...{error}"
                    )

        if self.state.screenshot:
            # Format message for vision model
            return HumanMessage(
                content=[
                    {"type": "text", "text": state_description},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/png;base64,{self.state.screenshot}"
                        },
                    },
                ]
            )

        return HumanMessage(content=state_description)
|
src/agent/custom_views.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 2025/1/2
|
| 3 |
+
# @Author : wenshao
|
| 4 |
+
# @ProjectName: browser-use-webui
|
| 5 |
+
# @FileName: custom_views.py
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from typing import Type
|
| 9 |
+
|
| 10 |
+
from browser_use.agent.views import AgentOutput
|
| 11 |
+
from browser_use.controller.registry.views import ActionModel
|
| 12 |
+
from pydantic import BaseModel, ConfigDict, Field, create_model
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass
class CustomAgentStepInfo:
    """Per-run bookkeeping handed to every agent step."""

    step_number: int    # 1-based index of the current step
    max_steps: int      # upper bound on steps for this run
    task: str           # the user's instruction
    add_infos: str      # optional hints supplied by the user
    memory: str         # important contents accumulated across steps
    task_progress: str  # summary of sub-tasks completed so far
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class CustomAgentBrain(BaseModel):
    """Current state of the agent (the `current_state` part of its JSON output)."""

    # 'Success' | 'Failed' | 'Unknown' plus a short rationale, per the system prompt.
    prev_action_evaluation: str
    # Page contents relevant to the user's task (empty string when none).
    important_contents: str
    # Running summary of the task steps completed so far.
    completed_contents: str
    # Free-form reasoning about what to do next.
    thought: str
    # Short natural-language description of the next action(s).
    summary: str
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class CustomAgentOutput(AgentOutput):
    """Output model for agent

    @dev note: this model is extended with custom actions in AgentService. You can also use some fields that are not in this model as provided by the linter, as long as they are registered in the DynamicActions model.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    current_state: CustomAgentBrain
    action: list[ActionModel]

    @staticmethod
    def type_with_custom_actions(
        custom_actions: Type[ActionModel],
    ) -> Type["CustomAgentOutput"]:
        """Extend actions with custom actions"""
        # Dynamically derive a subclass whose `action` field is typed with the
        # registered custom-action union; keep the name "AgentOutput" so the
        # LLM-facing schema title stays unchanged.
        return create_model(
            "AgentOutput",
            __base__=CustomAgentOutput,
            action=(list[custom_actions], Field(...)),  # required, no default
            __module__=CustomAgentOutput.__module__,
        )
|