repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tools/utils.py | browser_use/tools/utils.py | """Utility functions for browser tools."""
from browser_use.dom.service import EnhancedDOMTreeNode
def _checked_from_ax(node, fallback: bool) -> bool:
    """Return the checkbox checked state, preferring the accessibility (AX) node.

    If the node carries AX properties and one of them is named 'checked', its
    value wins (AX values may be the boolean True or the string 'true');
    otherwise *fallback* (the DOM-attribute-derived state) is returned.
    """
    if node.ax_node and node.ax_node.properties:
        for prop in node.ax_node.properties:
            if prop.name == 'checked':
                return prop.value is True or prop.value == 'true'
    return fallback


def _dom_checked(attributes: dict) -> bool:
    """Derive checked state from the HTML 'checked' attribute.

    HTML treats 'checked' as a boolean attribute: present-but-empty ('') means
    checked, which is why '' is in the truthy set.
    """
    return attributes.get('checked', 'false').lower() in ('true', 'checked', '')


def get_click_description(node: 'EnhancedDOMTreeNode') -> str:
    """Get a brief description of the clicked element for memory.

    Builds a space-joined summary: tag name, input type, checkbox state
    (from DOM attributes with AX-tree override), role, a short (<=30 char)
    text excerpt, and key identifying attributes (id/name/aria-label,
    truncated to 20 chars each).
    """
    parts = [node.tag_name]

    # Inputs: record the type, and for checkboxes the checked/unchecked state.
    if node.tag_name == 'input' and node.attributes.get('type'):
        input_type = node.attributes['type']
        parts.append(f'type={input_type}')
        if input_type == 'checkbox':
            is_checked = _checked_from_ax(node, _dom_checked(node.attributes))
            state = 'checked' if is_checked else 'unchecked'
            parts.append(f'checkbox-state={state}')

    # ARIA role, with state for role=checkbox (aria-checked, AX override).
    if node.attributes.get('role'):
        role = node.attributes['role']
        parts.append(f'role={role}')
        if role == 'checkbox':
            aria_checked = node.attributes.get('aria-checked', 'false').lower() in ('true', 'checked')
            is_checked = _checked_from_ax(node, aria_checked)
            state = 'checked' if is_checked else 'unchecked'
            parts.append(f'checkbox-state={state}')

    # Labels/spans/divs often wrap a visually-hidden native checkbox; surface
    # its state when no input type was recorded above.
    if node.tag_name in ('label', 'span', 'div') and 'type=' not in ' '.join(parts):
        for child in node.children:
            if child.tag_name == 'input' and child.attributes.get('type') == 'checkbox':
                # Hidden via opacity:0 in computed styles, or flagged invisible.
                is_hidden = False
                if child.snapshot_node and child.snapshot_node.computed_styles:
                    opacity = child.snapshot_node.computed_styles.get('opacity', '1')
                    if opacity in ('0', '0.0'):
                        is_hidden = True
                if is_hidden or not child.is_visible:
                    is_checked = _checked_from_ax(child, _dom_checked(child.attributes))
                    state = 'checked' if is_checked else 'unchecked'
                    parts.append(f'checkbox-state={state}')
                    break

    # Short text excerpt, truncated with an ellipsis beyond 30 characters.
    text = node.get_all_children_text().strip()
    if text:
        short_text = text[:30] + ('...' if len(text) > 30 else '')
        parts.append(f'"{short_text}"')

    # Key identifying attributes, each truncated to 20 characters.
    for attr in ('id', 'name', 'aria-label'):
        if node.attributes.get(attr):
            parts.append(f'{attr}={node.attributes[attr][:20]}')

    return ' '.join(parts)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tools/registry/views.py | browser_use/tools/registry/views.py | from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
if TYPE_CHECKING:
pass
class RegisteredAction(BaseModel):
    """Model for a registered action"""

    name: str
    description: str
    function: Callable
    param_model: type[BaseModel]

    # Domain filters: glob patterns deciding on which URLs this action is
    # offered, e.g. ['*.google.com', 'www.bing.com', 'yahoo.*']
    domains: list[str] | None = None

    model_config = ConfigDict(arbitrary_types_allowed=True)

    def prompt_description(self) -> str:
        """Get a description of the action for the prompt in unstructured format"""
        schema = self.param_model.model_json_schema()
        rendered_params = []
        for param_name, param_info in schema.get('properties', {}).items():
            # Each parameter is rendered as name[=type][ (description)].
            piece = param_name
            if 'type' in param_info:
                piece += f'={param_info["type"]}'
            if 'description' in param_info:
                piece += f' ({param_info["description"]})'
            rendered_params.append(piece)
        # Format: action_name: Description. (param1=type, param2=type, ...)
        if not rendered_params:
            return f'{self.name}: {self.description}'
        return f'{self.name}: {self.description}. ({", ".join(rendered_params)})'
class ActionModel(BaseModel):
    """Base model for dynamically created action models.

    Subclasses are generated at runtime with one optional field per registered
    action, e.g. click_element=ClickElementParams, done=DoneParams, and at most
    one field is set per instance.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, extra='forbid')

    def get_index(self) -> int | None:
        """Get the index of the action.

        Example payload: {'clicked_element': {'index': 5}} -> 5.
        Returns None when no action field is set or none carries an index.
        """
        params = self.model_dump(exclude_unset=True).values()
        if not params:
            return None
        for param in params:
            if param is not None and 'index' in param:
                return param['index']
        return None

    def set_index(self, index: int) -> None:
        """Overwrite the index of the action.

        No-op when no action field is set (previously this raised
        StopIteration via next() on an empty dump) or when the set action's
        params carry no 'index' attribute.
        """
        action_data = self.model_dump(exclude_unset=True)
        if not action_data:
            # Nothing set on this model -- nothing to update.
            return
        action_name = next(iter(action_data.keys()))
        action_params = getattr(self, action_name, None)
        # Update the index directly on the nested param model.
        if action_params is not None and hasattr(action_params, 'index'):
            action_params.index = index
class ActionRegistry(BaseModel):
    """Model representing the action registry"""

    actions: dict[str, RegisteredAction] = {}

    @staticmethod
    def _match_domains(domains: list[str] | None, url: str) -> bool:
        """Match a list of domain glob patterns against a URL.

        Args:
            domains: Domain patterns, possibly containing * wildcards
            url: The URL to match against

        Returns:
            True if the URL matches any pattern (or there is nothing to
            filter on), False otherwise.
        """
        # No filter configured, or no URL to test: treat as a match.
        if domains is None or not url:
            return True

        # Use the centralized URL matching logic from utils
        from browser_use.utils import match_url_with_domain_pattern

        return any(match_url_with_domain_pattern(url, pattern) for pattern in domains)

    def get_prompt_description(self, page_url: str | None = None) -> str:
        """Get a description of all actions for the prompt.

        Args:
            page_url: If provided, filter actions by URL using domain filters.

        Returns:
            A string description of available actions.
            - page_url is None: only actions with no domain filter (system prompt)
            - page_url given: only domain-filtered actions matching that URL
              (unfiltered ones are already in the system prompt)
        """
        if page_url is None:
            unfiltered = (a for a in self.actions.values() if a.domains is None)
            return '\n'.join(a.prompt_description() for a in unfiltered)

        matching = [
            a for a in self.actions.values() if a.domains and self._match_domains(a.domains, page_url)
        ]
        return '\n'.join(a.prompt_description() for a in matching)
class SpecialActionParameters(BaseModel):
    """Model defining all special parameters that can be injected into actions."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Optional user-provided context object passed down from Agent(context=...).
    # Can contain anything: external db connections, file handles, queues,
    # runtime config objects, etc. browser-use code doesn't use it at all --
    # it is passed through to user actions purely for convenience.
    context: Any | None = None
    # browser-use session object; can be used to create new tabs, navigate, access CDP
    browser_session: BrowserSession | None = None
    # Current page URL for filtering and context
    page_url: str | None = None
    # CDP client for direct Chrome DevTools Protocol access
    # (actual type is CDPClient from cdp_use; typed Any to avoid the import here)
    cdp_client: Any | None = None
    # Extra injected config, provided only if the action asks for these arg names:
    # LLM used for page-content extraction actions
    page_extraction_llm: BaseChatModel | None = None
    # sandboxed filesystem wrapper for file actions
    file_system: FileSystem | None = None
    # whitelist of file paths the action may touch
    available_file_paths: list[str] | None = None
    # True when the current action carries sensitive-data placeholders
    has_sensitive_data: bool = False

    @classmethod
    def get_browser_requiring_params(cls) -> set[str]:
        """Get parameter names that require browser_session"""
        return {'browser_session', 'cdp_client', 'page_url'}
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tools/registry/service.py | browser_use/tools/registry/service.py | import asyncio
import functools
import inspect
import logging
import re
from collections.abc import Callable
from inspect import Parameter, iscoroutinefunction, signature
from types import UnionType
from typing import Any, Generic, Optional, TypeVar, Union, get_args, get_origin
import pyotp
from pydantic import BaseModel, Field, RootModel, create_model
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.observability import observe_debug
from browser_use.telemetry.service import ProductTelemetry
from browser_use.tools.registry.views import (
ActionModel,
ActionRegistry,
RegisteredAction,
SpecialActionParameters,
)
from browser_use.utils import is_new_tab_page, match_url_with_domain_pattern, time_execution_async
# TypeVar for the user-supplied context object threaded through Registry[Context].
Context = TypeVar('Context')

# Module-level logger, named after this module per the stdlib logging convention.
logger = logging.getLogger(__name__)
class Registry(Generic[Context]):
"""Service for registering and managing actions"""
def __init__(self, exclude_actions: list[str] | None = None):
    """Create an empty action registry.

    A copy of *exclude_actions* is stored so later mutation by the caller
    cannot affect this instance (and no mutable default is shared).
    """
    self.registry = ActionRegistry()
    self.telemetry = ProductTelemetry()
    self.exclude_actions = [] if exclude_actions is None else list(exclude_actions)
def exclude_action(self, action_name: str) -> None:
    """Exclude an action from the registry after initialization.

    The name is remembered in ``exclude_actions`` so it cannot be
    re-registered later; if currently registered, it is removed now.
    """
    # Remember the exclusion to block future registration attempts.
    if action_name not in self.exclude_actions:
        self.exclude_actions.append(action_name)
    # Drop it from the live registry too, if present.
    self.registry.actions.pop(action_name, None)
    logger.debug(f'Excluded action "{action_name}" from registry')
def _get_special_param_types(self) -> dict[str, type | UnionType | None]:
    """Expected types for framework-injected ("special") action parameters.

    Types are spelled out by hand (rather than derived from
    SpecialActionParameters) to avoid Optional-unwrapping issues. A None
    value means the parameter's type is not validated. Ideally this mapping
    shrinks over time: each driver should decide what to expose to actions
    (CDP client, 2fa code getters, sensitive_data wrappers, context, ...).
    """
    expected_types: dict[str, type | UnionType | None] = {
        'context': None,  # Context is a TypeVar, so we can't validate type
        'browser_session': BrowserSession,
        'page_url': str,
        'cdp_client': None,  # CDPClient type from cdp_use, but we don't import it here
        'page_extraction_llm': BaseChatModel,
        'available_file_paths': list,
        'has_sensitive_data': bool,
        'file_system': FileSystem,
    }
    return expected_types
def _normalize_action_function_signature(
    self,
    func: Callable,
    description: str,
    param_model: type[BaseModel] | None = None,
) -> tuple[Callable, type[BaseModel]]:
    """
    Normalize action function to accept only kwargs.

    Two authoring patterns are supported:
    - Type 1: the first parameter is a BaseModel and *param_model* is given;
      the model instance is passed through as the first argument.
    - Type 2: plain parameters; a param model is generated from them.

    Returns:
        - Normalized async function that accepts (*_, params: ParamModel, **special_params)
        - The param model to use for registration
    """
    sig = signature(func)
    parameters = list(sig.parameters.values())
    special_param_types = self._get_special_param_types()
    special_param_names = set(special_param_types.keys())

    # Step 1: Validate no **kwargs in original function signature.
    # If it needs default values it must use a dedicated param_model: BaseModel instead.
    for param in parameters:
        if param.kind == Parameter.VAR_KEYWORD:
            raise ValueError(
                f"Action '{func.__name__}' has **{param.name} which is not allowed. "
                f'Actions must have explicit positional parameters only.'
            )

    # Step 2: Separate special (framework-injected) and action (LLM-supplied) parameters.
    action_params = []
    special_params = []
    param_model_provided = param_model is not None

    for i, param in enumerate(parameters):
        # Check if this is a Type 1 pattern (first param is the BaseModel itself).
        if i == 0 and param_model_provided and param.name not in special_param_names:
            # This is Type 1 pattern - skip the params argument.
            continue
        if param.name in special_param_names:
            # Validate that the declared annotation is compatible with what
            # the framework will inject for this special parameter.
            expected_type = special_param_types.get(param.name)
            if param.annotation != Parameter.empty and expected_type is not None:
                # Handle Optional types - unwrap X | None to X before comparing.
                param_type = param.annotation
                origin = get_origin(param_type)
                if origin is Union:
                    args = get_args(param_type)
                    # Find non-None type
                    param_type = next((arg for arg in args if arg is not type(None)), param_type)
                # Check if types are compatible (exact match, subclass, or generic list)
                types_compatible = (
                    param_type == expected_type
                    or (
                        inspect.isclass(param_type)
                        and inspect.isclass(expected_type)
                        and issubclass(param_type, expected_type)
                    )
                    or
                    # Handle list[T] vs list comparison
                    (expected_type is list and (param_type is list or get_origin(param_type) is list))
                )
                if not types_compatible:
                    expected_type_name = getattr(expected_type, '__name__', str(expected_type))
                    param_type_name = getattr(param_type, '__name__', str(param_type))
                    raise ValueError(
                        f"Action '{func.__name__}' parameter '{param.name}: {param_type_name}' "
                        f"conflicts with special argument injected by tools: '{param.name}: {expected_type_name}'"
                    )
            special_params.append(param)
        else:
            action_params.append(param)

    # Step 3: Create or validate param model.
    if not param_model_provided:
        # Type 2: Generate param model from action params.
        if action_params:
            params_dict = {}
            for param in action_params:
                # Untyped params default to str; missing defaults become required (...).
                annotation = param.annotation if param.annotation != Parameter.empty else str
                default = ... if param.default == Parameter.empty else param.default
                params_dict[param.name] = (annotation, default)
            param_model = create_model(f'{func.__name__}_Params', __base__=ActionModel, **params_dict)
        else:
            # No action params, create empty model.
            param_model = create_model(
                f'{func.__name__}_Params',
                __base__=ActionModel,
            )
    assert param_model is not None, f'param_model is None for {func.__name__}'

    # Step 4: Create normalized wrapper function.
    @functools.wraps(func)
    async def normalized_wrapper(*args, params: BaseModel | None = None, **kwargs):
        """Normalized action that only accepts kwargs"""
        # Validate no positional args.
        if args:
            raise TypeError(f'{func.__name__}() does not accept positional arguments, only keyword arguments are allowed')

        # Prepare arguments for original function.
        call_args = []
        # NOTE(review): call_kwargs is never populated or used below — kept as-is.
        call_kwargs = {}

        # Handle Type 1 pattern (first arg is the param model).
        if param_model_provided and parameters and parameters[0].name not in special_param_names:
            if params is None:
                raise ValueError(f"{func.__name__}() missing required 'params' argument")
            # For Type 1, we'll use the params object as first argument (in the loop below).
            pass
        else:
            # Type 2 pattern - need to unpack params.
            # If params is None, try to create it from kwargs.
            if params is None and action_params:
                # Extract action params from kwargs.
                action_kwargs = {}
                for param in action_params:
                    if param.name in kwargs:
                        action_kwargs[param.name] = kwargs[param.name]
                if action_kwargs:
                    # Use the param_model which has the correct types defined.
                    params = param_model(**action_kwargs)

        # Build call_args by iterating through original function parameters in order.
        params_dict = params.model_dump() if params is not None else {}
        for i, param in enumerate(parameters):
            # Skip first param for Type 1 pattern (it's the model itself).
            if param_model_provided and i == 0 and param.name not in special_param_names:
                call_args.append(params)
            elif param.name in special_param_names:
                # This is a special (framework-injected) parameter.
                if param.name in kwargs:
                    value = kwargs[param.name]
                    # Check if required special param is None: raise a targeted error.
                    # NOTE(review): the 'page' branch looks dead ('page' is not a
                    # special param name) and 'file_system' appears twice — kept as-is.
                    if value is None and param.default == Parameter.empty:
                        if param.name == 'browser_session':
                            raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
                        elif param.name == 'page_extraction_llm':
                            raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
                        elif param.name == 'file_system':
                            raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
                        elif param.name == 'page':
                            raise ValueError(f'Action {func.__name__} requires page but none provided.')
                        elif param.name == 'available_file_paths':
                            raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
                        elif param.name == 'file_system':
                            raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
                        else:
                            raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
                    call_args.append(value)
                elif param.default != Parameter.empty:
                    call_args.append(param.default)
                else:
                    # Special param is required but not provided at all.
                    # NOTE(review): same duplicated/dead branches as above — kept as-is.
                    if param.name == 'browser_session':
                        raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
                    elif param.name == 'page_extraction_llm':
                        raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
                    elif param.name == 'file_system':
                        raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
                    elif param.name == 'page':
                        raise ValueError(f'Action {func.__name__} requires page but none provided.')
                    elif param.name == 'available_file_paths':
                        raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
                    elif param.name == 'file_system':
                        raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
                    else:
                        raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
            else:
                # This is an action parameter: take it from the validated model dump.
                if param.name in params_dict:
                    call_args.append(params_dict[param.name])
                elif param.default != Parameter.empty:
                    call_args.append(param.default)
                else:
                    raise ValueError(f"{func.__name__}() missing required parameter '{param.name}'")

        # Call original function with positional args; sync functions are
        # offloaded to a thread so the event loop is never blocked.
        if iscoroutinefunction(func):
            return await func(*call_args)
        else:
            return await asyncio.to_thread(func, *call_args)

    # Update wrapper signature to be kwargs-only.
    new_params = [Parameter('params', Parameter.KEYWORD_ONLY, default=None, annotation=Optional[param_model])]
    # Add special params as keyword-only.
    for sp in special_params:
        new_params.append(Parameter(sp.name, Parameter.KEYWORD_ONLY, default=sp.default, annotation=sp.annotation))
    # Add **kwargs to accept and ignore extra params.
    new_params.append(Parameter('kwargs', Parameter.VAR_KEYWORD))
    normalized_wrapper.__signature__ = sig.replace(parameters=new_params)  # type: ignore[attr-defined]

    return normalized_wrapper, param_model
# @time_execution_sync('--create_param_model')
def _create_param_model(self, function: Callable) -> type[BaseModel]:
    """Creates a Pydantic model from function signature.

    Framework-injected ("special") parameters are excluded; everything else
    becomes a model field, required unless it has a default.
    """
    injected_names = set(SpecialActionParameters.model_fields.keys())
    field_definitions = {}
    for name, param in signature(function).parameters.items():
        if name in injected_names:
            continue
        field_definitions[name] = (param.annotation, ... if param.default == param.empty else param.default)
    # TODO: make the types here work
    return create_model(
        f'{function.__name__}_parameters',
        __base__=ActionModel,
        **field_definitions,  # type: ignore
    )
def action(
    self,
    description: str,
    param_model: type[BaseModel] | None = None,
    domains: list[str] | None = None,
    allowed_domains: list[str] | None = None,
):
    """Decorator that registers the wrapped function as an agent action.

    ``domains`` and ``allowed_domains`` are aliases for the same filter;
    supplying both is an error.
    """
    if domains is not None and allowed_domains is not None:
        raise ValueError("Cannot specify both 'domains' and 'allowed_domains' - they are aliases for the same parameter")
    resolved_domains = domains if allowed_domains is None else allowed_domains

    def decorator(func: Callable):
        # Honor the exclusion list: hand back the function untouched, unregistered.
        if func.__name__ in self.exclude_actions:
            return func

        # Normalize the function signature to kwargs-only and register it.
        wrapped, model = self._normalize_action_function_signature(func, description, param_model)
        self.registry.actions[func.__name__] = RegisteredAction(
            name=func.__name__,
            description=description,
            function=wrapped,
            param_model=model,
            domains=resolved_domains,
        )
        # Return the normalized function so it can be called with kwargs.
        return wrapped

    return decorator
@observe_debug(ignore_input=True, ignore_output=True, name='execute_action')
@time_execution_async('--execute_action')
async def execute_action(
    self,
    action_name: str,
    params: dict,
    browser_session: BrowserSession | None = None,
    page_extraction_llm: BaseChatModel | None = None,
    file_system: FileSystem | None = None,
    sensitive_data: dict[str, str | dict[str, str]] | None = None,
    available_file_paths: list[str] | None = None,
) -> Any:
    """Execute a registered action with simplified parameter handling.

    Args:
        action_name: Name of a previously registered action.
        params: Raw parameter dict, validated against the action's param model.
        browser_session: Optional session; enables page_url/cdp_client injection.
        page_extraction_llm: LLM injected into actions that request it.
        file_system: Filesystem wrapper injected into actions that request it.
        sensitive_data: Secret placeholders substituted into params before the call.
        available_file_paths: Whitelist injected into actions that request it.

    Returns:
        Whatever the action function returns.

    Raises:
        ValueError: If the action name is unknown.
        RuntimeError: On invalid params, timeouts, or any failure inside the action.
    """
    if action_name not in self.registry.actions:
        raise ValueError(f'Action {action_name} not found')

    action = self.registry.actions[action_name]
    try:
        # Create the validated Pydantic model from the raw params dict.
        try:
            validated_params = action.param_model(**params)
        except Exception as e:
            raise ValueError(f'Invalid parameters {params} for action {action_name}: {type(e)}: {e}') from e

        if sensitive_data:
            # Get current URL (for domain-scoped secrets) if browser_session is provided.
            current_url = None
            if browser_session and browser_session.agent_focus_target_id:
                try:
                    # Get current page info from session_manager; best-effort only.
                    target = browser_session.session_manager.get_target(browser_session.agent_focus_target_id)
                    if target:
                        current_url = target.url
                except Exception:
                    pass
            validated_params = self._replace_sensitive_data(validated_params, sensitive_data, current_url)

        # Build the special (framework-injected) context dict.
        special_context = {
            'browser_session': browser_session,
            'page_extraction_llm': page_extraction_llm,
            'available_file_paths': available_file_paths,
            'has_sensitive_data': action_name == 'input' and bool(sensitive_data),
            'file_system': file_system,
        }

        # Only pass sensitive_data to actions that explicitly need it (input).
        if action_name == 'input':
            special_context['sensitive_data'] = sensitive_data

        # Add CDP-related parameters if browser_session is available.
        if browser_session:
            try:
                special_context['page_url'] = await browser_session.get_current_page_url()
            except Exception:
                special_context['page_url'] = None
            special_context['cdp_client'] = browser_session.cdp_client

        # All functions are normalized to accept kwargs only; call with params
        # and the unpacked special context. (A previous inner try/except here
        # only re-raised and has been removed as a no-op.)
        return await action.function(params=validated_params, **special_context)

    except ValueError as e:
        # Preserve ValueError messages from validation.
        if 'requires browser_session but none provided' in str(e) or 'requires page_extraction_llm but none provided' in str(
            e
        ):
            raise RuntimeError(str(e)) from e
        else:
            raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
    except TimeoutError as e:
        raise RuntimeError(f'Error executing action {action_name} due to timeout.') from e
    except Exception as e:
        raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
def _log_sensitive_data_usage(self, placeholders_used: set[str], current_url: str | None) -> None:
    """Log when sensitive data is being used on a page."""
    if not placeholders_used:
        return
    # Only mention the URL when it is a real page (not a new-tab page).
    url_info = f' on {current_url}' if current_url and not is_new_tab_page(current_url) else ''
    logger.info(f'๐ Using sensitive data placeholders: {", ".join(sorted(placeholders_used))}{url_info}')
def _replace_sensitive_data(
    self, params: BaseModel, sensitive_data: dict[str, Any], current_url: str | None = None
) -> BaseModel:
    """
    Replaces sensitive data placeholders in params with actual values.

    Args:
        params: The parameter object containing <secret>placeholder</secret> tags
        sensitive_data: Dictionary of sensitive data, either in old format {key: value}
            or new format {domain_pattern: {key: value}}
        current_url: Optional current URL for domain matching

    Returns:
        BaseModel: The parameter object with placeholders replaced by actual values
        (re-validated against the original model class).
    """
    secret_pattern = re.compile(r'<secret>(.*?)</secret>')

    # Set to track all missing placeholders across the full object.
    all_missing_placeholders = set()
    # Set to track successfully replaced placeholders.
    replaced_placeholders = set()

    # Process sensitive data based on format and current URL: collect the
    # subset of secrets applicable to the current page.
    applicable_secrets = {}
    for domain_or_key, content in sensitive_data.items():
        if isinstance(content, dict):
            # New format: {domain_pattern: {key: value}}.
            # Only include secrets for domains that match the current URL.
            if current_url and not is_new_tab_page(current_url):
                # It's a real url: check it using our custom allowed_domains
                # scheme://*.example.com glob matching.
                if match_url_with_domain_pattern(current_url, domain_or_key):
                    applicable_secrets.update(content)
        else:
            # Old format: {key: value}, exposed to all domains (only allowed for legacy reasons).
            applicable_secrets[domain_or_key] = content

    # Filter out empty values so empty secrets register as "missing" below.
    applicable_secrets = {k: v for k, v in applicable_secrets.items() if v}

    def recursively_replace_secrets(value: str | dict | list) -> str | dict | list:
        # Walks the dumped params structure; mutates the two tracking sets above.
        if isinstance(value, str):
            matches = secret_pattern.findall(value)
            # Check if the placeholder key, like x_password, is in the output
            # parameters of the LLM and replace it with the sensitive data.
            for placeholder in matches:
                if placeholder in applicable_secrets:
                    # Generate a totp code if secret is a 2fa secret.
                    if 'bu_2fa_code' in placeholder:
                        totp = pyotp.TOTP(applicable_secrets[placeholder], digits=6)
                        replacement_value = totp.now()
                    else:
                        replacement_value = applicable_secrets[placeholder]
                    value = value.replace(f'<secret>{placeholder}</secret>', replacement_value)
                    replaced_placeholders.add(placeholder)
                else:
                    # Keep track of missing placeholders.
                    all_missing_placeholders.add(placeholder)
                    # Don't replace the tag, keep it as is.
            return value
        elif isinstance(value, dict):
            return {k: recursively_replace_secrets(v) for k, v in value.items()}
        elif isinstance(value, list):
            return [recursively_replace_secrets(v) for v in value]
        return value

    params_dump = params.model_dump()
    processed_params = recursively_replace_secrets(params_dump)

    # Log sensitive data usage.
    self._log_sensitive_data_usage(replaced_placeholders, current_url)

    # Log a warning if any placeholders are missing.
    if all_missing_placeholders:
        logger.warning(f'Missing or empty keys in sensitive_data dictionary: {", ".join(all_missing_placeholders)}')

    return type(params).model_validate(processed_params)
# @time_execution_sync('--create_action_model')
def create_action_model(self, include_actions: list[str] | None = None, page_url: str | None = None) -> type[ActionModel]:
    """Creates a Union of individual action models from registered actions,
    used by LLM APIs that support tool calling & enforce a schema.

    Each action model contains only the specific action being used,
    rather than all actions with most set to None.
    """
    # Local import is redundant with the module-level one; kept as-is.
    from typing import Union

    # Filter actions based on page_url if provided:
    # - page_url is None: only include actions with no domain filters
    # - page_url provided: only include actions whose filters match the URL
    available_actions: dict[str, RegisteredAction] = {}
    for name, action in self.registry.actions.items():
        if include_actions is not None and name not in include_actions:
            continue

        # If no page_url provided, only include actions with no filters.
        if page_url is None:
            if action.domains is None:
                available_actions[name] = action
            continue

        # Check domain filter if present.
        domain_is_allowed = self.registry._match_domains(action.domains, page_url)

        # Include action if domain filter matches.
        if domain_is_allowed:
            available_actions[name] = action

    # Create individual action models for each action: one single-field model
    # per action, e.g. ClickElementActionModel(click_element=...).
    individual_action_models: list[type[BaseModel]] = []
    for name, action in available_actions.items():
        # Create an individual model for each action that contains only one field.
        individual_model = create_model(
            f'{name.title().replace("_", "")}ActionModel',
            __base__=ActionModel,
            **{
                name: (
                    action.param_model,
                    Field(description=action.description),
                )  # type: ignore
            },
        )
        individual_action_models.append(individual_model)

    # If no actions available, return empty ActionModel.
    if not individual_action_models:
        return create_model('EmptyActionModel', __base__=ActionModel)

    # Create proper Union type that maintains the ActionModel interface.
    if len(individual_action_models) == 1:
        # If only one action, return it directly (no Union needed).
        result_model = individual_action_models[0]
    else:
        # Length >= 2: create a Union type using RootModel that properly
        # delegates the ActionModel methods to whichever variant is set.
        union_type = Union[tuple(individual_action_models)]  # type: ignore : Typing doesn't understand that the length is >= 2 (by design)

        class ActionModelUnion(RootModel[union_type]):  # type: ignore
            def get_index(self) -> int | None:
                """Delegate get_index to the underlying action model"""
                if hasattr(self.root, 'get_index'):
                    return self.root.get_index()  # type: ignore
                return None

            def set_index(self, index: int):
                """Delegate set_index to the underlying action model"""
                if hasattr(self.root, 'set_index'):
                    self.root.set_index(index)  # type: ignore

            def model_dump(self, **kwargs):
                """Delegate model_dump to the underlying action model"""
                if hasattr(self.root, 'model_dump'):
                    return self.root.model_dump(**kwargs)  # type: ignore
                return super().model_dump(**kwargs)

        # Set the name for better debugging.
        ActionModelUnion.__name__ = 'ActionModel'
        ActionModelUnion.__qualname__ = 'ActionModel'
        result_model = ActionModelUnion

    return result_model  # type:ignore
def get_prompt_description(self, page_url: str | None = None) -> str:
    """Describe all registered actions for the prompt.

    When *page_url* is given, only actions whose domain filters match that
    URL are included; otherwise only unfiltered actions are described.
    """
    # Delegate to the ActionRegistry, which owns the filtering logic.
    return self.registry.get_prompt_description(page_url=page_url)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/integrations/gmail/service.py | browser_use/integrations/gmail/service.py | """
Gmail API Service for Browser Use
Handles Gmail API authentication, email reading, and 2FA code extraction.
This service provides a clean interface for agents to interact with Gmail.
"""
import base64
import logging
import os
from pathlib import Path
from typing import Any
import anyio
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
class GmailService:
"""
Gmail API service for email reading.
Provides functionality to:
- Authenticate with Gmail API using OAuth2
- Read recent emails with filtering
- Return full email content for agent analysis
"""
# Gmail API scopes
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
def __init__(
self,
credentials_file: str | None = None,
token_file: str | None = None,
config_dir: str | None = None,
access_token: str | None = None,
):
"""
Initialize Gmail Service
Args:
credentials_file: Path to OAuth credentials JSON from Google Cloud Console
token_file: Path to store/load access tokens
config_dir: Directory to store config files (defaults to browser-use config directory)
access_token: Direct access token (skips file-based auth if provided)
"""
# Set up configuration directory using browser-use's config system
if config_dir is None:
self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR
else:
self.config_dir = Path(config_dir).expanduser().resolve()
# Ensure config directory exists (only if not using direct token)
if access_token is None:
self.config_dir.mkdir(parents=True, exist_ok=True)
# Set up credential paths
self.credentials_file = credentials_file or self.config_dir / 'gmail_credentials.json'
self.token_file = token_file or self.config_dir / 'gmail_token.json'
# Direct access token support
self.access_token = access_token
self.service = None
self.creds = None
self._authenticated = False
def is_authenticated(self) -> bool:
"""Check if Gmail service is authenticated"""
return self._authenticated and self.service is not None
async def authenticate(self) -> bool:
"""
Handle OAuth authentication and token management
Returns:
bool: True if authentication successful, False otherwise
"""
try:
logger.info('๐ Authenticating with Gmail API...')
# Check if using direct access token
if self.access_token:
logger.info('๐ Using provided access token')
# Create credentials from access token
self.creds = Credentials(token=self.access_token, scopes=self.SCOPES)
# Test token validity by building service
self.service = build('gmail', 'v1', credentials=self.creds)
self._authenticated = True
logger.info('โ
Gmail API ready with access token!')
return True
# Original file-based authentication flow
# Try to load existing tokens
if os.path.exists(self.token_file):
self.creds = Credentials.from_authorized_user_file(str(self.token_file), self.SCOPES)
logger.debug('๐ Loaded existing tokens')
# If no valid credentials, run OAuth flow
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
logger.info('๐ Refreshing expired tokens...')
self.creds.refresh(Request())
else:
logger.info('๐ Starting OAuth flow...')
if not os.path.exists(self.credentials_file):
logger.error(
f'โ Gmail credentials file not found: {self.credentials_file}\n'
'Please download it from Google Cloud Console:\n'
'1. Go to https://console.cloud.google.com/\n'
'2. APIs & Services > Credentials\n'
'3. Download OAuth 2.0 Client JSON\n'
f"4. Save as 'gmail_credentials.json' in {self.config_dir}/"
)
return False
flow = InstalledAppFlow.from_client_secrets_file(str(self.credentials_file), self.SCOPES)
# Use specific redirect URI to match OAuth credentials
self.creds = flow.run_local_server(port=8080, open_browser=True)
# Save tokens for next time
await anyio.Path(self.token_file).write_text(self.creds.to_json())
logger.info(f'๐พ Tokens saved to {self.token_file}')
# Build Gmail service
self.service = build('gmail', 'v1', credentials=self.creds)
self._authenticated = True
logger.info('โ
Gmail API ready!')
return True
except Exception as e:
logger.error(f'โ Gmail authentication failed: {e}')
return False
async def get_recent_emails(self, max_results: int = 10, query: str = '', time_filter: str = '1h') -> list[dict[str, Any]]:
"""
Get recent emails with optional query filter
Args:
max_results: Maximum number of emails to fetch
query: Gmail search query (e.g., 'from:noreply@example.com')
time_filter: Time filter (e.g., '5m', '1h', '1d')
Returns:
List of email dictionaries with parsed content
"""
if not self.is_authenticated():
logger.error('โ Gmail service not authenticated. Call authenticate() first.')
return []
try:
# Add time filter to query if provided
if time_filter and 'newer_than:' not in query:
query = f'newer_than:{time_filter} {query}'.strip()
logger.info(f'๐ง Fetching {max_results} recent emails...')
if query:
logger.debug(f'๐ Query: {query}')
# Get message list
assert self.service is not None
results = self.service.users().messages().list(userId='me', maxResults=max_results, q=query).execute()
messages = results.get('messages', [])
if not messages:
logger.info('๐ญ No messages found')
return []
logger.info(f'๐จ Found {len(messages)} messages, fetching details...')
# Get full message details
emails = []
for i, message in enumerate(messages, 1):
logger.debug(f'๐ Reading email {i}/{len(messages)}...')
full_message = self.service.users().messages().get(userId='me', id=message['id'], format='full').execute()
email_data = self._parse_email(full_message)
emails.append(email_data)
return emails
except HttpError as error:
logger.error(f'โ Gmail API error: {error}')
return []
except Exception as e:
logger.error(f'โ Unexpected error fetching emails: {e}')
return []
def _parse_email(self, message: dict[str, Any]) -> dict[str, Any]:
"""Parse Gmail message into readable format"""
headers = {h['name']: h['value'] for h in message['payload']['headers']}
return {
'id': message['id'],
'thread_id': message['threadId'],
'subject': headers.get('Subject', ''),
'from': headers.get('From', ''),
'to': headers.get('To', ''),
'date': headers.get('Date', ''),
'timestamp': int(message['internalDate']),
'body': self._extract_body(message['payload']),
'raw_message': message,
}
def _extract_body(self, payload: dict[str, Any]) -> str:
"""Extract email body from payload"""
body = ''
if payload.get('body', {}).get('data'):
# Simple email body
body = base64.urlsafe_b64decode(payload['body']['data']).decode('utf-8')
elif payload.get('parts'):
# Multi-part email
for part in payload['parts']:
if part['mimeType'] == 'text/plain' and part.get('body', {}).get('data'):
part_body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
body += part_body
elif part['mimeType'] == 'text/html' and not body and part.get('body', {}).get('data'):
# Fallback to HTML if no plain text
body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
return body
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/integrations/gmail/__init__.py | browser_use/integrations/gmail/__init__.py | """
Gmail Integration for Browser Use
Provides Gmail API integration for email reading and verification code extraction.
This integration enables agents to read email content and extract verification codes themselves.
Usage:
from browser_use.integrations.gmail import GmailService, register_gmail_actions
# Option 1: Register Gmail actions with file-based authentication
tools = Tools()
register_gmail_actions(tools)
# Option 2: Register Gmail actions with direct access token (recommended for production)
tools = Tools()
register_gmail_actions(tools, access_token="your_access_token_here")
# Option 3: Use the service directly
gmail = GmailService(access_token="your_access_token_here")
await gmail.authenticate()
emails = await gmail.get_recent_emails()
"""
# @file purpose: Gmail integration for 2FA email authentication and email reading
from .actions import register_gmail_actions
from .service import GmailService
__all__ = ['GmailService', 'register_gmail_actions']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/integrations/gmail/actions.py | browser_use/integrations/gmail/actions.py | """
Gmail Actions for Browser Use
Defines agent actions for Gmail integration including 2FA code retrieval,
email reading, and authentication management.
"""
import logging
from pydantic import BaseModel, Field
from browser_use.agent.views import ActionResult
from browser_use.tools.service import Tools
from .service import GmailService
logger = logging.getLogger(__name__)
# Global Gmail service instance - initialized when actions are registered
_gmail_service: GmailService | None = None
class GetRecentEmailsParams(BaseModel):
"""Parameters for getting recent emails"""
keyword: str = Field(default='', description='A single keyword for search, e.g. github, airbnb, etc.')
max_results: int = Field(default=3, ge=1, le=50, description='Maximum number of emails to retrieve (1-50, default: 3)')
def register_gmail_actions(tools: Tools, gmail_service: GmailService | None = None, access_token: str | None = None) -> Tools:
"""
Register Gmail actions with the provided tools
Args:
tools: The browser-use tools to register actions with
gmail_service: Optional pre-configured Gmail service instance
access_token: Optional direct access token (alternative to file-based auth)
"""
global _gmail_service
# Use provided service or create a new one with access token if provided
if gmail_service:
_gmail_service = gmail_service
elif access_token:
_gmail_service = GmailService(access_token=access_token)
else:
_gmail_service = GmailService()
@tools.registry.action(
description='Get recent emails from the mailbox with a keyword to retrieve verification codes, OTP, 2FA tokens, magic links, or any recent email content. Keep your query a single keyword.',
param_model=GetRecentEmailsParams,
)
async def get_recent_emails(params: GetRecentEmailsParams) -> ActionResult:
"""Get recent emails from the last 5 minutes with full content"""
try:
if _gmail_service is None:
raise RuntimeError('Gmail service not initialized')
# Ensure authentication
if not _gmail_service.is_authenticated():
logger.info('๐ง Gmail not authenticated, attempting authentication...')
authenticated = await _gmail_service.authenticate()
if not authenticated:
return ActionResult(
extracted_content='Failed to authenticate with Gmail. Please ensure Gmail credentials are set up properly.',
long_term_memory='Gmail authentication failed',
)
# Use specified max_results (1-50, default 10), last 5 minutes
max_results = params.max_results
time_filter = '5m'
# Build query with time filter and optional user query
query_parts = [f'newer_than:{time_filter}']
if params.keyword.strip():
query_parts.append(params.keyword.strip())
query = ' '.join(query_parts)
logger.info(f'๐ Gmail search query: {query}')
# Get emails
emails = await _gmail_service.get_recent_emails(max_results=max_results, query=query, time_filter=time_filter)
if not emails:
query_info = f" matching '{params.keyword}'" if params.keyword.strip() else ''
memory = f'No recent emails found from last {time_filter}{query_info}'
return ActionResult(
extracted_content=memory,
long_term_memory=memory,
)
# Format with full email content for large display
content = f'Found {len(emails)} recent email{"s" if len(emails) > 1 else ""} from the last {time_filter}:\n\n'
for i, email in enumerate(emails, 1):
content += f'Email {i}:\n'
content += f'From: {email["from"]}\n'
content += f'Subject: {email["subject"]}\n'
content += f'Date: {email["date"]}\n'
content += f'Content:\n{email["body"]}\n'
content += '-' * 50 + '\n\n'
logger.info(f'๐ง Retrieved {len(emails)} recent emails')
return ActionResult(
extracted_content=content,
include_extracted_content_only_once=True,
long_term_memory=f'Retrieved {len(emails)} recent emails from last {time_filter} for query {query}.',
)
except Exception as e:
logger.error(f'Error getting recent emails: {e}')
return ActionResult(
error=f'Error getting recent emails: {str(e)}',
long_term_memory='Failed to get recent emails due to error',
)
return tools
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/skills/views.py | browser_use/skills/views.py | """Skills views - wraps SDK types with helper methods"""
from typing import Any
from browser_use_sdk.types.parameter_schema import ParameterSchema
from browser_use_sdk.types.skill_response import SkillResponse
from pydantic import BaseModel, ConfigDict, Field
class MissingCookieException(Exception):
"""Raised when a required cookie is missing for skill execution
Attributes:
cookie_name: The name of the missing cookie parameter
cookie_description: Description of how to obtain the cookie
"""
def __init__(self, cookie_name: str, cookie_description: str):
self.cookie_name = cookie_name
self.cookie_description = cookie_description
super().__init__(f"Missing required cookie '{cookie_name}': {cookie_description}")
class Skill(BaseModel):
"""Skill model with helper methods for LLM integration
This wraps the SDK SkillResponse with additional helper properties
for converting schemas to Pydantic models.
"""
model_config = ConfigDict(extra='forbid', validate_assignment=True)
id: str
title: str
description: str
parameters: list[ParameterSchema]
output_schema: dict[str, Any] = Field(default_factory=dict)
@staticmethod
def from_skill_response(response: SkillResponse) -> 'Skill':
"""Create a Skill from SDK SkillResponse"""
return Skill(
id=response.id,
title=response.title,
description=response.description,
parameters=response.parameters,
output_schema=response.output_schema,
)
def parameters_pydantic(self, exclude_cookies: bool = False) -> type[BaseModel]:
"""Convert parameter schemas to a pydantic model for structured output
exclude_cookies is very useful when dealing with LLMs that are not aware of cookies.
"""
from browser_use.skills.utils import convert_parameters_to_pydantic
parameters = list[ParameterSchema](self.parameters)
if exclude_cookies:
parameters = [param for param in parameters if param.type != 'cookie']
return convert_parameters_to_pydantic(parameters, model_name=f'{self.title}Parameters')
@property
def output_type_pydantic(self) -> type[BaseModel] | None:
"""Convert output schema to a pydantic model for structured output"""
if not self.output_schema:
return None
from browser_use.skills.utils import convert_json_schema_to_pydantic
return convert_json_schema_to_pydantic(self.output_schema, model_name=f'{self.title}Output')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/skills/service.py | browser_use/skills/service.py | """Skills service for fetching and executing skills from the Browser Use API"""
import logging
import os
from typing import Any, Literal
from browser_use_sdk import AsyncBrowserUse
from browser_use_sdk.types.execute_skill_response import ExecuteSkillResponse
from browser_use_sdk.types.skill_list_response import SkillListResponse
from cdp_use.cdp.network import Cookie
from pydantic import BaseModel, ValidationError
from browser_use.skills.views import (
MissingCookieException,
Skill,
)
logger = logging.getLogger(__name__)
class SkillService:
"""Service for managing and executing skills from the Browser Use API"""
def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None):
"""Initialize the skills service
Args:
skill_ids: List of skill IDs to fetch and cache, or ['*'] to fetch all available skills
api_key: Browser Use API key (optional, will use env var if not provided)
"""
self.skill_ids = skill_ids
self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY') or ''
if not self.api_key:
raise ValueError('BROWSER_USE_API_KEY environment variable is not set')
self._skills: dict[str, Skill] = {}
self._client: AsyncBrowserUse | None = None
self._initialized = False
async def async_init(self) -> None:
"""Async initialization to fetch all skills at once
This should be called after __init__ to fetch and cache all skills.
Fetches all available skills in one API call and filters based on skill_ids.
"""
if self._initialized:
logger.debug('SkillService already initialized')
return
# Create the SDK client
self._client = AsyncBrowserUse(api_key=self.api_key)
try:
# Fetch skills from API
logger.info('Fetching skills from Browser Use API...')
use_wildcard = '*' in self.skill_ids
page_size = 100
requested_ids: set[str] = set() if use_wildcard else {s for s in self.skill_ids if s != '*'}
if use_wildcard:
# Wildcard: fetch only first page (max 100 skills) to avoid LLM tool overload
skills_response: SkillListResponse = await self._client.skills.list_skills(
page_size=page_size,
page_number=1,
is_enabled=True,
)
all_items = list(skills_response.items)
if len(all_items) >= page_size:
logger.warning(
f'Wildcard "*" limited to first {page_size} skills. '
f'Specify explicit skill IDs if you need specific skills beyond this limit.'
)
logger.debug(f'Fetched {len(all_items)} skills (wildcard mode, single page)')
else:
# Explicit IDs: paginate until all requested IDs found
all_items = []
page = 1
max_pages = 5 # Safety limit
while page <= max_pages:
skills_response = await self._client.skills.list_skills(
page_size=page_size,
page_number=page,
is_enabled=True,
)
all_items.extend(skills_response.items)
# Check if we've found all requested skills
found_ids = {s.id for s in all_items if s.id in requested_ids}
if found_ids == requested_ids:
break
# Stop if we got fewer items than page_size (last page)
if len(skills_response.items) < page_size:
break
page += 1
if page > max_pages:
logger.warning(f'Reached pagination limit ({max_pages} pages) before finding all requested skills')
logger.debug(f'Fetched {len(all_items)} skills across {page} page(s)')
# Filter to only finished skills (is_enabled already filtered by API)
all_available_skills = [skill for skill in all_items if skill.status == 'finished']
logger.info(f'Found {len(all_available_skills)} available skills from API')
# Determine which skills to load
if use_wildcard:
logger.info('Wildcard "*" detected, loading first 100 skills')
skills_to_load = all_available_skills
else:
# Load only the requested skill IDs
skills_to_load = [skill for skill in all_available_skills if skill.id in requested_ids]
# Warn about any requested skills that weren't found
found_ids = {skill.id for skill in skills_to_load}
missing_ids = requested_ids - found_ids
if missing_ids:
logger.warning(f'Requested skills not found or not available: {missing_ids}')
# Convert SDK SkillResponse objects to our Skill models and cache them
for skill_response in skills_to_load:
try:
skill = Skill.from_skill_response(skill_response)
self._skills[skill.id] = skill
logger.debug(f'Cached skill: {skill.title} ({skill.id})')
except Exception as e:
logger.error(f'Failed to convert skill {skill_response.id}: {type(e).__name__}: {e}')
logger.info(f'Successfully loaded {len(self._skills)} skills')
self._initialized = True
except Exception as e:
logger.error(f'Error during skill initialization: {type(e).__name__}: {e}')
self._initialized = True # Mark as initialized even on failure to avoid retry loops
raise
async def get_skill(self, skill_id: str) -> Skill | None:
"""Get a cached skill by ID. Auto-initializes if not already initialized.
Args:
skill_id: The UUID of the skill
Returns:
Skill model or None if not found in cache
"""
if not self._initialized:
await self.async_init()
return self._skills.get(skill_id)
async def get_all_skills(self) -> list[Skill]:
"""Get all cached skills. Auto-initializes if not already initialized.
Returns:
List of all successfully loaded skills
"""
if not self._initialized:
await self.async_init()
return list(self._skills.values())
async def execute_skill(
self, skill_id: str, parameters: dict[str, Any] | BaseModel, cookies: list[Cookie]
) -> ExecuteSkillResponse:
"""Execute a skill with the provided parameters. Auto-initializes if not already initialized.
Parameters are validated against the skill's Pydantic schema before execution.
Args:
skill_id: The UUID of the skill to execute
parameters: Either a dictionary or BaseModel instance matching the skill's parameter schema
Returns:
ExecuteSkillResponse with execution results
Raises:
ValueError: If skill not found in cache or parameter validation fails
Exception: If API call fails
"""
# Auto-initialize if needed
if not self._initialized:
await self.async_init()
assert self._client is not None, 'Client not initialized'
# Check if skill exists in cache
skill = await self.get_skill(skill_id)
if skill is None:
raise ValueError(f'Skill {skill_id} not found in cache. Available skills: {list(self._skills.keys())}')
# Extract cookie parameters from the skill
cookie_params = [p for p in skill.parameters if p.type == 'cookie']
# Build a dict of cookies from the provided cookie list
cookie_dict: dict[str, str] = {cookie['name']: cookie['value'] for cookie in cookies}
# Check for missing required cookies and fill cookie values
if cookie_params:
for cookie_param in cookie_params:
is_required = cookie_param.required if cookie_param.required is not None else True
if is_required and cookie_param.name not in cookie_dict:
# Required cookie is missing - raise exception with description
raise MissingCookieException(
cookie_name=cookie_param.name, cookie_description=cookie_param.description or 'No description provided'
)
# Fill in cookie values into parameters
# Convert parameters to dict first if it's a BaseModel
if isinstance(parameters, BaseModel):
params_dict = parameters.model_dump()
else:
params_dict = dict(parameters)
# Add cookie values to parameters
for cookie_param in cookie_params:
if cookie_param.name in cookie_dict:
params_dict[cookie_param.name] = cookie_dict[cookie_param.name]
# Replace parameters with the updated dict
parameters = params_dict
# Get the skill's pydantic model for parameter validation
ParameterModel = skill.parameters_pydantic(exclude_cookies=False)
# Validate and convert parameters to dict
validated_params_dict: dict[str, Any]
try:
if isinstance(parameters, BaseModel):
# Already a pydantic model - validate it matches the skill's schema
# by converting to dict and re-validating with the skill's model
params_dict = parameters.model_dump()
validated_model = ParameterModel(**params_dict)
validated_params_dict = validated_model.model_dump()
else:
# Dict provided - validate with the skill's pydantic model
validated_model = ParameterModel(**parameters)
validated_params_dict = validated_model.model_dump()
except ValidationError as e:
# Pydantic validation failed
error_msg = f'Parameter validation failed for skill {skill.title}:\n'
for error in e.errors():
field = '.'.join(str(x) for x in error['loc'])
error_msg += f' - {field}: {error["msg"]}\n'
raise ValueError(error_msg) from e
except Exception as e:
raise ValueError(f'Failed to validate parameters for skill {skill.title}: {type(e).__name__}: {e}') from e
# Execute skill via API
try:
logger.info(f'Executing skill: {skill.title} ({skill_id})')
result: ExecuteSkillResponse = await self._client.skills.execute_skill(
skill_id=skill_id, parameters=validated_params_dict
)
if result.success:
logger.info(f'Skill {skill.title} executed successfully (latency: {result.latency_ms}ms)')
else:
logger.error(f'Skill {skill.title} execution failed: {result.error}')
return result
except Exception as e:
logger.error(f'Error executing skill {skill_id}: {type(e).__name__}: {e}')
# Return error response
return ExecuteSkillResponse(
success=False,
error=f'Failed to execute skill: {type(e).__name__}: {str(e)}',
)
async def close(self) -> None:
"""Close the SDK client and cleanup resources"""
if self._client is not None:
# AsyncBrowserUse client cleanup if needed
# The SDK doesn't currently have a close method, but we set to None for cleanup
self._client = None
self._initialized = False
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/skills/utils.py | browser_use/skills/utils.py | """Utilities for skill schema conversion"""
from typing import Any
from pydantic import BaseModel, Field, create_model
from browser_use.skills.views import ParameterSchema
def convert_parameters_to_pydantic(parameters: list[ParameterSchema], model_name: str = 'SkillParameters') -> type[BaseModel]:
"""Convert a list of ParameterSchema to a pydantic model for structured output
Args:
parameters: List of parameter schemas from the skill API
model_name: Name for the generated pydantic model
Returns:
A pydantic BaseModel class with fields matching the parameter schemas
"""
if not parameters:
# Return empty model if no parameters
return create_model(model_name, __base__=BaseModel)
fields: dict[str, Any] = {}
for param in parameters:
# Map parameter type string to Python types
python_type: Any = str # default
param_type = param.type
if param_type == 'string':
python_type = str
elif param_type == 'number':
python_type = float
elif param_type == 'boolean':
python_type = bool
elif param_type == 'object':
python_type = dict[str, Any]
elif param_type == 'array':
python_type = list[Any]
elif param_type == 'cookie':
python_type = str # Treat cookies as strings
# Check if parameter is required (defaults to True if not specified)
is_required = param.required if param.required is not None else True
# Make optional if not required
if not is_required:
python_type = python_type | None # type: ignore
# Create field with description
field_kwargs = {}
if param.description:
field_kwargs['description'] = param.description
if is_required:
fields[param.name] = (python_type, Field(**field_kwargs))
else:
fields[param.name] = (python_type, Field(default=None, **field_kwargs))
# Create and return the model
return create_model(model_name, __base__=BaseModel, **fields)
def convert_json_schema_to_pydantic(schema: dict[str, Any], model_name: str = 'SkillOutput') -> type[BaseModel]:
"""Convert a JSON schema to a pydantic model
Args:
schema: JSON schema dictionary (OpenAPI/JSON Schema format)
model_name: Name for the generated pydantic model
Returns:
A pydantic BaseModel class matching the schema
Note:
This is a simplified converter that handles basic types.
For complex nested schemas, consider using datamodel-code-generator.
"""
if not schema or 'properties' not in schema:
# Return empty model if no schema
return create_model(model_name, __base__=BaseModel)
fields: dict[str, Any] = {}
properties = schema.get('properties', {})
required_fields = set(schema.get('required', []))
for field_name, field_schema in properties.items():
# Get the field type
field_type_str = field_schema.get('type', 'string')
field_description = field_schema.get('description')
# Map JSON schema types to Python types
python_type: Any = str # default
if field_type_str == 'string':
python_type = str
elif field_type_str == 'number':
python_type = float
elif field_type_str == 'integer':
python_type = int
elif field_type_str == 'boolean':
python_type = bool
elif field_type_str == 'object':
python_type = dict[str, Any]
elif field_type_str == 'array':
# Check if items type is specified
items_schema = field_schema.get('items', {})
items_type = items_schema.get('type', 'string')
if items_type == 'string':
python_type = list[str]
elif items_type == 'number':
python_type = list[float]
elif items_type == 'integer':
python_type = list[int]
elif items_type == 'boolean':
python_type = list[bool]
elif items_type == 'object':
python_type = list[dict[str, Any]]
else:
python_type = list[Any]
# Make optional if not required
is_required = field_name in required_fields
if not is_required:
python_type = python_type | None # type: ignore
# Create field with description
field_kwargs = {}
if field_description:
field_kwargs['description'] = field_description
if is_required:
fields[field_name] = (python_type, Field(**field_kwargs))
else:
fields[field_name] = (python_type, Field(default=None, **field_kwargs))
# Create and return the model
return create_model(model_name, __base__=BaseModel, **fields)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/skills/__init__.py | browser_use/skills/__init__.py | from browser_use.skills.service import SkillService
from browser_use.skills.views import MissingCookieException
__all__ = ['SkillService', 'MissingCookieException']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/views.py | browser_use/code_use/views.py | """Data models for code-use mode."""
from __future__ import annotations
import json
from enum import Enum
from pathlib import Path
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from uuid_extensions import uuid7str
from browser_use.tokens.views import UsageSummary
class CellType(str, Enum):
"""Type of notebook cell."""
CODE = 'code'
MARKDOWN = 'markdown'
class ExecutionStatus(str, Enum):
"""Execution status of a cell."""
PENDING = 'pending'
RUNNING = 'running'
SUCCESS = 'success'
ERROR = 'error'
class CodeCell(BaseModel):
"""Represents a code cell in the notebook-like execution."""
model_config = ConfigDict(extra='forbid')
id: str = Field(default_factory=uuid7str)
cell_type: CellType = CellType.CODE
source: str = Field(description='The code to execute')
output: str | None = Field(default=None, description='The output of the code execution')
execution_count: int | None = Field(default=None, description='The execution count')
status: ExecutionStatus = Field(default=ExecutionStatus.PENDING)
error: str | None = Field(default=None, description='Error message if execution failed')
browser_state: str | None = Field(default=None, description='Browser state after execution')
class NotebookSession(BaseModel):
"""Represents a notebook-like session."""
model_config = ConfigDict(extra='forbid')
id: str = Field(default_factory=uuid7str)
cells: list[CodeCell] = Field(default_factory=list)
current_execution_count: int = Field(default=0)
namespace: dict[str, Any] = Field(default_factory=dict, description='Current namespace state')
_complete_history: list[CodeAgentHistory] = PrivateAttr(default_factory=list)
_usage_summary: UsageSummary | None = PrivateAttr(default=None)
def add_cell(self, source: str) -> CodeCell:
"""Add a new code cell to the session."""
cell = CodeCell(source=source)
self.cells.append(cell)
return cell
def get_cell(self, cell_id: str) -> CodeCell | None:
"""Get a cell by ID."""
for cell in self.cells:
if cell.id == cell_id:
return cell
return None
def get_latest_cell(self) -> CodeCell | None:
"""Get the most recently added cell."""
if self.cells:
return self.cells[-1]
return None
def increment_execution_count(self) -> int:
"""Increment and return the execution count."""
self.current_execution_count += 1
return self.current_execution_count
@property
def history(self) -> CodeAgentHistoryList:
"""Get the history as an AgentHistoryList-compatible object."""
return CodeAgentHistoryList(self._complete_history, self._usage_summary)
class NotebookExport(BaseModel):
"""Export format for Jupyter notebook."""
model_config = ConfigDict(extra='forbid')
nbformat: int = Field(default=4)
nbformat_minor: int = Field(default=5)
metadata: dict[str, Any] = Field(default_factory=dict)
cells: list[dict[str, Any]] = Field(default_factory=list)
class CodeAgentModelOutput(BaseModel):
"""Model output for CodeAgent - contains the code and full LLM response."""
model_config = ConfigDict(extra='forbid')
model_output: str = Field(description='The extracted code from the LLM response')
full_response: str = Field(description='The complete LLM response including any text/reasoning')
class CodeAgentResult(BaseModel):
"""Result of executing a code cell in CodeAgent."""
model_config = ConfigDict(extra='forbid')
extracted_content: str | None = Field(default=None, description='Output from code execution')
error: str | None = Field(default=None, description='Error message if execution failed')
is_done: bool = Field(default=False, description='Whether task is marked as done')
success: bool | None = Field(default=None, description='Self-reported success from done() call')
class CodeAgentState(BaseModel):
    """Snapshot of the browser state recorded for one CodeAgent step."""

    model_config = ConfigDict(extra='forbid', arbitrary_types_allowed=True)
    url: str | None = Field(default=None, description='Current page URL')
    title: str | None = Field(default=None, description='Current page title')
    screenshot_path: str | None = Field(default=None, description='Path to screenshot file')

    def get_screenshot(self) -> str | None:
        """Read the screenshot file from disk and return it base64-encoded.

        Returns None when no path is recorded, the file is missing, or
        reading fails for any reason (best-effort accessor).
        """
        if not self.screenshot_path:
            return None
        import base64
        from pathlib import Path

        screenshot_file = Path(self.screenshot_path)
        if not screenshot_file.exists():
            return None
        try:
            raw_bytes = screenshot_file.read_bytes()
        except Exception:
            # Best-effort: a vanished or unreadable file is treated as "no screenshot".
            return None
        return base64.b64encode(raw_bytes).decode('utf-8')
class CodeAgentStepMetadata(BaseModel):
    """Metadata for a single CodeAgent step including timing and token information."""

    model_config = ConfigDict(extra='forbid')
    # Token counts for the LLM call backing this step; None when usage was unavailable.
    input_tokens: int | None = Field(default=None, description='Number of input tokens used')
    output_tokens: int | None = Field(default=None, description='Number of output tokens used')
    # Wall-clock bounds of the step as Unix timestamps.
    step_start_time: float = Field(description='Step start timestamp (Unix time)')
    step_end_time: float = Field(description='Step end timestamp (Unix time)')

    @property
    def duration_seconds(self) -> float:
        """Calculate step duration in seconds."""
        return self.step_end_time - self.step_start_time
class CodeAgentHistory(BaseModel):
    """History item for CodeAgent actions.

    One entry per agent step: the LLM output, the execution results, the
    browser state snapshot, and timing metadata.
    """

    model_config = ConfigDict(extra='forbid', arbitrary_types_allowed=True)
    model_output: CodeAgentModelOutput | None = Field(default=None, description='LLM output for this step')
    result: list[CodeAgentResult] = Field(default_factory=list, description='Results from code execution')
    state: CodeAgentState = Field(description='Browser state at this step')
    metadata: CodeAgentStepMetadata | None = Field(default=None, description='Step timing and token metadata')
    screenshot_path: str | None = Field(default=None, description='Legacy field for screenshot path')

    def model_dump(self, **kwargs) -> dict[str, Any]:
        """Custom serialization for CodeAgentHistory.

        Fix: forward **kwargs (e.g. mode=, exclude_none=) to the nested
        model_dump calls — previously they were accepted but silently ignored,
        even though CodeAgentHistoryList.model_dump passes them down.
        """
        return {
            'model_output': self.model_output.model_dump(**kwargs) if self.model_output else None,
            'result': [r.model_dump(**kwargs) for r in self.result],
            'state': self.state.model_dump(**kwargs),
            'metadata': self.metadata.model_dump(**kwargs) if self.metadata else None,
            'screenshot_path': self.screenshot_path,
        }
class CodeAgentHistoryList:
    """Compatibility wrapper for CodeAgentHistory that provides AgentHistoryList-like API.

    Plain (non-pydantic) container so existing callers that expect the
    browser-use AgentHistoryList surface keep working against CodeAgent runs.
    """

    def __init__(self, complete_history: list[CodeAgentHistory], usage_summary: UsageSummary | None) -> None:
        """Initialize with CodeAgent history data."""
        self._complete_history = complete_history
        self._usage_summary = usage_summary

    @property
    def history(self) -> list[CodeAgentHistory]:
        """Get the raw history list."""
        return self._complete_history

    @property
    def usage(self) -> UsageSummary | None:
        """Get the usage summary."""
        return self._usage_summary

    def __len__(self) -> int:
        """Return the number of history items."""
        return len(self._complete_history)

    def __str__(self) -> str:
        """Representation of the CodeAgentHistoryList object."""
        return f'CodeAgentHistoryList(steps={len(self._complete_history)}, action_results={len(self.action_results())})'

    def __repr__(self) -> str:
        """Representation of the CodeAgentHistoryList object."""
        return self.__str__()

    def final_result(self) -> None | str:
        """Final result from history (extracted_content of the last step's last result)."""
        if self._complete_history and self._complete_history[-1].result:
            return self._complete_history[-1].result[-1].extracted_content
        return None

    def is_done(self) -> bool:
        """Check if the agent is done (last result carries is_done=True)."""
        if self._complete_history and len(self._complete_history[-1].result) > 0:
            last_result = self._complete_history[-1].result[-1]
            return last_result.is_done is True
        return False

    def is_successful(self) -> bool | None:
        """Check if the agent completed successfully; None when not done yet."""
        if self._complete_history and len(self._complete_history[-1].result) > 0:
            last_result = self._complete_history[-1].result[-1]
            if last_result.is_done is True:
                return last_result.success
        return None

    def errors(self) -> list[str | None]:
        """Get all errors from history, with None for steps without errors."""
        errors = []
        for h in self._complete_history:
            step_errors = [r.error for r in h.result if r.error]
            # each step can have only one error
            errors.append(step_errors[0] if step_errors else None)
        return errors

    def has_errors(self) -> bool:
        """Check if the agent has any non-None errors."""
        return any(error is not None for error in self.errors())

    def urls(self) -> list[str | None]:
        """Get all URLs from history.

        Fix: the previous `x if x is not None else None` conditional was a
        no-op identity — the plain attribute access is equivalent.
        """
        return [h.state.url for h in self._complete_history]

    def screenshot_paths(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Get all screenshot paths from history.

        Args:
            n_last: Only the last n steps; 0 yields []; None means all steps.
            return_none_if_not_screenshot: Keep None placeholders for steps
                without a screenshot instead of dropping them.
        """
        if n_last == 0:
            return []
        # Fix: removed the no-op `x if x is not None else None` conditionals.
        items = self._complete_history if n_last is None else self._complete_history[-n_last:]
        if return_none_if_not_screenshot:
            return [h.state.screenshot_path for h in items]
        return [h.state.screenshot_path for h in items if h.state.screenshot_path is not None]

    def screenshots(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Get all screenshots from history as base64 strings (loaded from disk)."""
        if n_last == 0:
            return []
        history_items = self._complete_history if n_last is None else self._complete_history[-n_last:]
        screenshots = []
        for item in history_items:
            screenshot_b64 = item.state.get_screenshot()
            if screenshot_b64:
                screenshots.append(screenshot_b64)
            else:
                if return_none_if_not_screenshot:
                    screenshots.append(None)
        return screenshots

    def action_results(self) -> list[CodeAgentResult]:
        """Get all results from history, flattened across steps."""
        results = []
        for h in self._complete_history:
            results.extend([r for r in h.result if r])
        return results

    def extracted_content(self) -> list[str]:
        """Get all non-empty extracted content from history."""
        content = []
        for h in self._complete_history:
            content.extend([r.extracted_content for r in h.result if r.extracted_content])
        return content

    def number_of_steps(self) -> int:
        """Get the number of steps in the history."""
        return len(self._complete_history)

    def total_duration_seconds(self) -> float:
        """Get total duration of all steps in seconds (steps without metadata count as 0)."""
        total = 0.0
        for h in self._complete_history:
            if h.metadata:
                total += h.metadata.duration_seconds
        return total

    def last_action(self) -> None | dict:
        """Last action in history - returns the last code execution."""
        if self._complete_history and self._complete_history[-1].model_output:
            return {
                'execute_code': {
                    'code': self._complete_history[-1].model_output.model_output,
                    'full_response': self._complete_history[-1].model_output.full_response,
                }
            }
        return None

    def action_names(self) -> list[str]:
        """Get all action names from history - returns 'execute_code' for each code execution."""
        action_names = []
        for action in self.model_actions():
            actions = list(action.keys())
            if actions:
                action_names.append(actions[0])
        return action_names

    def model_thoughts(self) -> list[Any]:
        """Get all thoughts from history - returns model_output for CodeAgent."""
        return [h.model_output for h in self._complete_history if h.model_output]

    def model_outputs(self) -> list[CodeAgentModelOutput]:
        """Get all model outputs from history."""
        return [h.model_output for h in self._complete_history if h.model_output]

    def model_actions(self) -> list[dict]:
        """Get all actions from history - returns code execution actions with their code."""
        actions = []
        for h in self._complete_history:
            if h.model_output:
                # Create one action dict per result (code execution)
                for _ in h.result:
                    action_dict = {
                        'execute_code': {
                            'code': h.model_output.model_output,
                            'full_response': h.model_output.full_response,
                        }
                    }
                    actions.append(action_dict)
        return actions

    def action_history(self) -> list[list[dict]]:
        """Get truncated action history grouped by step."""
        step_outputs = []
        for h in self._complete_history:
            step_actions = []
            if h.model_output:
                for result in h.result:
                    action_dict = {
                        'execute_code': {
                            'code': h.model_output.model_output,
                        },
                        'result': {
                            'extracted_content': result.extracted_content,
                            'is_done': result.is_done,
                            'success': result.success,
                            'error': result.error,
                        },
                    }
                    step_actions.append(action_dict)
            step_outputs.append(step_actions)
        return step_outputs

    def model_actions_filtered(self, include: list[str] | None = None) -> list[dict]:
        """Get all model actions from history filtered - returns empty for CodeAgent."""
        # `include` is intentionally unused; kept for AgentHistoryList API parity.
        return []

    def add_item(self, history_item: CodeAgentHistory) -> None:
        """Add a history item to the list."""
        self._complete_history.append(history_item)

    def model_dump(self, **kwargs) -> dict[str, Any]:
        """Custom serialization for CodeAgentHistoryList."""
        return {
            'history': [h.model_dump(**kwargs) for h in self._complete_history],
            'usage': self._usage_summary.model_dump() if self._usage_summary else None,
        }

    def save_to_file(self, filepath: str | Path, sensitive_data: dict[str, str | dict[str, str]] | None = None) -> None:
        """Save history to JSON file, creating parent directories as needed.

        Fix: removed the pointless `except Exception as e: raise e` wrapper,
        which only re-raised the same exception while adding a frame.
        `sensitive_data` is accepted for API parity; it is not used here.
        """
        Path(filepath).parent.mkdir(parents=True, exist_ok=True)
        data = self.model_dump()
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/service.py | browser_use/code_use/service.py | """Code-use agent service - Jupyter notebook-like code execution for browser automation."""
import asyncio
import datetime
import html
import json
import logging
import re
import traceback
from pathlib import Path
from typing import Any
from uuid_extensions import uuid7str
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.dom.service import DomService
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
UserMessage,
)
from browser_use.screenshots.service import ScreenshotService
from browser_use.telemetry.service import ProductTelemetry
from browser_use.telemetry.views import AgentTelemetryEvent
from browser_use.tokens.service import TokenCost
from browser_use.tokens.views import UsageSummary
from browser_use.tools.service import CodeAgentTools, Tools
from browser_use.utils import get_browser_use_version
from .formatting import format_browser_state_for_llm
from .namespace import EvaluateError, create_namespace
from .utils import detect_token_limit_issue, extract_code_blocks, extract_url_from_task, truncate_message_content
from .views import (
CellType,
CodeAgentHistory,
CodeAgentHistoryList,
CodeAgentModelOutput,
CodeAgentResult,
CodeAgentState,
CodeAgentStepMetadata,
ExecutionStatus,
NotebookSession,
)
logger = logging.getLogger(__name__)
class CodeAgent:
"""
Agent that executes Python code in a notebook-like environment for browser automation.
This agent provides a Jupyter notebook-like interface where the LLM writes Python code
that gets executed in a persistent namespace with browser control functions available.
"""
def __init__(
    self,
    task: str,
    # Optional parameters
    llm: BaseChatModel | None = None,
    browser_session: BrowserSession | None = None,
    browser: BrowserSession | None = None,  # Alias for browser_session
    tools: Tools | None = None,
    controller: Tools | None = None,  # Alias for tools
    # Agent settings
    page_extraction_llm: BaseChatModel | None = None,
    file_system: FileSystem | None = None,
    available_file_paths: list[str] | None = None,
    sensitive_data: dict[str, str | dict[str, str]] | None = None,
    max_steps: int = 100,
    max_failures: int = 8,
    max_validations: int = 0,
    use_vision: bool = True,
    calculate_cost: bool = False,
    demo_mode: bool | None = None,
    **kwargs,
):
    """
    Initialize the code-use agent.

    Args:
        task: The task description for the agent
        browser_session: Optional browser session (will be created if not provided) [DEPRECATED: use browser]
        browser: Optional browser session (cleaner API)
        tools: Optional Tools instance (will create default if not provided)
        controller: Optional Tools instance
        page_extraction_llm: Optional LLM for page extraction
        file_system: Optional file system for file operations
        available_file_paths: Optional list of available file paths
        sensitive_data: Optional sensitive data dictionary
        max_steps: Maximum number of execution steps
        max_failures: Maximum consecutive errors before termination (default: 8)
        max_validations: Maximum number of times to run the validator agent (default: 0)
        use_vision: Whether to include screenshots in LLM messages (default: True)
        calculate_cost: Whether to calculate token costs (default: False)
        demo_mode: Enable the in-browser demo panel for live logging (default: False)
        llm: Optional ChatBrowserUse LLM instance (will create default if not provided)
        **kwargs: Additional keyword arguments for compatibility (ignored)

    Raises:
        RuntimeError: If no LLM is given and the default ChatBrowserUse cannot be created.
        ValueError: If the LLM is not ChatBrowserUse, or if both an alias and its
            canonical parameter are supplied (browser/browser_session, controller/tools).
    """
    # Log and ignore unknown kwargs for compatibility
    if kwargs:
        logger.debug(f'Ignoring additional kwargs for CodeAgent compatibility: {list(kwargs.keys())}')
    if llm is None:
        try:
            from browser_use import ChatBrowserUse

            llm = ChatBrowserUse()
            logger.debug('CodeAgent using ChatBrowserUse')
        except Exception as e:
            # Fix: chain the original exception so the real cause is preserved
            # in the traceback instead of being flattened into the message only.
            raise RuntimeError(f'Failed to initialize CodeAgent LLM: {e}') from e
    if 'ChatBrowserUse' not in llm.__class__.__name__:
        raise ValueError('This agent works only with ChatBrowserUse.')
    # Handle browser vs browser_session parameter (browser takes precedence)
    if browser and browser_session:
        raise ValueError('Cannot specify both "browser" and "browser_session" parameters. Use "browser" for the cleaner API.')
    browser_session = browser or browser_session
    # Handle controller vs tools parameter (controller takes precedence)
    if controller and tools:
        raise ValueError('Cannot specify both "controller" and "tools" parameters. Use "controller" for the cleaner API.')
    tools = controller or tools
    # Store browser_profile for creating browser session if needed
    self._demo_mode_enabled = False
    if browser_session is None:
        profile_kwargs: dict[str, Any] = {}
        if demo_mode is not None:
            profile_kwargs['demo_mode'] = demo_mode
        self._browser_profile_for_init = BrowserProfile(**profile_kwargs)
    else:
        self._browser_profile_for_init = None
    self.task = task
    self.llm = llm
    self.browser_session = browser_session
    if self.browser_session:
        # An explicit demo_mode overrides whatever the provided session's profile had.
        if demo_mode is not None and self.browser_session.browser_profile.demo_mode != demo_mode:
            self.browser_session.browser_profile = self.browser_session.browser_profile.model_copy(
                update={'demo_mode': demo_mode}
            )
        self._demo_mode_enabled = bool(self.browser_session.browser_profile.demo_mode)
    self.tools = tools or CodeAgentTools()
    self.page_extraction_llm = page_extraction_llm
    self.file_system = file_system if file_system is not None else FileSystem(base_dir='./')
    self.available_file_paths = available_file_paths or []
    self.sensitive_data = sensitive_data
    self.max_steps = max_steps
    self.max_failures = max_failures
    self.max_validations = max_validations
    self.use_vision = use_vision
    self.session = NotebookSession()
    self.namespace: dict[str, Any] = {}
    self._llm_messages: list[BaseMessage] = []  # Internal LLM conversation history
    self.complete_history: list[CodeAgentHistory] = []  # Type-safe history with model_output and result
    self.dom_service: DomService | None = None
    self._last_browser_state_text: str | None = None  # Track last browser state text
    self._last_screenshot: str | None = None  # Track last screenshot (base64)
    self._consecutive_errors = 0  # Track consecutive errors for auto-termination
    self._validation_count = 0  # Track number of validator runs
    self._last_llm_usage: Any | None = None  # Track last LLM call usage stats
    self._step_start_time = 0.0  # Track step start time for duration calculation
    self.usage_summary: UsageSummary | None = None  # Track usage summary across run for history property
    self._sample_output_added = False  # Track whether preview cell already created
    # Initialize screenshot service for eval tracking
    self.id = uuid7str()
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    base_tmp = Path('/tmp')
    self.agent_directory = base_tmp / f'browser_use_code_agent_{self.id}_{timestamp}'
    self.screenshot_service = ScreenshotService(agent_directory=self.agent_directory)
    # Initialize token cost service for usage tracking
    self.token_cost_service = TokenCost(include_cost=calculate_cost)
    self.token_cost_service.register_llm(llm)
    if page_extraction_llm:
        self.token_cost_service.register_llm(page_extraction_llm)
    # Set version and source for telemetry
    self.version = get_browser_use_version()
    try:
        # Heuristic: a git checkout has repo files next to the package; pip installs don't.
        package_root = Path(__file__).parent.parent.parent
        repo_files = ['.git', 'README.md', 'docs', 'examples']
        if all(Path(package_root / file).exists() for file in repo_files):
            self.source = 'git'
        else:
            self.source = 'pip'
    except Exception:
        self.source = 'unknown'
    # Telemetry
    self.telemetry = ProductTelemetry()
async def run(self, max_steps: int | None = None) -> NotebookSession:
    """
    Run the agent to complete the task.

    Drives the main loop: (optionally) start a browser, build the execution
    namespace, then repeatedly fetch browser state, ask the LLM for code,
    execute it, and record history until done() is called or a step/error
    limit is hit.

    Args:
        max_steps: Optional override for maximum number of steps (uses __init__ value if not provided)

    Returns:
        The notebook session with all executed cells
    """
    # Use override if provided, otherwise use value from __init__
    steps_to_run = max_steps if max_steps is not None else self.max_steps
    self.max_steps = steps_to_run
    # Start browser if not provided
    if self.browser_session is None:
        assert self._browser_profile_for_init is not None
        self.browser_session = BrowserSession(browser_profile=self._browser_profile_for_init)
        await self.browser_session.start()
    if self.browser_session:
        self._demo_mode_enabled = bool(self.browser_session.browser_profile.demo_mode)
        if self._demo_mode_enabled and getattr(self.browser_session.browser_profile, 'headless', False):
            logger.warning('Demo mode is enabled but the browser is headless=True; set headless=False to view the panel.')
    if self._demo_mode_enabled:
        await self._demo_mode_log(f'Started CodeAgent task: {self.task}', 'info', {'tag': 'task'})
    # Initialize DOM service with cross-origin iframe support enabled
    self.dom_service = DomService(
        browser_session=self.browser_session,
        cross_origin_iframes=True,  # Enable for code-use agent to access forms in iframes
    )
    # Create namespace with all tools
    self.namespace = create_namespace(
        browser_session=self.browser_session,
        tools=self.tools,
        page_extraction_llm=self.page_extraction_llm,
        file_system=self.file_system,
        available_file_paths=self.available_file_paths,
        sensitive_data=self.sensitive_data,
    )
    # Initialize conversation with task
    self._llm_messages.append(UserMessage(content=f'Task: {self.task}'))
    # Track agent run error for telemetry
    # NOTE(review): agent_run_error is never reassigned anywhere below, so the
    # telemetry event at the end always reports None — confirm this is intended.
    agent_run_error: str | None = None
    should_delay_close = False
    # Extract URL from task and navigate if found
    initial_url = extract_url_from_task(self.task)
    if initial_url:
        try:
            logger.info(f'Extracted URL from task, navigating to: {initial_url}')
            # Use the navigate action from namespace
            await self.namespace['navigate'](initial_url)
            # Wait for page load
            await asyncio.sleep(2)
            # Record this navigation as a cell in the notebook
            nav_code = f"await navigate('{initial_url}')"
            cell = self.session.add_cell(source=nav_code)
            cell.status = ExecutionStatus.SUCCESS
            cell.execution_count = self.session.increment_execution_count()
            cell.output = f'Navigated to {initial_url}'
            # Get browser state after navigation for the cell
            if self.dom_service:
                try:
                    browser_state_text, _ = await self._get_browser_state()
                    cell.browser_state = browser_state_text
                except Exception as state_error:
                    logger.debug(f'Failed to capture browser state for initial navigation cell: {state_error}')
        except Exception as e:
            logger.warning(f'Failed to navigate to extracted URL {initial_url}: {e}')
            # Record failed navigation as error cell
            nav_code = f"await navigate('{initial_url}')"
            cell = self.session.add_cell(source=nav_code)
            cell.status = ExecutionStatus.ERROR
            cell.execution_count = self.session.increment_execution_count()
            cell.error = str(e)
    # Get initial browser state before first LLM call
    if self.browser_session and self.dom_service:
        try:
            browser_state_text, screenshot = await self._get_browser_state()
            self._last_browser_state_text = browser_state_text
            self._last_screenshot = screenshot
        except Exception as e:
            logger.warning(f'Failed to get initial browser state: {e}')
    # Main execution loop
    for step in range(self.max_steps):
        logger.info(f'\n\n\n\n\n\n\nStep {step + 1}/{self.max_steps}')
        await self._demo_mode_log(f'Starting step {step + 1}/{self.max_steps}', 'info', {'step': step + 1})
        # Start timing this step
        self._step_start_time = datetime.datetime.now().timestamp()
        # Check if we're approaching the step limit or error limit and inject warning
        steps_remaining = self.max_steps - step - 1
        errors_remaining = self.max_failures - self._consecutive_errors
        should_warn = (
            steps_remaining <= 1  # Last step or next to last
            or errors_remaining <= 1  # One more error will terminate
            or (steps_remaining <= 2 and self._consecutive_errors >= 2)  # Close to both limits
        )
        if should_warn:
            warning_message = (
                f'\n\nโ ๏ธ CRITICAL WARNING: You are approaching execution limits!\n'
                f'- Steps remaining: {steps_remaining + 1}\n'
                f'- Consecutive errors: {self._consecutive_errors}/{self.max_failures}\n\n'
                f'YOU MUST call done() in your NEXT response, even if the task is incomplete:\n'
                f"- Set success=False if you couldn't complete the task\n"
                f'- Return EVERYTHING you found so far (partial data is better than nothing)\n'
                f"- Include any variables you've stored (products, all_data, etc.)\n"
                f"- Explain what worked and what didn't\n\n"
                f'Without done(), the user will receive NOTHING.'
            )
            self._llm_messages.append(UserMessage(content=warning_message))
        try:
            # Fetch fresh browser state right before LLM call (only if not already set)
            if not self._last_browser_state_text and self.browser_session and self.dom_service:
                try:
                    logger.debug('๐ Fetching browser state before LLM call...')
                    browser_state_text, screenshot = await self._get_browser_state()
                    self._last_browser_state_text = browser_state_text
                    self._last_screenshot = screenshot
                    # # Log browser state
                    # if len(browser_state_text) > 2000:
                    #     logger.info(
                    #         f'Browser state (before LLM):\n{browser_state_text[:2000]}...\n[Truncated, full state {len(browser_state_text)} chars sent to LLM]'
                    #     )
                    # else:
                    #     logger.info(f'Browser state (before LLM):\n{browser_state_text}')
                except Exception as e:
                    logger.warning(f'Failed to get browser state before LLM call: {e}')
            # Get code from LLM (this also adds to self._llm_messages)
            try:
                code, full_llm_response = await self._get_code_from_llm(step_number=step + 1)
            except Exception as llm_error:
                # LLM call failed - count as consecutive error and retry
                self._consecutive_errors += 1
                logger.warning(
                    f'LLM call failed (consecutive errors: {self._consecutive_errors}/{self.max_failures}), retrying: {llm_error}'
                )
                await self._demo_mode_log(
                    f'LLM call failed: {llm_error}',
                    'error',
                    {'step': step + 1},
                )
                # Check if we've hit the consecutive error limit
                if self._consecutive_errors >= self.max_failures:
                    logger.error(f'Terminating: {self.max_failures} consecutive LLM failures')
                    break
                await asyncio.sleep(1)  # Brief pause before retry
                continue
            if not code or code.strip() == '':
                # If task is already done, empty code is fine (LLM explaining completion)
                if self._is_task_done():
                    logger.info('Task already marked as done, LLM provided explanation without code')
                    # Add the text response to history as a non-code step
                    await self._add_step_to_complete_history(
                        model_output_code='',
                        full_llm_response=full_llm_response,
                        output=full_llm_response,  # Treat the explanation as output
                        error=None,
                        screenshot_path=await self._capture_screenshot(step + 1),
                    )
                    break  # Exit the loop since task is done
                logger.warning('LLM returned empty code')
                self._consecutive_errors += 1
                # new state
                if self.browser_session and self.dom_service:
                    try:
                        browser_state_text, screenshot = await self._get_browser_state()
                        self._last_browser_state_text = browser_state_text
                        self._last_screenshot = screenshot
                    except Exception as e:
                        logger.warning(f'Failed to get new browser state: {e}')
                continue
            # Execute code blocks sequentially if multiple python blocks exist
            # This allows JS/bash blocks to be injected into namespace before Python code uses them
            all_blocks = self.namespace.get('_all_code_blocks', {})
            python_blocks = [k for k in sorted(all_blocks.keys()) if k.startswith('python_')]
            if len(python_blocks) > 1:
                # Multiple Python blocks - execute each sequentially
                output = None
                error = None
                for i, block_key in enumerate(python_blocks):
                    logger.info(f'Executing Python block {i + 1}/{len(python_blocks)}')
                    block_code = all_blocks[block_key]
                    block_output, block_error, _ = await self._execute_code(block_code)
                    # Accumulate outputs
                    if block_output:
                        output = (output or '') + block_output
                    if block_error:
                        error = block_error
                        # Stop on first error
                        break
            else:
                # Single Python block - execute normally
                output, error, _ = await self._execute_code(code)
            # Track consecutive errors
            if error:
                self._consecutive_errors += 1
                logger.warning(f'Consecutive errors: {self._consecutive_errors}/{self.max_failures}')
                # Check if we've hit the consecutive error limit
                if self._consecutive_errors >= self.max_failures:
                    logger.error(
                        f'Terminating: {self.max_failures} consecutive errors reached. The agent is unable to make progress.'
                    )
                    await self._demo_mode_log(
                        f'Terminating after {self.max_failures} consecutive errors without progress.',
                        'error',
                        {'step': step + 1},
                    )
                    # Add termination message to complete history before breaking
                    await self._add_step_to_complete_history(
                        model_output_code=code,
                        full_llm_response=f'[Terminated after {self.max_failures} consecutive errors]',
                        output=None,
                        error=f'Auto-terminated: {self.max_failures} consecutive errors without progress',
                        screenshot_path=None,
                    )
                    break
            else:
                # Reset consecutive error counter on success
                self._consecutive_errors = 0
            # Check if task is done - validate completion first if not at limits
            if self._is_task_done():
                # Get the final result from namespace (from done() call)
                final_result: str | None = self.namespace.get('_task_result')  # type: ignore[assignment]
                # Check if we should validate (not at step/error limits and under max validations)
                steps_remaining = self.max_steps - step - 1
                should_validate = (
                    self._validation_count < self.max_validations  # Haven't exceeded max validations
                    and steps_remaining >= 4  # At least 4 steps away from limit
                    and self._consecutive_errors < 3  # Not close to error limit (8 consecutive)
                )
                if should_validate:
                    self._validation_count += 1
                    logger.info('Validating task completion with LLM...')
                    from .namespace import validate_task_completion

                    is_complete, reasoning = await validate_task_completion(
                        task=self.task,
                        output=final_result,
                        llm=self.llm,
                    )
                    if not is_complete:
                        # Task not truly complete - inject feedback and continue
                        logger.warning('Validator: Task not complete, continuing...')
                        validation_feedback = (
                            f'\n\nโ ๏ธ VALIDATOR FEEDBACK:\n'
                            f'Your done() call was rejected. The task is NOT complete yet.\n\n'
                            f'Validation reasoning:\n{reasoning}\n\n'
                            f'You must continue working on the task. Analyze what is missing and complete it.\n'
                            f'Do NOT call done() again until the task is truly finished.'
                        )
                        # Clear the done flag so execution continues
                        self.namespace['_task_done'] = False
                        self.namespace.pop('_task_result', None)
                        self.namespace.pop('_task_success', None)
                        # Add validation feedback to LLM messages
                        self._llm_messages.append(UserMessage(content=validation_feedback))
                        # Don't override output - let execution continue normally
                    else:
                        logger.info('Validator: Task complete')
                        # Override output with done message for final step
                        if final_result:
                            output = final_result
                else:
                    # At limits - skip validation and accept done()
                    if self._validation_count >= self.max_validations:
                        logger.info(
                            f'Reached max validations ({self.max_validations}) - skipping validation and accepting done()'
                        )
                    else:
                        logger.info('At step/error limits - skipping validation')
                    if final_result:
                        output = final_result
            if output:
                # Check if this is the final done() output
                if self._is_task_done():
                    # Show done() output more prominently
                    logger.info(
                        f'โ Task completed - Final output from done():\n{output[:300] if len(output) > 300 else output}'
                    )
                    # Also show files_to_display if they exist in namespace
                    attachments: list[str] | None = self.namespace.get('_task_attachments')  # type: ignore[assignment]
                    if attachments:
                        logger.info(f'Files displayed: {", ".join(attachments)}')
                else:
                    logger.info(f'Code output:\n{output}')
            # Browser state is now only logged when fetched before LLM call (not after execution)
            # Take screenshot for eval tracking
            screenshot_path = await self._capture_screenshot(step + 1)
            # Add step to complete_history for eval system
            await self._add_step_to_complete_history(
                model_output_code=code,
                full_llm_response=full_llm_response,
                output=output,
                error=error,
                screenshot_path=screenshot_path,
            )
            # Check if task is done (after validation)
            if self._is_task_done():
                # Get the final result from namespace
                final_result: str | None = self.namespace.get('_task_result', output)  # type: ignore[assignment]
                logger.info('Task completed successfully')
                if final_result:
                    logger.info(f'Final result: {final_result}')
                    self._add_sample_output_cell(final_result)
                if self._demo_mode_enabled:
                    await self._demo_mode_log(
                        f'Final Result: {final_result or "Task completed"}',
                        'success',
                        {'tag': 'task'},
                    )
                should_delay_close = True
                break
            # If validation rejected done(), continue to next iteration
            # The feedback message has already been added to _llm_messages
            # Add result to LLM messages for next iteration (without browser state)
            result_message = self._format_execution_result(code, output, error, current_step=step + 1)
            truncated_result = truncate_message_content(result_message)
            self._llm_messages.append(UserMessage(content=truncated_result))
        except Exception as e:
            # Any unexpected error in the step machinery aborts the run loop.
            logger.error(f'Error in step {step + 1}: {e}')
            traceback.print_exc()
            break
    else:
        # Loop completed without break - max_steps reached
        logger.warning(f'Maximum steps ({self.max_steps}) reached without task completion')
        await self._demo_mode_log(
            f'Maximum steps ({self.max_steps}) reached without completing the task.',
            'error',
            {'tag': 'task'},
        )
    # If task is not done, capture the last step's output as partial result
    if not self._is_task_done() and self.complete_history:
        # Get the last step's output/error and use it as final extracted_content
        # NOTE(review): this reads result[0] of the last step, while final_result()
        # elsewhere reads result[-1] — confirm the asymmetry is intentional.
        last_step = self.complete_history[-1]
        last_result = last_step.result[0] if last_step.result else None
        last_output = last_result.extracted_content if last_result else None
        last_error = last_result.error if last_result else None
        # Build a partial result message from the last step
        partial_result_parts = []
        partial_result_parts.append(f'Task incomplete - reached step limit ({self.max_steps} steps).')
        partial_result_parts.append('Last step output:')
        if last_output:
            partial_result_parts.append(f'\nOutput: {last_output}')
        if last_error:
            partial_result_parts.append(f'\nError: {last_error}')
        # Add any accumulated variables that might contain useful data
        data_vars = []
        for var_name in sorted(self.namespace.keys()):
            if not var_name.startswith('_') and var_name not in {'json', 'asyncio', 'csv', 're', 'datetime', 'Path'}:
                var_value = self.namespace[var_name]
                # Check if it's a list or dict that might contain collected data
                if isinstance(var_value, (list, dict)) and var_value:
                    data_vars.append(f'  - {var_name}: {type(var_value).__name__} with {len(var_value)} items')
        if data_vars:
            partial_result_parts.append('\nVariables in namespace that may contain partial data:')
            partial_result_parts.extend(data_vars)
        partial_result = '\n'.join(partial_result_parts)
        # Update the last step's extracted_content with this partial result
        if last_result:
            last_result.extracted_content = partial_result
            last_result.is_done = False
            last_result.success = False
        logger.info(f'\nPartial result captured from last step:\n{partial_result}')
        if self._demo_mode_enabled:
            await self._demo_mode_log(f'Partial result:\n{partial_result}', 'error', {'tag': 'task'})
    # Log final summary if task was completed
    if self._is_task_done():
        logger.info('\n' + '=' * 60)
        logger.info('TASK COMPLETED SUCCESSFULLY')
        logger.info('=' * 60)
        final_result: str | None = self.namespace.get('_task_result')  # type: ignore[assignment]
        if final_result:
            logger.info(f'\nFinal Output:\n{final_result}')
            self._add_sample_output_cell(final_result)
        attachments: list[str] | None = self.namespace.get('_task_attachments')  # type: ignore[assignment]
        if attachments:
            logger.info(f'\nFiles Attached:\n{chr(10).join(attachments)}')
        logger.info('=' * 60 + '\n')
        if self._demo_mode_enabled and not should_delay_close:
            await self._demo_mode_log(
                f'Final Result: {final_result or "Task completed"}',
                'success',
                {'tag': 'task'},
            )
            should_delay_close = True
    # Auto-close browser if keep_alive is False
    # NOTE(review): the comment above does not match the condition — the delayed
    # close only runs in demo mode and keep_alive is never consulted here; confirm.
    if should_delay_close and self._demo_mode_enabled:
        await asyncio.sleep(30)
        await self.close()
    # Store usage summary for history property
    self.usage_summary = await self.token_cost_service.get_usage_summary()
    # Log token usage summary
    await self.token_cost_service.log_usage_summary()
    # Log telemetry event
    try:
        self._log_agent_event(max_steps=self.max_steps, agent_run_error=agent_run_error)
    except Exception as log_e:
        logger.error(f'Failed to log telemetry event: {log_e}', exc_info=True)
    # Store history data in session for history property
    self.session._complete_history = self.complete_history
    self.session._usage_summary = self.usage_summary
    return self.session
async def _get_code_from_llm(self, step_number: int | None = None) -> tuple[str, str]:
    """Get Python code from the LLM.

    Sends the accumulated message history plus, when available, a one-shot user
    message carrying the latest browser state (and a screenshot if vision is
    enabled), detects token-limit/repetition failures, extracts fenced code
    blocks from the reply, and records a truncated copy of the reply in history.

    Args:
        step_number: Current step index; used only to tag demo-mode log output.

    Returns:
        Tuple of (extracted_code, full_llm_response). On a detected token-limit
        issue, extracted_code is '' and the response is a bracketed error marker.
    """
    # Prepare messages for this request
    # Include browser state as separate message if available (not accumulated in history)
    messages_to_send = self._llm_messages.copy()
    if self._last_browser_state_text:
        # Create message with optional screenshot
        if self.use_vision and self._last_screenshot:
            # Build content with text + screenshot
            content_parts: list[ContentPartTextParam | ContentPartImageParam] = [
                ContentPartTextParam(text=self._last_browser_state_text)
            ]
            # Add screenshot
            content_parts.append(
                ContentPartImageParam(
                    image_url=ImageURL(
                        url=f'data:image/png;base64,{self._last_screenshot}',
                        media_type='image/png',
                        detail='auto',
                    ),
                )
            )
            messages_to_send.append(UserMessage(content=content_parts))
        else:
            # Text only
            messages_to_send.append(UserMessage(content=self._last_browser_state_text))
        # Clear browser state after including it so it's only in this request
        self._last_browser_state_text = None
        self._last_screenshot = None
    # Call LLM with message history (including temporary browser state message)
    response = await self.llm.ainvoke(messages_to_send)
    # Store usage stats from this LLM call
    self._last_llm_usage = response.usage
    # Log the LLM's raw output for debugging
    logger.info(f'LLM Response:\n{response.completion}')
    await self._demo_mode_log(
        f'LLM Response:\n{response.completion}',
        'thought',
        {'step': step_number} if step_number else None,
    )
    # Check for token limit or repetition issues
    max_tokens = getattr(self.llm, 'max_tokens', None)
    completion_tokens = response.usage.completion_tokens if response.usage else None
    is_problematic, issue_message = detect_token_limit_issue(
        completion=response.completion,
        completion_tokens=completion_tokens,
        max_tokens=max_tokens,
        stop_reason=response.stop_reason,
    )
    if is_problematic:
        logger.warning(f'Token limit issue detected: {issue_message}')
        # Don't add the bad response to history
        # Instead, inject a system message prompting recovery
        recovery_prompt = (
            f'Your previous response hit a token limit or became repetitive: {issue_message}\n\n'
            'Please write a SHORT plan (2 sentences) for what to do next, then execute ONE simple action.'
        )
        self._llm_messages.append(UserMessage(content=recovery_prompt))
        # Return a controlled error message instead of corrupted code
        return '', f'[Token limit error: {issue_message}]'
    # Store the full response
    full_response = response.completion
    # Extract code blocks from response
    # Support multiple code block types: python, js, bash, markdown
    code_blocks = extract_code_blocks(response.completion)
    # Inject non-python blocks into namespace as variables
    # Track which variables are code blocks for browser state display
    if '_code_block_vars' not in self.namespace:
        self.namespace['_code_block_vars'] = set()
    for block_type, block_content in code_blocks.items():
        if not block_type.startswith('python'):
            # Store js, bash, markdown blocks (and named variants) as variables in namespace
            self.namespace[block_type] = block_content
            self.namespace['_code_block_vars'].add(block_type)
            print(f'โ Code block variable: {block_type} (str, {len(block_content)} chars)')
            logger.debug(f'Injected {block_type} block into namespace ({len(block_content)} chars)')
    # Store all code blocks for sequential execution
    self.namespace['_all_code_blocks'] = code_blocks
    # Get Python code if it exists
    # If no python block exists and no other code blocks exist, return empty string to skip execution
    # This prevents treating plain text explanations as code
    # NOTE(review): the comment above says "return empty string", but the default
    # here is the full completion, so a reply with no fenced blocks is treated as
    # code. Confirm which behavior is intended.
    code = code_blocks.get('python', response.completion)
    # Add to LLM messages (truncate for history to save context)
    truncated_completion = truncate_message_content(response.completion)
    self._llm_messages.append(AssistantMessage(content=truncated_completion))
    return code, full_response
def _print_variable_info(self, var_name: str, value: Any) -> None:
    """Print a compact one-line summary of a freshly assigned namespace variable."""
    # Well-known modules/objects injected into every namespace - not worth announcing.
    skip_names = {
        'json',
        'asyncio',
        'csv',
        're',
        'datetime',
        'Path',
        'pd',
        'np',
        'plt',
        'requests',
        'BeautifulSoup',
        'PdfReader',
        'browser',
        'file_system',
    }
    if var_name in skip_names:
        return
    # Code-block variables were already announced when they were injected.
    if '_code_block_vars' in self.namespace and var_name in self.namespace.get('_code_block_vars', set()):
        return
    # Build the per-type summary, then emit a single print.
    if isinstance(value, (list, dict)):
        snippet = str(value)[:100]
        summary = f'({type(value).__name__}, len={len(value)}, preview={snippet}...)'
    elif isinstance(value, str) and len(value) > 50:
        summary = f'(str, {len(value)} chars, preview={value[:50]}...)'
    elif callable(value):
        summary = '(function)'
    else:
        summary = f'({type(value).__name__}, value={repr(value)[:50]})'
    print(f'โ Variable: {var_name} {summary}')
async def _execute_code(self, code: str) -> tuple[str | None, str | None, str | None]:
"""
Execute Python code in the namespace.
Args:
code: The Python code to execute
Returns:
Tuple of (output, error, browser_state)
"""
# Create new cell
cell = self.session.add_cell(source=code)
cell.status = ExecutionStatus.RUNNING
cell.execution_count = self.session.increment_execution_count()
output = None
error = None
browser_state = None
try:
# Capture output
import ast
import io
import sys
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# Add asyncio to namespace if not already there
if 'asyncio' not in self.namespace:
self.namespace['asyncio'] = asyncio
# Store the current code in namespace for done() validation
self.namespace['_current_cell_code'] = code
# Store consecutive errors count for done() validation
self.namespace['_consecutive_errors'] = self._consecutive_errors
# Check if code contains await expressions - if so, wrap in async function
# This mimics how Jupyter/IPython handles top-level await
try:
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/utils.py | browser_use/code_use/utils.py | """Utility functions for code-use agent."""
import re
def truncate_message_content(content: str, max_length: int = 10000) -> str:
    """Cap *content* at *max_length* characters, appending a truncation marker when cut."""
    overflow = len(content) - max_length
    if overflow <= 0:
        return content
    marker = f'\n\n[... truncated {overflow} characters for history]'
    return content[:max_length] + marker
def detect_token_limit_issue(
completion: str,
completion_tokens: int | None,
max_tokens: int | None,
stop_reason: str | None,
) -> tuple[bool, str | None]:
"""
Detect if the LLM response hit token limits or is repetitive garbage.
Returns: (is_problematic, error_message)
"""
# Check 1: Stop reason indicates max_tokens
if stop_reason == 'max_tokens':
return True, f'Response terminated due to max_tokens limit (stop_reason: {stop_reason})'
# Check 2: Used 90%+ of max_tokens (if we have both values)
if completion_tokens is not None and max_tokens is not None and max_tokens > 0:
usage_ratio = completion_tokens / max_tokens
if usage_ratio >= 0.9:
return True, f'Response used {usage_ratio:.1%} of max_tokens ({completion_tokens}/{max_tokens})'
# Check 3: Last 6 characters repeat 40+ times (repetitive garbage)
if len(completion) >= 6:
last_6 = completion[-6:]
repetition_count = completion.count(last_6)
if repetition_count >= 40:
return True, f'Repetitive output detected: last 6 chars "{last_6}" appears {repetition_count} times'
return False, None
def extract_url_from_task(task: str) -> str | None:
"""Extract URL from task string using naive pattern matching."""
# Remove email addresses from task before looking for URLs
task_without_emails = re.sub(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', '', task)
# Look for common URL patterns
patterns = [
r'https?://[^\s<>"\']+', # Full URLs with http/https
r'(?:www\.)?[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*\.[a-zA-Z]{2,}(?:/[^\s<>"\']*)?', # Domain names with subdomains and optional paths
]
found_urls = []
for pattern in patterns:
matches = re.finditer(pattern, task_without_emails)
for match in matches:
url = match.group(0)
# Remove trailing punctuation that's not part of URLs
url = re.sub(r'[.,;:!?()\[\]]+$', '', url)
# Add https:// if missing
if not url.startswith(('http://', 'https://')):
url = 'https://' + url
found_urls.append(url)
unique_urls = list(set(found_urls))
# If multiple URLs found, skip auto-navigation to avoid ambiguity
if len(unique_urls) > 1:
return None
# If exactly one URL found, return it
if len(unique_urls) == 1:
return unique_urls[0]
return None
def extract_code_blocks(text: str) -> dict[str, str]:
    """Extract all code blocks from a markdown response.

    Supports:
    - ```python, ```js, ```javascript, ```bash, ```sh, ```shell, ```markdown, ```md
    - Named blocks: ```js variable_name -> saved under 'variable_name'
    - Nested blocks: use 4+ backticks for the outer fence when the inner
      content itself contains 3 backticks

    Python blocks are NOT combined: each unnamed one gets its own
    'python_<i>' key so blocks can run sequentially (with JS/bash blocks in
    between); 'python' aliases the first for backward compatibility.

    Args:
        text: Raw LLM completion possibly containing fenced code blocks.

    Returns:
        Dict mapping block_name -> content.
    """
    # Matches ```lang\n, ```lang varname\n, or ````+lang\n (4+ backticks for
    # nesting). The backreference \1 requires the closing fence to match the
    # opening one exactly.
    pattern = r'(`{3,})(\w+)(?:\s+(\w+))?\n(.*?)\1(?:\n|$)'
    matches = re.findall(pattern, text, re.DOTALL)
    blocks: dict[str, str] = {}
    python_block_counter = 0
    for backticks, lang, var_name, content in matches:
        lang = lang.lower()
        # Normalize language names
        if lang in ('javascript', 'js'):
            lang_normalized = 'js'
        elif lang in ('markdown', 'md'):
            lang_normalized = 'markdown'
        elif lang in ('sh', 'shell', 'bash'):
            # FIX: 'bash' itself was previously missing from this branch, so
            # ```bash blocks were silently dropped despite being supported.
            lang_normalized = 'bash'
        elif lang == 'python':
            lang_normalized = 'python'
        else:
            # Unknown language, skip
            continue
        # Only strip trailing whitespace; leading whitespace is significant indentation
        content = content.rstrip()
        if content:
            if var_name:
                # Named block - use the variable name as the key
                blocks[var_name] = content
            elif lang_normalized == 'python':
                # Unnamed Python blocks get unique keys to preserve execution order
                blocks[f'python_{python_block_counter}'] = content
                python_block_counter += 1
            else:
                # Other unnamed blocks (js, bash, markdown) - keep last one only
                blocks[lang_normalized] = content
    # Alias the first python block as 'python' for backward compat
    if python_block_counter > 0:
        blocks['python'] = blocks['python_0']
    # Fallback: a generic ``` block with no language tag is treated as python
    if python_block_counter == 0 and 'python' not in blocks:
        generic_pattern = r'```\n(.*?)```'
        generic_matches = re.findall(generic_pattern, text, re.DOTALL)
        if generic_matches:
            combined = '\n\n'.join(m.strip() for m in generic_matches if m.strip())
            if combined:
                blocks['python'] = combined
    return blocks
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/namespace.py | browser_use/code_use/namespace.py | """Namespace initialization for code-use mode.
This module creates a namespace with all browser tools available as functions,
similar to a Jupyter notebook environment.
"""
import asyncio
import csv
import datetime
import json
import logging
import re
from pathlib import Path
from typing import Any
import requests
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.tools.service import CodeAgentTools, Tools
logger = logging.getLogger(__name__)
# Try to import optional data science libraries
try:
import numpy as np # type: ignore
NUMPY_AVAILABLE = True
except ImportError:
NUMPY_AVAILABLE = False
try:
import pandas as pd # type: ignore
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
try:
import matplotlib.pyplot as plt # type: ignore
MATPLOTLIB_AVAILABLE = True
except ImportError:
MATPLOTLIB_AVAILABLE = False
try:
from bs4 import BeautifulSoup # type: ignore
BS4_AVAILABLE = True
except ImportError:
BS4_AVAILABLE = False
try:
from pypdf import PdfReader # type: ignore
PYPDF_AVAILABLE = True
except ImportError:
PYPDF_AVAILABLE = False
try:
from tabulate import tabulate # type: ignore
TABULATE_AVAILABLE = True
except ImportError:
TABULATE_AVAILABLE = False
def _strip_js_comments(js_code: str) -> str:
    """
    Strip JavaScript comments prior to CDP evaluation.

    CDP's Runtime.evaluate does not handle comments in all contexts, so we
    drop /* ... */ blocks and any line that consists solely of a // comment.
    Inline // after other content is left alone so XPath strings, URLs and
    regex literals survive.

    Args:
        js_code: JavaScript code potentially containing comments

    Returns:
        JavaScript code with comments stripped
    """
    without_block_comments = re.sub(r'/\*.*?\*/', '', js_code, flags=re.DOTALL)
    return re.sub(r'^\s*//.*$', '', without_block_comments, flags=re.MULTILINE)
class EvaluateError(Exception):
    """Raised by evaluate() when JavaScript execution fails; aborts the Python cell immediately."""
async def validate_task_completion(
    task: str,
    output: str | None,
    llm: BaseChatModel,
) -> tuple[bool, str]:
    """
    Ask the LLM - with a fresh context (no system prompt, no history) - whether
    the task is genuinely complete.

    Args:
        task: The original task description
        output: The output from the done() call
        llm: The LLM to use for validation

    Returns:
        Tuple of (is_complete, reasoning). On any validation error the agent is
        given the benefit of the doubt and (True, <error info>) is returned.
    """
    from browser_use.llm.messages import UserMessage

    # Build validation prompt (output capped at 100k chars)
    validation_prompt = f"""You are a task completion validator. Analyze if the agent has truly completed the user's task.
**Original Task:**
{task}
**Agent's Output:**
{output[:100000] if output else '(No output provided)'}
**Your Task:**
Determine if the agent has successfully completed the user's task. Consider:
1. Has the agent delivered what the user requested?
2. If data extraction was requested, is there actual data?
3. If the task is impossible (e.g., localhost website, login required but no credentials), is it truly impossible?
4. Could the agent continue and make meaningful progress?
**Response Format:**
Reasoning: [Your analysis of whether the task is complete]
Verdict: [YES or NO]
YES = Task is complete OR truly impossible to complete
NO = Agent should continue working"""
    try:
        # One-shot call: the validator sees only this prompt.
        response = await llm.ainvoke([UserMessage(content=validation_prompt)])
        reply = response.completion
        reasoning = ''
        verdict = 'NO'
        # Scan the reply line by line; last matching line wins for each field.
        for raw_line in reply.split('\n'):
            lowered = raw_line.strip().lower()
            if lowered.startswith('reasoning:'):
                reasoning = raw_line.split(':', 1)[1].strip()
            elif lowered.startswith('verdict:'):
                verdict_text = raw_line.split(':', 1)[1].strip().upper()
                if 'YES' in verdict_text:
                    verdict = 'YES'
                elif 'NO' in verdict_text:
                    verdict = 'NO'
        # Fall back to the whole reply when no 'Reasoning:' line was found.
        if not reasoning:
            reasoning = reply
        is_complete = verdict == 'YES'
        logger.info(f'Task validation: {verdict}')
        logger.debug(f'Validation reasoning: {reasoning}')
        return is_complete, reasoning
    except Exception as e:
        logger.warning(f'Failed to validate task completion: {e}')
        # On error, assume the agent knows what they're doing
        return True, f'Validation failed: {e}'
async def evaluate(code: str, browser_session: BrowserSession) -> Any:
    """
    Execute JavaScript code in the browser and return the result.

    Args:
        code: JavaScript code to execute (must be wrapped in IIFE)
        browser_session: Session whose CDP connection runs the expression.

    Returns:
        The result of the JavaScript execution, deserialized to Python values
        by returnByValue. The string 'undefined' is returned when the
        expression evaluated to JS undefined (no 'value' key in the result).

    Raises:
        EvaluateError: If JavaScript execution fails. This stops Python execution immediately.

    Example:
        result = await evaluate('''
            (function(){
                return Array.from(document.querySelectorAll('.product')).map(p => ({
                    name: p.querySelector('.name').textContent,
                    price: p.querySelector('.price').textContent
                }))
            })()
        ''')
    """
    # Strip JavaScript comments before CDP evaluation (CDP doesn't support them in all contexts)
    code = _strip_js_comments(code)
    cdp_session = await browser_session.get_or_create_cdp_session()
    try:
        # Execute JavaScript with proper error handling.
        # returnByValue deserializes the result; awaitPromise resolves promises first.
        result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': code, 'returnByValue': True, 'awaitPromise': True},
            session_id=cdp_session.session_id,
        )
        # Check for JavaScript execution errors
        if result.get('exceptionDetails'):
            exception = result['exceptionDetails']
            error_text = exception.get('text', 'Unknown error')
            # Try to get more details from the exception
            error_details = []
            if 'exception' in exception:
                exc_obj = exception['exception']
                if 'description' in exc_obj:
                    error_details.append(exc_obj['description'])
                elif 'value' in exc_obj:
                    error_details.append(str(exc_obj['value']))
            # Build comprehensive error message with full CDP context
            error_msg = f'JavaScript execution error: {error_text}'
            if error_details:
                error_msg += f'\nDetails: {" | ".join(error_details)}'
            # Raise special exception that will stop Python execution immediately
            raise EvaluateError(error_msg)
        # Get the result data
        result_data = result.get('result', {})
        # Get the actual value
        value = result_data.get('value')
        # Return the value directly
        if value is None:
            # Distinguish an actual JS null (key present) from JS undefined (key absent)
            return None if 'value' in result_data else 'undefined'
        elif isinstance(value, (dict, list)):
            # Complex objects - already deserialized by returnByValue
            return value
        else:
            # Primitive values
            return value
    except EvaluateError:
        # Re-raise EvaluateError as-is to stop Python execution
        raise
    except Exception as e:
        # Wrap other exceptions in EvaluateError
        raise EvaluateError(f'Failed to execute JavaScript: {type(e).__name__}: {e}') from e
def create_namespace(
    browser_session: BrowserSession,
    tools: Tools | None = None,
    page_extraction_llm: BaseChatModel | None = None,
    file_system: FileSystem | None = None,
    available_file_paths: list[str] | None = None,
    sensitive_data: dict[str, str | dict[str, str]] | None = None,
) -> dict[str, Any]:
    """
    Create a namespace with all browser tools available as functions.

    This function creates a dictionary of functions that can be used to interact
    with the browser, similar to a Jupyter notebook environment.

    Args:
        browser_session: The browser session to use
        tools: Optional Tools instance (will create default if not provided)
        page_extraction_llm: Optional LLM for page extraction
        file_system: Optional file system for file operations
        available_file_paths: Optional list of available file paths
        sensitive_data: Optional sensitive data dictionary
            (not read anywhere in this function body - presumably reserved for
            callers/future use; confirm before removing)

    Returns:
        Dictionary containing all available functions and objects

    Example:
        namespace = create_namespace(browser_session)
        await namespace['navigate'](url='https://google.com')
        result = await namespace['evaluate']('document.title')
    """
    if tools is None:
        # Use CodeAgentTools with default exclusions optimized for code-use mode
        # For code-use, we keep: navigate, evaluate, wait, done
        # and exclude: most browser interaction, file system actions (use Python instead)
        tools = CodeAgentTools()
    if available_file_paths is None:
        available_file_paths = []
    namespace: dict[str, Any] = {
        # Core objects
        'browser': browser_session,
        'file_system': file_system,
        # Standard library modules (always available)
        'json': json,
        'asyncio': asyncio,
        'Path': Path,
        'csv': csv,
        're': re,
        'datetime': datetime,
        'requests': requests,
    }
    # Add optional data science libraries if available
    if NUMPY_AVAILABLE:
        namespace['np'] = np
        namespace['numpy'] = np
    if PANDAS_AVAILABLE:
        namespace['pd'] = pd
        namespace['pandas'] = pd
    if MATPLOTLIB_AVAILABLE:
        namespace['plt'] = plt
        namespace['matplotlib'] = plt
    if BS4_AVAILABLE:
        namespace['BeautifulSoup'] = BeautifulSoup
        namespace['bs4'] = BeautifulSoup
    if PYPDF_AVAILABLE:
        namespace['PdfReader'] = PdfReader
        namespace['pypdf'] = PdfReader
    if TABULATE_AVAILABLE:
        namespace['tabulate'] = tabulate
    # Track failed evaluate() calls to detect repeated failed approaches
    # NOTE(review): namespace was just built above, so this guard is always
    # taken; harmless, but the condition is redundant here.
    if '_evaluate_failures' not in namespace:
        namespace['_evaluate_failures'] = []

    # Add custom evaluate function that returns values directly
    async def evaluate_wrapper(
        code: str | None = None, variables: dict[str, Any] | None = None, *_args: Any, **kwargs: Any
    ) -> Any:
        # Handle both positional and keyword argument styles
        if code is None:
            # Check if code was passed as keyword arg
            code = kwargs.get('code', kwargs.get('js_code', kwargs.get('expression', '')))
        # Extract variables if passed as kwarg
        if variables is None:
            variables = kwargs.get('variables')
        if not code:
            raise ValueError('No JavaScript code provided to evaluate()')
        # Inject variables if provided
        if variables:
            vars_json = json.dumps(variables)
            stripped = code.strip()
            # Check if code is already a function expression expecting params
            # Pattern: (function(params) { ... }) or (async function(params) { ... })
            if re.match(r'\((?:async\s+)?function\s*\(\s*\w+\s*\)', stripped):
                # Already expects params, wrap to call it with our variables
                code = f'(function(){{ const params = {vars_json}; return {stripped}(params); }})()'
            else:
                # Not a parameterized function, inject params in scope
                # Check if already wrapped in IIFE (including arrow function IIFEs)
                is_wrapped = (
                    (stripped.startswith('(function()') and '})()' in stripped[-10:])
                    or (stripped.startswith('(async function()') and '})()' in stripped[-10:])
                    or (stripped.startswith('(() =>') and ')()' in stripped[-10:])
                    or (stripped.startswith('(async () =>') and ')()' in stripped[-10:])
                )
                if is_wrapped:
                    # Already wrapped, inject params at the start
                    # Try to match regular function IIFE
                    match = re.match(r'(\((?:async\s+)?function\s*\(\s*\)\s*\{)', stripped)
                    if match:
                        prefix = match.group(1)
                        rest = stripped[len(prefix) :]
                        code = f'{prefix} const params = {vars_json}; {rest}'
                    else:
                        # Try to match arrow function IIFE
                        # Patterns: (() => expr)() or (() => { ... })() or (async () => ...)()
                        arrow_match = re.match(r'(\((?:async\s+)?\(\s*\)\s*=>\s*\{)', stripped)
                        if arrow_match:
                            # Arrow function with block body: (() => { ... })()
                            prefix = arrow_match.group(1)
                            rest = stripped[len(prefix) :]
                            code = f'{prefix} const params = {vars_json}; {rest}'
                        else:
                            # Arrow function with expression body or fallback: wrap in outer function
                            code = f'(function(){{ const params = {vars_json}; return {stripped}; }})()'
                else:
                    # Not wrapped, wrap with params
                    code = f'(function(){{ const params = {vars_json}; {code} }})()'
            # Skip auto-wrap below
            return await evaluate(code, browser_session)
        # Auto-wrap in IIFE if not already wrapped (and no variables were injected)
        if not variables:
            stripped = code.strip()
            # Check for regular function IIFEs, async function IIFEs, and arrow function IIFEs
            is_wrapped = (
                (stripped.startswith('(function()') and '})()' in stripped[-10:])
                or (stripped.startswith('(async function()') and '})()' in stripped[-10:])
                or (stripped.startswith('(() =>') and ')()' in stripped[-10:])
                or (stripped.startswith('(async () =>') and ')()' in stripped[-10:])
            )
            if not is_wrapped:
                code = f'(function(){{{code}}})()'
        # Execute and track failures
        try:
            result = await evaluate(code, browser_session)
            # Print result structure for debugging
            if isinstance(result, list) and result and isinstance(result[0], dict):
                result_preview = f'list of dicts - len={len(result)}, example 1:\n'
                sample_result = result[0]
                for key, value in list(sample_result.items())[:10]:
                    value_str = str(value)[:10] if not isinstance(value, (int, float, bool, type(None))) else str(value)
                    result_preview += f' {key}: {value_str}...\n'
                if len(sample_result) > 10:
                    result_preview += f' ... {len(sample_result) - 10} more keys'
                print(result_preview)
            elif isinstance(result, list):
                if len(result) == 0:
                    print('type=list, len=0')
                else:
                    result_preview = str(result)[:100]
                    print(f'type=list, len={len(result)}, preview={result_preview}...')
            elif isinstance(result, dict):
                result_preview = f'type=dict, len={len(result)}, sample keys:\n'
                for key, value in list(result.items())[:10]:
                    value_str = str(value)[:10] if not isinstance(value, (int, float, bool, type(None))) else str(value)
                    result_preview += f' {key}: {value_str}...\n'
                if len(result) > 10:
                    result_preview += f' ... {len(result) - 10} more keys'
                print(result_preview)
            else:
                print(f'type={type(result).__name__}, value={repr(result)[:50]}')
            return result
        except Exception as e:
            # Track errors for pattern detection
            namespace['_evaluate_failures'].append({'error': str(e), 'type': 'exception'})
            raise

    namespace['evaluate'] = evaluate_wrapper

    # Add get_selector_from_index helper for code_use mode
    async def get_selector_from_index_wrapper(index: int) -> str:
        """
        Get the CSS selector for an element by its interactive index.

        This allows you to use the element's index from the browser state to get
        its CSS selector for use in JavaScript evaluate() calls.

        Args:
            index: The interactive index from the browser state (e.g., [123])

        Returns:
            str: CSS selector that can be used in JavaScript

        Example:
            selector = await get_selector_from_index(123)
            await evaluate(f'''
                (function(){{
                    const el = document.querySelector({json.dumps(selector)});
                    if (el) el.click();
                }})()
            ''')
        """
        from browser_use.dom.utils import generate_css_selector_for_element

        # Get element by index from browser session
        node = await browser_session.get_element_by_index(index)
        if node is None:
            msg = f'Element index {index} not available - page may have changed. Try refreshing browser state.'
            logger.warning(f'โ ๏ธ {msg}')
            raise RuntimeError(msg)
        # Check if element is in shadow DOM (walk ancestors collecting shadow hosts)
        shadow_hosts = []
        current = node.parent_node
        while current:
            if current.shadow_root_type is not None:
                # This is a shadow host
                host_tag = current.tag_name.lower()
                host_id = current.attributes.get('id', '') if current.attributes else ''
                host_desc = f'{host_tag}#{host_id}' if host_id else host_tag
                shadow_hosts.insert(0, host_desc)
            current = current.parent_node
        # Check if in iframe
        in_iframe = False
        current = node.parent_node
        while current:
            if current.tag_name.lower() == 'iframe':
                in_iframe = True
                break
            current = current.parent_node
        # Use the robust selector generation function (now handles special chars in IDs)
        selector = generate_css_selector_for_element(node)
        # Log shadow DOM/iframe info if detected
        if shadow_hosts:
            shadow_path = ' > '.join(shadow_hosts)
            logger.info(f'Element [{index}] is inside Shadow DOM. Path: {shadow_path}')
            logger.info(f' Selector: {selector}')
            logger.info(
                f' To access: document.querySelector("{shadow_hosts[0].split("#")[0]}").shadowRoot.querySelector("{selector}")'
            )
        if in_iframe:
            logger.info(f"Element [{index}] is inside an iframe. Regular querySelector won't work.")
        if selector:
            return selector
        # Fallback: just use tag name if available
        if node.tag_name:
            return node.tag_name.lower()
        raise ValueError(f'Could not generate selector for element index {index}')

    namespace['get_selector_from_index'] = get_selector_from_index_wrapper

    # Inject all tools as functions into the namespace
    # Skip 'evaluate' since we have a custom implementation above
    for action_name, action in tools.registry.registry.actions.items():
        if action_name == 'evaluate':
            continue  # Skip - use custom evaluate that returns Python objects directly
        param_model = action.param_model
        action_function = action.function

        # Create a closure to capture the current action_name, param_model, and action_function
        def make_action_wrapper(act_name, par_model, act_func):
            async def action_wrapper(*args, **kwargs):
                # Convert positional args to kwargs based on param model fields
                if args:
                    # Get the field names from the pydantic model
                    field_names = list(par_model.model_fields.keys())
                    for i, arg in enumerate(args):
                        if i < len(field_names):
                            kwargs[field_names[i]] = arg
                # Create params from kwargs
                try:
                    params = par_model(**kwargs)
                except Exception as e:
                    raise ValueError(f'Invalid parameters for {act_name}: {e}') from e
                # Special validation for done() - enforce minimal code cell
                if act_name == 'done':
                    consecutive_failures = namespace.get('_consecutive_errors')
                    if consecutive_failures and consecutive_failures > 3:
                        # After repeated failures, let done() through without validation
                        pass
                    else:
                        # Check if there are multiple Python blocks in this response
                        all_blocks = namespace.get('_all_code_blocks', {})
                        python_blocks = [k for k in sorted(all_blocks.keys()) if k.startswith('python_')]
                        if len(python_blocks) > 1:
                            msg = (
                                'done() should be the ONLY code block in the response.\n'
                                'You have multiple Python blocks in this response. Consider calling done() in a separate response '
                                'Now verify the last output and if it satisfies the task, call done(), else continue working.'
                            )
                            print(msg)
                        # Get the current cell code from namespace (injected by service.py before execution)
                        current_code = namespace.get('_current_cell_code')
                        if current_code and isinstance(current_code, str):
                            # Count non-empty, non-comment lines
                            lines = [line.strip() for line in current_code.strip().split('\n')]
                            code_lines = [line for line in lines if line and not line.startswith('#')]
                            # Check if the line above await done() contains an if block
                            done_line_index = -1
                            for i, line in enumerate(reversed(code_lines)):
                                if 'await done()' in line or 'await done(' in line:
                                    done_line_index = len(code_lines) - 1 - i
                                    break
                            has_if_above = False
                            has_else_above = False
                            has_elif_above = False
                            if done_line_index > 0:
                                line_above = code_lines[done_line_index - 1]
                                has_if_above = line_above.strip().startswith('if ') and line_above.strip().endswith(':')
                                has_else_above = line_above.strip().startswith('else:')
                                has_elif_above = line_above.strip().startswith('elif ')
                            if has_if_above or has_else_above or has_elif_above:
                                msg = (
                                    'done() should be called individually after verifying the result from any logic.\n'
                                    'Consider validating your output first, THEN call done() in a final step without if/else/elif blocks only if the task is truly complete.'
                                )
                                logger.error(msg)
                                print(msg)
                                raise RuntimeError(msg)
                # Build special context
                special_context = {
                    'browser_session': browser_session,
                    'page_extraction_llm': page_extraction_llm,
                    'available_file_paths': available_file_paths,
                    'has_sensitive_data': False,  # Can be handled separately if needed
                    'file_system': file_system,
                }
                # Execute the action
                result = await act_func(params=params, **special_context)
                # For code-use mode, we want to return the result directly
                # not wrapped in ActionResult
                if hasattr(result, 'extracted_content'):
                    # Special handling for done action - mark task as complete
                    if act_name == 'done' and hasattr(result, 'is_done') and result.is_done:
                        namespace['_task_done'] = True
                        # Store the extracted content as the final result
                        if result.extracted_content:
                            namespace['_task_result'] = result.extracted_content
                        # Store the self-reported success status
                        if hasattr(result, 'success'):
                            namespace['_task_success'] = result.success
                    # If there's extracted content, return it
                    if result.extracted_content:
                        return result.extracted_content
                    # If there's an error, raise it
                    if result.error:
                        raise RuntimeError(result.error)
                    # Otherwise return None
                    return None
                return result

            return action_wrapper

        # Rename 'input' to 'input_text' to avoid shadowing Python's built-in input()
        namespace_action_name = 'input_text' if action_name == 'input' else action_name
        # Add the wrapper to the namespace
        namespace[namespace_action_name] = make_action_wrapper(action_name, param_model, action_function)
    return namespace
def get_namespace_documentation(namespace: dict[str, Any]) -> str:
    """
    Build markdown documentation for every public, documented callable in *namespace*.

    Args:
        namespace: The namespace dictionary

    Returns:
        Markdown-formatted documentation string
    """
    sections = ['# Available Functions\n']
    for name in sorted(namespace):
        candidate = namespace[name]
        # Only public callables with a docstring get a section.
        if name.startswith('_') or not callable(candidate):
            continue
        doc = getattr(candidate, '__doc__', None)
        if doc:
            sections.append(f'## {name}\n')
            sections.append(f'{doc}\n')
    return '\n'.join(sections)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/notebook_export.py | browser_use/code_use/notebook_export.py | """Export code-use session to Jupyter notebook format."""
import json
import re
from pathlib import Path
from browser_use.code_use.service import CodeAgent
from .views import CellType, NotebookExport
def export_to_ipynb(agent: CodeAgent, output_path: str | Path) -> Path:
	"""
	Export a CodeAgent's notebook session to a Jupyter notebook (.ipynb) file.

	Also emits JavaScript code blocks that were stored in the agent's namespace
	(tracked via the '_code_block_vars' set) as dedicated code cells.

	Args:
		agent: CodeAgent whose session cells and namespace JS blocks are exported
		output_path: Path where to save the notebook file

	Returns:
		Path to the saved notebook file

	Example:
		```python
		session = await agent.run()
		notebook_path = export_to_ipynb(agent, 'my_automation.ipynb')
		print(f'Notebook saved to {notebook_path}')
		```
	"""
	output_path = Path(output_path)
	# Create notebook structure (nbformat-style metadata for a Python 3 kernel)
	notebook = NotebookExport(
		metadata={
			'kernelspec': {'display_name': 'Python 3', 'language': 'python', 'name': 'python3'},
			'language_info': {
				'name': 'python',
				'version': '3.11.0',
				'mimetype': 'text/x-python',
				'codemirror_mode': {'name': 'ipython', 'version': 3},
				'pygments_lexer': 'ipython3',
				'nbconvert_exporter': 'python',
				'file_extension': '.py',
			},
		}
	)
	# Add setup cell at the beginning with proper type hints.
	# NOTE: top-level `await` works here because Jupyter runs cells in an event loop.
	setup_code = """import asyncio
import json
from typing import Any
from browser_use import BrowserSession
from browser_use.code_use import create_namespace
# Initialize browser and namespace
browser = BrowserSession()
await browser.start()
# Create namespace with all browser control functions
namespace: dict[str, Any] = create_namespace(browser)
# Import all functions into the current namespace
globals().update(namespace)
# Type hints for better IDE support (these are now available globally)
# navigate, click, input, evaluate, search, extract, scroll, done, etc.
print("Browser-use environment initialized!")
print("Available functions: navigate, click, input, evaluate, search, extract, done, etc.")"""
	setup_cell = {
		'cell_type': 'code',
		'metadata': {},
		'source': setup_code.split('\n'),
		'execution_count': None,
		'outputs': [],
	}
	notebook.cells.append(setup_cell)
	# Add JavaScript code blocks as variables FIRST so later cells can reference them
	if hasattr(agent, 'namespace') and agent.namespace:
		# Look for JavaScript variables in the namespace
		code_block_vars = agent.namespace.get('_code_block_vars', set())
		for var_name in sorted(code_block_vars):
			var_value = agent.namespace.get(var_name)
			if isinstance(var_value, str) and var_value.strip():
				# Check if this looks like JavaScript code.
				# Heuristic only: matches common JS patterns, so Python snippets
				# containing e.g. `return ` could also match.
				js_patterns = [
					r'function\s+\w+\s*\(',
					r'\(\s*function\s*\(\)',
					r'=>\s*{',
					r'document\.',
					r'Array\.from\(',
					r'\.querySelector',
					r'\.textContent',
					r'\.innerHTML',
					r'return\s+',
					r'console\.log',
					r'window\.',
					r'\.map\(',
					r'\.filter\(',
					r'\.forEach\(',
				]
				is_js = any(re.search(pattern, var_value, re.IGNORECASE) for pattern in js_patterns)
				if is_js:
					# Create a code cell that re-assigns the JavaScript source to its variable
					js_cell = {
						'cell_type': 'code',
						'metadata': {},
						'source': [f'# JavaScript Code Block: {var_name}\n', f'{var_name} = """{var_value}"""'],
						'execution_count': None,
						'outputs': [],
					}
					notebook.cells.append(js_cell)
	# Convert cells
	python_cell_count = 0  # NOTE(review): counted but never used — candidate for removal
	for cell in agent.session.cells:
		notebook_cell: dict = {
			'cell_type': cell.cell_type.value,
			'metadata': {},
			'source': cell.source.splitlines(keepends=True),
		}
		if cell.cell_type == CellType.CODE:
			python_cell_count += 1
			notebook_cell['execution_count'] = cell.execution_count
			notebook_cell['outputs'] = []
			# Add output if available
			if cell.output:
				notebook_cell['outputs'].append(
					{
						'output_type': 'stream',
						'name': 'stdout',
						'text': cell.output.split('\n'),
					}
				)
			# Add error if available
			if cell.error:
				notebook_cell['outputs'].append(
					{
						'output_type': 'error',
						'ename': 'Error',
						'evalue': cell.error.split('\n')[0] if cell.error else '',
						'traceback': cell.error.split('\n') if cell.error else [],
					}
				)
			# Add browser state as a separate output
			if cell.browser_state:
				notebook_cell['outputs'].append(
					{
						'output_type': 'stream',
						'name': 'stdout',
						'text': [f'Browser State:\n{cell.browser_state}'],
					}
				)
		notebook.cells.append(notebook_cell)
	# Write to file (ensure_ascii=False keeps non-ASCII page text readable)
	output_path.parent.mkdir(parents=True, exist_ok=True)
	with open(output_path, 'w', encoding='utf-8') as f:
		json.dump(notebook.model_dump(), f, indent=2, ensure_ascii=False)
	return output_path
def session_to_python_script(agent: 'CodeAgent') -> str:
	"""
	Convert a CodeAgent session to a standalone Python script.

	Includes JavaScript code blocks that were stored in the agent's namespace,
	followed by one section per executed code cell.

	Args:
		agent: The CodeAgent instance to convert

	Returns:
		Python script as a string

	Example:
		```python
		await agent.run()
		script = session_to_python_script(agent)
		print(script)
		```
	"""
	lines = []
	lines.append('# Generated from browser-use code-use session\n')
	lines.append('import asyncio\n')
	lines.append('import json\n')
	lines.append('from browser_use import BrowserSession\n')
	lines.append('from browser_use.code_use import create_namespace\n\n')
	lines.append('async def main():\n')
	lines.append('\t# Initialize browser and namespace\n')
	lines.append('\tbrowser = BrowserSession()\n')
	lines.append('\tawait browser.start()\n\n')
	lines.append('\t# Create namespace with all browser control functions\n')
	lines.append('\tnamespace = create_namespace(browser)\n\n')
	lines.append('\t# Extract functions from namespace for direct access\n')
	lines.append('\tnavigate = namespace["navigate"]\n')
	lines.append('\tclick = namespace["click"]\n')
	# BUG FIX: create_namespace registers the 'input' action under the key
	# 'input_text' (renamed to avoid shadowing Python's builtin input()), so the
	# generated script must look up namespace["input_text"]; namespace["input"]
	# raised KeyError at runtime.
	lines.append('\tinput_text = namespace["input_text"]\n')
	lines.append('\tevaluate = namespace["evaluate"]\n')
	lines.append('\tsearch = namespace["search"]\n')
	lines.append('\textract = namespace["extract"]\n')
	lines.append('\tscroll = namespace["scroll"]\n')
	lines.append('\tdone = namespace["done"]\n')
	lines.append('\tgo_back = namespace["go_back"]\n')
	lines.append('\twait = namespace["wait"]\n')
	lines.append('\tscreenshot = namespace["screenshot"]\n')
	lines.append('\tfind_text = namespace["find_text"]\n')
	# NOTE(review): assumes actions registered under 'switch' / 'close' exist in
	# the namespace — confirm against the tools registry.
	lines.append('\tswitch_tab = namespace["switch"]\n')
	lines.append('\tclose_tab = namespace["close"]\n')
	lines.append('\tdropdown_options = namespace["dropdown_options"]\n')
	lines.append('\tselect_dropdown = namespace["select_dropdown"]\n')
	lines.append('\tupload_file = namespace["upload_file"]\n')
	lines.append('\tsend_keys = namespace["send_keys"]\n\n')
	# Add JavaScript code blocks as variables FIRST so cells can reference them
	if hasattr(agent, 'namespace') and agent.namespace:
		code_block_vars = agent.namespace.get('_code_block_vars', set())
		for var_name in sorted(code_block_vars):
			var_value = agent.namespace.get(var_name)
			if isinstance(var_value, str) and var_value.strip():
				# Check if this looks like JavaScript code (heuristic pattern match)
				js_patterns = [
					r'function\s+\w+\s*\(',
					r'\(\s*function\s*\(\)',
					r'=>\s*{',
					r'document\.',
					r'Array\.from\(',
					r'\.querySelector',
					r'\.textContent',
					r'\.innerHTML',
					r'return\s+',
					r'console\.log',
					r'window\.',
					r'\.map\(',
					r'\.filter\(',
					r'\.forEach\(',
				]
				is_js = any(re.search(pattern, var_value, re.IGNORECASE) for pattern in js_patterns)
				if is_js:
					lines.append(f'\t# JavaScript Code Block: {var_name}\n')
					lines.append(f'\t{var_name} = """{var_value}"""\n\n')
	# Emit each executed code cell, indented into main()
	for i, cell in enumerate(agent.session.cells):
		if cell.cell_type == CellType.CODE:
			lines.append(f'\t# Cell {i + 1}\n')
			# Indent each line of source
			source_lines = cell.source.split('\n')
			for line in source_lines:
				if line.strip():  # Only add non-empty lines
					lines.append(f'\t{line}\n')
			lines.append('\n')
	lines.append('\tawait browser.stop()\n\n')
	lines.append("if __name__ == '__main__':\n")
	lines.append('\tasyncio.run(main())\n')
	return ''.join(lines)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/__init__.py | browser_use/code_use/__init__.py | """Code-use mode - Jupyter notebook-like code execution for browser automation."""
from browser_use.code_use.namespace import create_namespace
from browser_use.code_use.notebook_export import export_to_ipynb, session_to_python_script
from browser_use.code_use.service import CodeAgent
from browser_use.code_use.views import CodeCell, ExecutionStatus, NotebookSession
# Public API of the code_use package: the agent, namespace factory,
# export helpers, and the session/cell view models re-exported above.
__all__ = [
	'CodeAgent',
	'create_namespace',
	'export_to_ipynb',
	'session_to_python_script',
	'CodeCell',
	'ExecutionStatus',
	'NotebookSession',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/code_use/formatting.py | browser_use/code_use/formatting.py | """Browser state formatting helpers for code-use agent."""
import logging
from typing import Any
from browser_use.browser.session import BrowserSession
from browser_use.browser.views import BrowserStateSummary
logger = logging.getLogger(__name__)
async def format_browser_state_for_llm(
	state: BrowserStateSummary,
	namespace: dict[str, Any],
	browser_session: BrowserSession,
) -> str:
	"""
	Format browser state summary for LLM consumption in code-use mode.

	Builds a markdown document: URL/title header, tab list, scroll position,
	pending network requests, available namespace variables, then the
	(possibly truncated) DOM structure.

	Args:
		state: Browser state summary from browser_session.get_browser_state_summary()
		namespace: The code execution namespace (for showing available variables)
		browser_session: Browser session for additional checks (currently unused here)

	Returns:
		Formatted browser state text for LLM
	"""
	assert state.dom_state is not None
	dom_state = state.dom_state
	# Use eval_representation (compact serializer for code agents)
	dom_html = dom_state.eval_representation()
	if dom_html == '':
		dom_html = 'Empty DOM tree (you might have to wait for the page to load)'
	# Format with URL and title header
	lines = ['## Browser State']
	lines.append(f'**URL:** {state.url}')
	lines.append(f'**Title:** {state.title}')
	lines.append('')
	# Add tabs info if multiple tabs exist
	if len(state.tabs) > 1:
		lines.append('**Tabs:**')
		current_target_candidates = []
		# Find tabs that match current URL and title; only mark "(current)"
		# when exactly one tab matches (ambiguous otherwise).
		for tab in state.tabs:
			if tab.url == state.url and tab.title == state.title:
				current_target_candidates.append(tab.target_id)
		current_target_id = current_target_candidates[0] if len(current_target_candidates) == 1 else None
		for tab in state.tabs:
			is_current = ' (current)' if tab.target_id == current_target_id else ''
			lines.append(f' - Tab {tab.target_id[-4:]}: {tab.url} - {tab.title[:30]}{is_current}')
		lines.append('')
	# Add page scroll info if available (expressed in viewport-height "pages")
	if state.page_info:
		pi = state.page_info
		pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
		pages_below = pi.pixels_below / pi.viewport_height if pi.viewport_height > 0 else 0
		total_pages = pi.page_height / pi.viewport_height if pi.viewport_height > 0 else 0
		scroll_info = f'**Page:** {pages_above:.1f} pages above, {pages_below:.1f} pages below'
		if total_pages > 1.2:  # Only mention total if significantly > 1 page
			scroll_info += f', {total_pages:.1f} total pages'
		lines.append(scroll_info)
		lines.append('')
	# Add network loading info if there are pending requests
	if state.pending_network_requests:
		# Remove duplicates by URL (keep first occurrence with earliest duration)
		seen_urls = set()
		unique_requests = []
		for req in state.pending_network_requests:
			if req.url not in seen_urls:
				seen_urls.add(req.url)
				unique_requests.append(req)
		# NOTE(review): 'โณ' looks like a mojibake of the '⏳' hourglass emoji — confirm encoding.
		lines.append(f'**โณ Loading:** {len(unique_requests)} network requests still loading')
		# Show up to 20 unique requests with truncated URLs (30 chars max)
		for req in unique_requests[:20]:
			duration_sec = req.loading_duration_ms / 1000
			url_display = req.url if len(req.url) <= 30 else req.url[:27] + '...'
			# NOTE(review): this info-level log duplicates the appended line for every
			# pending request — looks like leftover debug logging; confirm before removing.
			logger.info(f' - [{duration_sec:.1f}s] {url_display}')
			lines.append(f' - [{duration_sec:.1f}s] {url_display}')
		if len(unique_requests) > 20:
			lines.append(f' - ... and {len(unique_requests) - 20} more')
		lines.append('**Tip:** Content may still be loading. Consider waiting with `await asyncio.sleep(1)` if data is missing.')
		lines.append('')
	# Add available variables and functions BEFORE DOM structure
	# Show useful utilities (json, asyncio, etc.) and user-defined vars, but hide system objects
	skip_vars = {
		'browser',
		'file_system',  # System objects
		'np',
		'pd',
		'plt',
		'numpy',
		'pandas',
		'matplotlib',
		'requests',
		'BeautifulSoup',
		'bs4',
		'pypdf',
		'PdfReader',
		'wait',
	}
	# Highlight code block variables separately from regular variables
	code_block_vars = []
	regular_vars = []
	tracked_code_blocks = namespace.get('_code_block_vars', set())
	for name in namespace.keys():
		# Skip private vars and system objects/actions
		if not name.startswith('_') and name not in skip_vars:
			if name in tracked_code_blocks:
				code_block_vars.append(name)
			else:
				regular_vars.append(name)
	# Sort for consistent display
	available_vars_sorted = sorted(regular_vars)
	code_block_vars_sorted = sorted(code_block_vars)
	# Build available line with code blocks and variables
	parts = []
	if code_block_vars_sorted:
		# Show detailed info for code block variables
		code_block_details = []
		for var_name in code_block_vars_sorted:
			value = namespace.get(var_name)
			if value is not None:
				type_name = type(value).__name__
				value_str = str(value) if not isinstance(value, str) else value
				# Check if it's a function (starts with "(function" or "(async function")
				is_function = value_str.strip().startswith('(function') or value_str.strip().startswith('(async function')
				if is_function:
					# For functions, only show name and type
					detail = f'{var_name}({type_name})'
				else:
					# For non-functions, show first and last 20 chars (escaped for one-line display)
					first_20 = value_str[:20].replace('\n', '\\n').replace('\t', '\\t')
					last_20 = value_str[-20:].replace('\n', '\\n').replace('\t', '\\t') if len(value_str) > 20 else ''
					if last_20 and first_20 != last_20:
						detail = f'{var_name}({type_name}): "{first_20}...{last_20}"'
					else:
						detail = f'{var_name}({type_name}): "{first_20}"'
				code_block_details.append(detail)
		parts.append(f'**Code block variables:** {" | ".join(code_block_details)}')
	if available_vars_sorted:
		parts.append(f'**Variables:** {", ".join(available_vars_sorted)}')
	lines.append(f'**Available:** {" | ".join(parts)}')
	lines.append('')
	# Add DOM structure
	lines.append('**DOM Structure:**')
	# Add scroll position hints for DOM
	if state.page_info:
		pi = state.page_info
		pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
		pages_below = pi.pixels_below / pi.viewport_height if pi.viewport_height > 0 else 0
		if pages_above > 0:
			dom_html = f'... {pages_above:.1f} pages above \n{dom_html}'
		else:
			dom_html = '[Start of page]\n' + dom_html
		if pages_below <= 0:
			dom_html += '\n[End of page]'
	# Truncate DOM if too long and notify LLM
	max_dom_length = 60000
	if len(dom_html) > max_dom_length:
		lines.append(dom_html[:max_dom_length])
		lines.append(
			f'\n[DOM truncated after {max_dom_length} characters. Full page contains {len(dom_html)} characters total. Use evaluate to explore more.]'
		)
	else:
		lines.append(dom_html)
	browser_state_text = '\n'.join(lines)
	return browser_state_text
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/filesystem/file_system.py | browser_use/filesystem/file_system.py | import asyncio
import base64
import os
import re
import shutil
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any
from pydantic import BaseModel, Field
INVALID_FILENAME_ERROR_MESSAGE = 'Error: Invalid filename format. Must be alphanumeric with supported extension.'
DEFAULT_FILE_SYSTEM_PATH = 'browseruse_agent_data'
class FileSystemError(Exception):
	"""Custom exception for file system operations that should be shown to LLM"""

	# Raised by file implementations (e.g. PdfFile/DocxFile sync-to-disk) and
	# converted to plain message strings by FileSystem's read/write/append helpers.
	pass
class BaseFile(BaseModel, ABC):
	"""Base class for all file types.

	The in-memory `content` string is the source of truth; disk files under the
	FileSystem data dir are write-through copies. Subclasses define `extension`
	and may override the sync methods for binary formats (see PdfFile/DocxFile).
	"""

	# Filename without extension; combined with `extension` in `full_name`.
	name: str
	# In-memory file content.
	content: str = ''

	# --- Subclass must define this ---
	@property
	@abstractmethod
	def extension(self) -> str:
		"""File extension (e.g. 'txt', 'md')"""
		pass

	def write_file_content(self, content: str) -> None:
		"""Update internal content (formatted)"""
		self.update_content(content)

	def append_file_content(self, content: str) -> None:
		"""Append content to internal content"""
		self.update_content(self.content + content)

	# --- These are shared and implemented here ---
	def update_content(self, content: str) -> None:
		self.content = content

	def sync_to_disk_sync(self, path: Path) -> None:
		"""Blocking write of the in-memory content to `path / full_name`."""
		file_path = path / self.full_name
		file_path.write_text(self.content)

	async def sync_to_disk(self, path: Path) -> None:
		"""Async write of the in-memory content to `path / full_name`.

		Offloads the blocking write to the default thread pool via
		asyncio.to_thread instead of creating (and tearing down) a dedicated
		ThreadPoolExecutor per call, and delegates to sync_to_disk_sync so the
		write logic is not duplicated and subclasses only override one place.
		"""
		await asyncio.to_thread(self.sync_to_disk_sync, path)

	async def write(self, content: str, path: Path) -> None:
		"""Replace content, then persist to disk."""
		self.write_file_content(content)
		await self.sync_to_disk(path)

	async def append(self, content: str, path: Path) -> None:
		"""Append content, then persist to disk."""
		self.append_file_content(content)
		await self.sync_to_disk(path)

	def read(self) -> str:
		"""Return the in-memory content (no disk access)."""
		return self.content

	@property
	def full_name(self) -> str:
		return f'{self.name}.{self.extension}'

	@property
	def get_size(self) -> int:
		# Size in characters of the in-memory content.
		return len(self.content)

	@property
	def get_line_count(self) -> int:
		return len(self.content.splitlines())
class MarkdownFile(BaseFile):
	"""Markdown file implementation"""

	@property
	def extension(self) -> str:
		# Keyed as 'md' in FileSystem._file_types.
		return 'md'
class TxtFile(BaseFile):
	"""Plain text file implementation"""

	@property
	def extension(self) -> str:
		# Keyed as 'txt' in FileSystem._file_types.
		return 'txt'
class JsonFile(BaseFile):
	"""JSON file implementation"""

	@property
	def extension(self) -> str:
		# Content is stored/written verbatim; no JSON validation is performed here.
		return 'json'
class CsvFile(BaseFile):
	"""CSV file implementation"""

	@property
	def extension(self) -> str:
		# Content is stored/written verbatim; no CSV parsing is performed here.
		return 'csv'
class JsonlFile(BaseFile):
	"""JSONL (JSON Lines) file implementation"""

	@property
	def extension(self) -> str:
		return 'jsonl'
class PdfFile(BaseFile):
	"""PDF file implementation.

	Renders the (markdown-ish) text content to a PDF via reportlab on sync.
	"""

	@property
	def extension(self) -> str:
		return 'pdf'

	def sync_to_disk_sync(self, path: Path) -> None:
		# Lazy import reportlab (only needed when a PDF is actually written)
		from reportlab.lib.pagesizes import letter
		from reportlab.lib.styles import getSampleStyleSheet
		from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer

		file_path = path / self.full_name
		try:
			# Create PDF document
			doc = SimpleDocTemplate(str(file_path), pagesize=letter)
			styles = getSampleStyleSheet()
			story = []
			# Convert markdown content to simple text and add to PDF
			# For basic implementation, we'll treat content as plain text
			# This avoids the AGPL license issue while maintaining functionality
			content_lines = self.content.split('\n')
			for line in content_lines:
				if line.strip():
					# Handle basic markdown headers (#, ##, ### -> Title/H1/H2)
					if line.startswith('# '):
						para = Paragraph(line[2:], styles['Title'])
					elif line.startswith('## '):
						para = Paragraph(line[3:], styles['Heading1'])
					elif line.startswith('### '):
						para = Paragraph(line[4:], styles['Heading2'])
					else:
						para = Paragraph(line, styles['Normal'])
					story.append(para)
				else:
					# Blank line -> small vertical spacer
					story.append(Spacer(1, 6))
			doc.build(story)
		except Exception as e:
			# Wrap into FileSystemError so the message can be surfaced to the LLM
			raise FileSystemError(f"Error: Could not write to file '{self.full_name}'. {str(e)}")

	async def sync_to_disk(self, path: Path) -> None:
		# NOTE(review): spins up a fresh ThreadPoolExecutor per call;
		# asyncio.to_thread would reuse the default pool — confirm before changing.
		with ThreadPoolExecutor() as executor:
			await asyncio.get_event_loop().run_in_executor(executor, lambda: self.sync_to_disk_sync(path))
class DocxFile(BaseFile):
	"""DOCX file implementation.

	Renders the (markdown-ish) text content to a Word document via python-docx on sync.
	"""

	@property
	def extension(self) -> str:
		return 'docx'

	def sync_to_disk_sync(self, path: Path) -> None:
		file_path = path / self.full_name
		try:
			# Lazy import python-docx (only needed when a DOCX is actually written)
			from docx import Document

			doc = Document()
			# Convert content to DOCX paragraphs
			content_lines = self.content.split('\n')
			for line in content_lines:
				if line.strip():
					# Handle basic markdown headers (#, ##, ### -> heading levels 1-3)
					if line.startswith('# '):
						doc.add_heading(line[2:], level=1)
					elif line.startswith('## '):
						doc.add_heading(line[3:], level=2)
					elif line.startswith('### '):
						doc.add_heading(line[4:], level=3)
					else:
						doc.add_paragraph(line)
				else:
					doc.add_paragraph()  # Empty paragraph for spacing
			doc.save(str(file_path))
		except Exception as e:
			# Wrap into FileSystemError so the message can be surfaced to the LLM
			raise FileSystemError(f"Error: Could not write to file '{self.full_name}'. {str(e)}")

	async def sync_to_disk(self, path: Path) -> None:
		# NOTE(review): fresh ThreadPoolExecutor per call, same pattern as PdfFile —
		# asyncio.to_thread would reuse the default pool; confirm before changing.
		with ThreadPoolExecutor() as executor:
			await asyncio.get_event_loop().run_in_executor(executor, lambda: self.sync_to_disk_sync(path))
class FileSystemState(BaseModel):
	"""Serializable state of the file system.

	Produced by FileSystem.get_state() and consumed by FileSystem.from_state().
	"""

	# full filename (e.g. 'todo.md') -> {'type': <file class name>, 'data': <model_dump>}
	files: dict[str, dict[str, Any]] = Field(default_factory=dict)  # full filename -> file data
	# Original base directory; from_state() restores at the exact same location.
	base_dir: str
	# Counter used to number extracted_content_N.md files.
	extracted_content_count: int = 0
class FileSystem:
	"""Enhanced file system with in-memory storage and multiple file type support.

	Files live in memory (`self.files`) and are write-through synced to a
	dedicated on-disk data directory. Filenames are validated against a strict
	name.extension pattern before any operation.
	"""

	def __init__(self, base_dir: str | Path, create_default_files: bool = True):
		# Handle the Path conversion before calling super().__init__
		self.base_dir = Path(base_dir) if isinstance(base_dir, str) else base_dir
		self.base_dir.mkdir(parents=True, exist_ok=True)
		# Create and use a dedicated subfolder for all operations
		self.data_dir = self.base_dir / DEFAULT_FILE_SYSTEM_PATH
		if self.data_dir.exists():
			# clean the data directory (previous runs' files are discarded)
			shutil.rmtree(self.data_dir)
		self.data_dir.mkdir(exist_ok=True)
		# Registry mapping extension -> file class; extend to support new types.
		self._file_types: dict[str, type[BaseFile]] = {
			'md': MarkdownFile,
			'txt': TxtFile,
			'json': JsonFile,
			'jsonl': JsonlFile,
			'csv': CsvFile,
			'pdf': PdfFile,
			'docx': DocxFile,
		}
		# full filename -> in-memory file object
		self.files: dict[str, BaseFile] = {}
		if create_default_files:
			# NOTE(review): self.default_files is only set on this branch, so the
			# from_state() path leaves it undefined — confirm nothing reads it there.
			self.default_files = ['todo.md']
			self._create_default_files()
		self.extracted_content_count = 0

	def get_allowed_extensions(self) -> list[str]:
		"""Get allowed extensions"""
		return list(self._file_types.keys())

	def _get_file_type_class(self, extension: str) -> type[BaseFile] | None:
		"""Get the appropriate file class for an extension."""
		return self._file_types.get(extension.lower(), None)

	def _create_default_files(self) -> None:
		"""Create default results and todo files"""
		for full_filename in self.default_files:
			name_without_ext, extension = self._parse_filename(full_filename)
			file_class = self._get_file_type_class(extension)
			if not file_class:
				raise ValueError(f"Error: Invalid file extension '{extension}' for file '{full_filename}'.")
			file_obj = file_class(name=name_without_ext)
			self.files[full_filename] = file_obj  # Use full filename as key
			file_obj.sync_to_disk_sync(self.data_dir)

	def _is_valid_filename(self, file_name: str) -> bool:
		"""Check if filename matches the required pattern: name.extension"""
		# Build extensions pattern from _file_types
		extensions = '|'.join(self._file_types.keys())
		# Allows ASCII alphanumerics, underscore, hyphen, and CJK characters in the stem.
		pattern = rf'^[a-zA-Z0-9_\-\u4e00-\u9fff]+\.({extensions})$'
		file_name_base = os.path.basename(file_name)
		return bool(re.match(pattern, file_name_base))

	def _parse_filename(self, filename: str) -> tuple[str, str]:
		"""Parse filename into name and extension. Always check _is_valid_filename first."""
		# rsplit so dotted stems keep everything before the final '.' as the name.
		name, extension = filename.rsplit('.', 1)
		return name, extension.lower()

	def get_dir(self) -> Path:
		"""Get the file system directory"""
		return self.data_dir

	def get_file(self, full_filename: str) -> BaseFile | None:
		"""Get a file object by full filename"""
		if not self._is_valid_filename(full_filename):
			return None
		# Use full filename as key
		return self.files.get(full_filename)

	def list_files(self) -> list[str]:
		"""List all files in the system"""
		return [file_obj.full_name for file_obj in self.files.values()]

	def display_file(self, full_filename: str) -> str | None:
		"""Display file content using file-specific display method"""
		if not self._is_valid_filename(full_filename):
			return None
		file_obj = self.get_file(full_filename)
		if not file_obj:
			return None
		return file_obj.read()

	async def read_file_structured(self, full_filename: str, external_file: bool = False) -> dict[str, Any]:
		"""Read file and return structured data including images if applicable.

		Args:
			full_filename: Name (internal) or path (external) of the file to read.
			external_file: When True, read directly from disk outside the managed
				data dir (supports md/txt/json/jsonl/csv, docx, pdf, and images).

		Returns:
			dict with keys:
				- 'message': str - The message to display
				- 'images': list[dict] | None - Image data if file is an image: [{"name": str, "data": base64_str}]
		"""
		result: dict[str, Any] = {'message': '', 'images': None}
		if external_file:
			try:
				try:
					_, extension = self._parse_filename(full_filename)
				except Exception:
					result['message'] = (
						f'Error: Invalid filename format {full_filename}. Must be alphanumeric with a supported extension.'
					)
					return result
				if extension in ['md', 'txt', 'json', 'jsonl', 'csv']:
					import anyio

					async with await anyio.open_file(full_filename, 'r') as f:
						content = await f.read()
					result['message'] = f'Read from file {full_filename}.\n<content>\n{content}\n</content>'
					return result
				elif extension == 'docx':
					from docx import Document

					doc = Document(full_filename)
					content = '\n'.join([para.text for para in doc.paragraphs])
					result['message'] = f'Read from file {full_filename}.\n<content>\n{content}\n</content>'
					return result
				elif extension == 'pdf':
					import pypdf

					reader = pypdf.PdfReader(full_filename)
					num_pages = len(reader.pages)
					# Cap extraction to keep the message bounded for the LLM.
					MAX_PDF_PAGES = 20
					extra_pages = num_pages - MAX_PDF_PAGES
					extracted_text = ''
					for page in reader.pages[:MAX_PDF_PAGES]:
						extracted_text += page.extract_text()
					extra_pages_text = f'{extra_pages} more pages...' if extra_pages > 0 else ''
					result['message'] = (
						f'Read from file {full_filename}.\n<content>\n{extracted_text}\n{extra_pages_text}</content>'
					)
					return result
				elif extension in ['jpg', 'jpeg', 'png']:
					import anyio

					# Read image file and convert to base64
					async with await anyio.open_file(full_filename, 'rb') as f:
						img_data = await f.read()
					base64_str = base64.b64encode(img_data).decode('utf-8')
					result['message'] = f'Read image file {full_filename}.'
					result['images'] = [{'name': os.path.basename(full_filename), 'data': base64_str}]
					return result
				else:
					result['message'] = f'Error: Cannot read file {full_filename} as {extension} extension is not supported.'
					return result
			except FileNotFoundError:
				result['message'] = f"Error: File '{full_filename}' not found."
				return result
			except PermissionError:
				result['message'] = f"Error: Permission denied to read file '{full_filename}'."
				return result
			except Exception as e:
				result['message'] = f"Error: Could not read file '{full_filename}'. {str(e)}"
				return result
		# For internal files, only non-image types are supported
		if not self._is_valid_filename(full_filename):
			result['message'] = INVALID_FILENAME_ERROR_MESSAGE
			return result
		file_obj = self.get_file(full_filename)
		if not file_obj:
			result['message'] = f"File '{full_filename}' not found."
			return result
		try:
			content = file_obj.read()
			result['message'] = f'Read from file {full_filename}.\n<content>\n{content}\n</content>'
			return result
		except FileSystemError as e:
			result['message'] = str(e)
			return result
		except Exception as e:
			result['message'] = f"Error: Could not read file '{full_filename}'. {str(e)}"
			return result

	async def read_file(self, full_filename: str, external_file: bool = False) -> str:
		"""Read file content using file-specific read method and return appropriate message to LLM.

		Note: For image files, use read_file_structured() to get image data.
		"""
		result = await self.read_file_structured(full_filename, external_file)
		return result['message']

	async def write_file(self, full_filename: str, content: str) -> str:
		"""Write content to file using file-specific write method.

		Creates the file if it does not exist; returns an LLM-facing status string.
		"""
		if not self._is_valid_filename(full_filename):
			return INVALID_FILENAME_ERROR_MESSAGE
		try:
			name_without_ext, extension = self._parse_filename(full_filename)
			file_class = self._get_file_type_class(extension)
			if not file_class:
				raise ValueError(f"Error: Invalid file extension '{extension}' for file '{full_filename}'.")
			# Create or get existing file using full filename as key
			if full_filename in self.files:
				file_obj = self.files[full_filename]
			else:
				file_obj = file_class(name=name_without_ext)
				self.files[full_filename] = file_obj  # Use full filename as key
			# Use file-specific write method
			await file_obj.write(content, self.data_dir)
			return f'Data written to file {full_filename} successfully.'
		except FileSystemError as e:
			return str(e)
		except Exception as e:
			return f"Error: Could not write to file '{full_filename}'. {str(e)}"

	async def append_file(self, full_filename: str, content: str) -> str:
		"""Append content to file using file-specific append method"""
		if not self._is_valid_filename(full_filename):
			return INVALID_FILENAME_ERROR_MESSAGE
		file_obj = self.get_file(full_filename)
		if not file_obj:
			return f"File '{full_filename}' not found."
		try:
			await file_obj.append(content, self.data_dir)
			return f'Data appended to file {full_filename} successfully.'
		except FileSystemError as e:
			return str(e)
		except Exception as e:
			return f"Error: Could not append to file '{full_filename}'. {str(e)}"

	async def replace_file_str(self, full_filename: str, old_str: str, new_str: str) -> str:
		"""Replace old_str with new_str in file_name (all occurrences)."""
		if not self._is_valid_filename(full_filename):
			return INVALID_FILENAME_ERROR_MESSAGE
		if not old_str:
			return 'Error: Cannot replace empty string. Please provide a non-empty string to replace.'
		file_obj = self.get_file(full_filename)
		if not file_obj:
			return f"File '{full_filename}' not found."
		try:
			content = file_obj.read()
			content = content.replace(old_str, new_str)
			await file_obj.write(content, self.data_dir)
			return f'Successfully replaced all occurrences of "{old_str}" with "{new_str}" in file {full_filename}'
		except FileSystemError as e:
			return str(e)
		except Exception as e:
			return f"Error: Could not replace string in file '{full_filename}'. {str(e)}"

	async def save_extracted_content(self, content: str) -> str:
		"""Save extracted content to a numbered markdown file and return its name."""
		initial_filename = f'extracted_content_{self.extracted_content_count}'
		extracted_filename = f'{initial_filename}.md'
		file_obj = MarkdownFile(name=initial_filename)
		await file_obj.write(content, self.data_dir)
		self.files[extracted_filename] = file_obj
		self.extracted_content_count += 1
		return extracted_filename

	def describe(self) -> str:
		"""List all files with their content information using file-specific display methods.

		Small files are shown in full; larger files are summarized as
		start/end previews with a middle line count. todo.md is excluded.
		"""
		DISPLAY_CHARS = 400
		description = ''
		for file_obj in self.files.values():
			# Skip todo.md from description
			if file_obj.full_name == 'todo.md':
				continue
			content = file_obj.read()
			# Handle empty files
			if not content:
				description += f'<file>\n{file_obj.full_name} - [empty file]\n</file>\n'
				continue
			lines = content.splitlines()
			line_count = len(lines)
			# For small files, display the entire content
			whole_file_description = (
				f'<file>\n{file_obj.full_name} - {line_count} lines\n<content>\n{content}\n</content>\n</file>\n'
			)
			if len(content) < int(1.5 * DISPLAY_CHARS):
				description += whole_file_description
				continue
			# For larger files, display start and end previews
			half_display_chars = DISPLAY_CHARS // 2
			# Get start preview (whole lines only, up to the character budget)
			start_preview = ''
			start_line_count = 0
			chars_count = 0
			for line in lines:
				if chars_count + len(line) + 1 > half_display_chars:
					break
				start_preview += line + '\n'
				chars_count += len(line) + 1
				start_line_count += 1
			# Get end preview (whole lines only, walking backwards)
			end_preview = ''
			end_line_count = 0
			chars_count = 0
			for line in reversed(lines):
				if chars_count + len(line) + 1 > half_display_chars:
					break
				end_preview = line + '\n' + end_preview
				chars_count += len(line) + 1
				end_line_count += 1
			# Calculate lines in between
			middle_line_count = line_count - start_line_count - end_line_count
			if middle_line_count <= 0:
				# Previews overlap -> just show the whole file
				description += whole_file_description
				continue
			start_preview = start_preview.strip('\n').rstrip()
			end_preview = end_preview.strip('\n').rstrip()
			# Format output
			if not (start_preview or end_preview):
				description += f'<file>\n{file_obj.full_name} - {line_count} lines\n<content>\n{middle_line_count} lines...\n</content>\n</file>\n'
			else:
				description += f'<file>\n{file_obj.full_name} - {line_count} lines\n<content>\n{start_preview}\n'
				description += f'... {middle_line_count} more lines ...\n'
				description += f'{end_preview}\n'
				description += '</content>\n</file>\n'
		return description.strip('\n')

	def get_todo_contents(self) -> str:
		"""Get todo file contents"""
		todo_file = self.get_file('todo.md')
		return todo_file.read() if todo_file else ''

	def get_state(self) -> FileSystemState:
		"""Get serializable state of the file system"""
		files_data = {}
		for full_filename, file_obj in self.files.items():
			# Record the concrete class name so from_state() can reconstruct it
			files_data[full_filename] = {'type': file_obj.__class__.__name__, 'data': file_obj.model_dump()}
		return FileSystemState(
			files=files_data, base_dir=str(self.base_dir), extracted_content_count=self.extracted_content_count
		)

	def nuke(self) -> None:
		"""Delete the file system directory"""
		shutil.rmtree(self.data_dir)

	@classmethod
	def from_state(cls, state: FileSystemState) -> 'FileSystem':
		"""Restore file system from serializable state at the exact same location"""
		# Create file system without default files
		fs = cls(base_dir=Path(state.base_dir), create_default_files=False)
		fs.extracted_content_count = state.extracted_content_count
		# Restore all files
		for full_filename, file_data in state.files.items():
			file_type = file_data['type']
			file_info = file_data['data']
			# Create the appropriate file object based on type
			if file_type == 'MarkdownFile':
				file_obj = MarkdownFile(**file_info)
			elif file_type == 'TxtFile':
				file_obj = TxtFile(**file_info)
			elif file_type == 'JsonFile':
				file_obj = JsonFile(**file_info)
			elif file_type == 'JsonlFile':
				file_obj = JsonlFile(**file_info)
			elif file_type == 'CsvFile':
				file_obj = CsvFile(**file_info)
			elif file_type == 'PdfFile':
				file_obj = PdfFile(**file_info)
			elif file_type == 'DocxFile':
				file_obj = DocxFile(**file_info)
			else:
				# Skip unknown file types
				continue
			# Add to files dict and sync to disk
			fs.files[full_filename] = file_obj
			file_obj.sync_to_disk_sync(fs.data_dir)
		return fs
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/filesystem/__init__.py | browser_use/filesystem/__init__.py | python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false | |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/views.py | browser_use/llm/views.py | from typing import Generic, TypeVar, Union
from pydantic import BaseModel
T = TypeVar('T', bound=Union[BaseModel, str])
class ChatInvokeUsage(BaseModel):
"""
Usage information for a chat model invocation.
"""
prompt_tokens: int
"""The number of tokens in the prompt (this includes the cached tokens as well. When calculating the cost, subtract the cached tokens from the prompt tokens)"""
prompt_cached_tokens: int | None
"""The number of cached tokens."""
prompt_cache_creation_tokens: int | None
"""Anthropic only: The number of tokens used to create the cache."""
prompt_image_tokens: int | None
"""Google only: The number of tokens in the image (prompt tokens is the text tokens + image tokens in that case)"""
completion_tokens: int
"""The number of tokens in the completion."""
total_tokens: int
"""The total number of tokens in the response."""
class ChatInvokeCompletion(BaseModel, Generic[T]):
"""
Response from a chat model invocation.
"""
completion: T
"""The completion of the response."""
# Thinking stuff
thinking: str | None = None
redacted_thinking: str | None = None
usage: ChatInvokeUsage | None
"""The usage of the response."""
stop_reason: str | None = None
"""The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'."""
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/schema.py | browser_use/llm/schema.py | """
Utilities for creating optimized Pydantic schemas for LLM usage.
"""
from typing import Any
from pydantic import BaseModel
class SchemaOptimizer:
@staticmethod
def create_optimized_json_schema(
model: type[BaseModel],
*,
remove_min_items: bool = False,
remove_defaults: bool = False,
) -> dict[str, Any]:
"""
Create the most optimized schema by flattening all $ref/$defs while preserving
FULL descriptions and ALL action definitions. Also ensures OpenAI strict mode compatibility.
Args:
model: The Pydantic model to optimize
remove_min_items: If True, remove minItems from the schema
remove_defaults: If True, remove default values from the schema
Returns:
Optimized schema with all $refs resolved and strict mode compatibility
"""
# Generate original schema
original_schema = model.model_json_schema()
# Extract $defs for reference resolution, then flatten everything
defs_lookup = original_schema.get('$defs', {})
# Create optimized schema with flattening
# Pass flags to optimize_schema via closure
def optimize_schema(obj: Any, defs_lookup: dict[str, Any] | None = None, *, in_properties: bool = False) -> Any:
"""Apply all optimization techniques including flattening all $ref/$defs"""
if isinstance(obj, dict):
optimized: dict[str, Any] = {}
flattened_ref: dict[str, Any] | None = None
# Skip unnecessary fields AND $defs (we'll inline everything)
skip_fields = ['additionalProperties', '$defs']
for key, value in obj.items():
if key in skip_fields:
continue
# Skip metadata "title" unless we're iterating inside an actual `properties` map
if key == 'title' and not in_properties:
continue
# Preserve FULL descriptions without truncation, skip empty ones
elif key == 'description':
if value: # Only include non-empty descriptions
optimized[key] = value
# Handle type field - must recursively process in case value contains $ref
elif key == 'type':
optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
# FLATTEN: Resolve $ref by inlining the actual definition
elif key == '$ref' and defs_lookup:
ref_path = value.split('/')[-1] # Get the definition name from "#/$defs/SomeName"
if ref_path in defs_lookup:
# Get the referenced definition and flatten it
referenced_def = defs_lookup[ref_path]
flattened_ref = optimize_schema(referenced_def, defs_lookup)
# Skip minItems/min_items and default if requested (check BEFORE processing)
elif key in ('minItems', 'min_items') and remove_min_items:
continue # Skip minItems/min_items
elif key == 'default' and remove_defaults:
continue # Skip default values
# Keep all anyOf structures (action unions) and resolve any $refs within
elif key == 'anyOf' and isinstance(value, list):
optimized[key] = [optimize_schema(item, defs_lookup) for item in value]
# Recursively optimize nested structures
elif key in ['properties', 'items']:
optimized[key] = optimize_schema(
value,
defs_lookup,
in_properties=(key == 'properties'),
)
# Keep essential validation fields
elif key in [
'type',
'required',
'minimum',
'maximum',
'minItems',
'min_items',
'maxItems',
'pattern',
'default',
]:
optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
# Recursively process all other fields
else:
optimized[key] = optimize_schema(value, defs_lookup) if isinstance(value, (dict, list)) else value
# If we have a flattened reference, merge it with the optimized properties
if flattened_ref is not None and isinstance(flattened_ref, dict):
# Start with the flattened reference as the base
result = flattened_ref.copy()
# Merge in any sibling properties that were processed
for key, value in optimized.items():
# Preserve descriptions from the original object if they exist
if key == 'description' and 'description' not in result:
result[key] = value
elif key != 'description': # Don't overwrite description from flattened ref
result[key] = value
return result
else:
# No $ref, just return the optimized object
# CRITICAL: Add additionalProperties: false to ALL objects for OpenAI strict mode
if optimized.get('type') == 'object':
optimized['additionalProperties'] = False
return optimized
elif isinstance(obj, list):
return [optimize_schema(item, defs_lookup, in_properties=in_properties) for item in obj]
return obj
optimized_result = optimize_schema(original_schema, defs_lookup)
# Ensure we have a dictionary (should always be the case for schema root)
if not isinstance(optimized_result, dict):
raise ValueError('Optimized schema result is not a dictionary')
optimized_schema: dict[str, Any] = optimized_result
# Additional pass to ensure ALL objects have additionalProperties: false
def ensure_additional_properties_false(obj: Any) -> None:
"""Ensure all objects have additionalProperties: false"""
if isinstance(obj, dict):
# If it's an object type, ensure additionalProperties is false
if obj.get('type') == 'object':
obj['additionalProperties'] = False
# Recursively apply to all values
for value in obj.values():
if isinstance(value, (dict, list)):
ensure_additional_properties_false(value)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (dict, list)):
ensure_additional_properties_false(item)
ensure_additional_properties_false(optimized_schema)
SchemaOptimizer._make_strict_compatible(optimized_schema)
# Final pass to remove minItems/min_items and default values if requested
if remove_min_items or remove_defaults:
def remove_forbidden_fields(obj: Any) -> None:
"""Recursively remove minItems/min_items and default values"""
if isinstance(obj, dict):
# Remove forbidden keys
if remove_min_items:
obj.pop('minItems', None)
obj.pop('min_items', None)
if remove_defaults:
obj.pop('default', None)
# Recursively process all values
for value in obj.values():
if isinstance(value, (dict, list)):
remove_forbidden_fields(value)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (dict, list)):
remove_forbidden_fields(item)
remove_forbidden_fields(optimized_schema)
return optimized_schema
@staticmethod
def _make_strict_compatible(schema: dict[str, Any] | list[Any]) -> None:
"""Ensure all properties are required for OpenAI strict mode"""
if isinstance(schema, dict):
# First recursively apply to nested objects
for key, value in schema.items():
if isinstance(value, (dict, list)) and key != 'required':
SchemaOptimizer._make_strict_compatible(value)
# Then update required for this level
if 'properties' in schema and 'type' in schema and schema['type'] == 'object':
# Add all properties to required array
all_props = list(schema['properties'].keys())
schema['required'] = all_props # Set all properties as required
elif isinstance(schema, list):
for item in schema:
SchemaOptimizer._make_strict_compatible(item)
@staticmethod
def create_gemini_optimized_schema(model: type[BaseModel]) -> dict[str, Any]:
"""
Create Gemini-optimized schema, preserving explicit `required` arrays so Gemini
respects mandatory fields defined by the caller.
Args:
model: The Pydantic model to optimize
Returns:
Optimized schema suitable for Gemini structured output
"""
return SchemaOptimizer.create_optimized_json_schema(model)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/models.py | browser_use/llm/models.py | """
Convenient access to LLM models.
Usage:
from browser_use import llm
# Simple model access
model = llm.azure_gpt_4_1_mini
model = llm.openai_gpt_4o
model = llm.google_gemini_2_5_pro
model = llm.bu_latest
"""
import os
from typing import TYPE_CHECKING
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.cerebras.chat import ChatCerebras
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.mistral.chat import ChatMistral
from browser_use.llm.openai.chat import ChatOpenAI
# Optional OCI import
try:
from browser_use.llm.oci_raw.chat import ChatOCIRaw
OCI_AVAILABLE = True
except ImportError:
ChatOCIRaw = None
OCI_AVAILABLE = False
if TYPE_CHECKING:
from browser_use.llm.base import BaseChatModel
# Type stubs for IDE autocomplete
openai_gpt_4o: 'BaseChatModel'
openai_gpt_4o_mini: 'BaseChatModel'
openai_gpt_4_1_mini: 'BaseChatModel'
openai_o1: 'BaseChatModel'
openai_o1_mini: 'BaseChatModel'
openai_o1_pro: 'BaseChatModel'
openai_o3: 'BaseChatModel'
openai_o3_mini: 'BaseChatModel'
openai_o3_pro: 'BaseChatModel'
openai_o4_mini: 'BaseChatModel'
openai_gpt_5: 'BaseChatModel'
openai_gpt_5_mini: 'BaseChatModel'
openai_gpt_5_nano: 'BaseChatModel'
azure_gpt_4o: 'BaseChatModel'
azure_gpt_4o_mini: 'BaseChatModel'
azure_gpt_4_1_mini: 'BaseChatModel'
azure_o1: 'BaseChatModel'
azure_o1_mini: 'BaseChatModel'
azure_o1_pro: 'BaseChatModel'
azure_o3: 'BaseChatModel'
azure_o3_mini: 'BaseChatModel'
azure_o3_pro: 'BaseChatModel'
azure_gpt_5: 'BaseChatModel'
azure_gpt_5_mini: 'BaseChatModel'
google_gemini_2_0_flash: 'BaseChatModel'
google_gemini_2_0_pro: 'BaseChatModel'
google_gemini_2_5_pro: 'BaseChatModel'
google_gemini_2_5_flash: 'BaseChatModel'
google_gemini_2_5_flash_lite: 'BaseChatModel'
mistral_large: 'BaseChatModel'
mistral_medium: 'BaseChatModel'
mistral_small: 'BaseChatModel'
codestral: 'BaseChatModel'
pixtral_large: 'BaseChatModel'
cerebras_llama3_1_8b: 'BaseChatModel'
cerebras_llama3_3_70b: 'BaseChatModel'
cerebras_gpt_oss_120b: 'BaseChatModel'
cerebras_llama_4_scout_17b_16e_instruct: 'BaseChatModel'
cerebras_llama_4_maverick_17b_128e_instruct: 'BaseChatModel'
cerebras_qwen_3_32b: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_instruct_2507: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_thinking_2507: 'BaseChatModel'
cerebras_qwen_3_coder_480b: 'BaseChatModel'
bu_latest: 'BaseChatModel'
bu_1_0: 'BaseChatModel'
def get_llm_by_name(model_name: str):
"""
Factory function to create LLM instances from string names with API keys from environment.
Args:
model_name: String name like 'azure_gpt_4_1_mini', 'openai_gpt_4o', etc.
Returns:
LLM instance with API keys from environment variables
Raises:
ValueError: If model_name is not recognized
"""
if not model_name:
raise ValueError('Model name cannot be empty')
# Handle top-level Mistral aliases without provider prefix
mistral_aliases = {
'mistral_large': 'mistral-large-latest',
'mistral_medium': 'mistral-medium-latest',
'mistral_small': 'mistral-small-latest',
'codestral': 'codestral-latest',
'pixtral_large': 'pixtral-large-latest',
}
if model_name in mistral_aliases:
api_key = os.getenv('MISTRAL_API_KEY')
base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
return ChatMistral(model=mistral_aliases[model_name], api_key=api_key, base_url=base_url)
# Parse model name
parts = model_name.split('_', 1)
if len(parts) < 2:
raise ValueError(f"Invalid model name format: '{model_name}'. Expected format: 'provider_model_name'")
provider = parts[0]
model_part = parts[1]
# Convert underscores back to dots/dashes for actual model names
if 'gpt_4_1_mini' in model_part:
model = model_part.replace('gpt_4_1_mini', 'gpt-4.1-mini')
elif 'gpt_4o_mini' in model_part:
model = model_part.replace('gpt_4o_mini', 'gpt-4o-mini')
elif 'gpt_4o' in model_part:
model = model_part.replace('gpt_4o', 'gpt-4o')
elif 'gemini_2_0' in model_part:
model = model_part.replace('gemini_2_0', 'gemini-2.0').replace('_', '-')
elif 'gemini_2_5' in model_part:
model = model_part.replace('gemini_2_5', 'gemini-2.5').replace('_', '-')
elif 'llama3_1' in model_part:
model = model_part.replace('llama3_1', 'llama3.1').replace('_', '-')
elif 'llama3_3' in model_part:
model = model_part.replace('llama3_3', 'llama-3.3').replace('_', '-')
elif 'llama_4_scout' in model_part:
model = model_part.replace('llama_4_scout', 'llama-4-scout').replace('_', '-')
elif 'llama_4_maverick' in model_part:
model = model_part.replace('llama_4_maverick', 'llama-4-maverick').replace('_', '-')
elif 'gpt_oss_120b' in model_part:
model = model_part.replace('gpt_oss_120b', 'gpt-oss-120b')
elif 'qwen_3_32b' in model_part:
model = model_part.replace('qwen_3_32b', 'qwen-3-32b')
elif 'qwen_3_235b_a22b_instruct' in model_part:
if model_part.endswith('_2507'):
model = model_part.replace('qwen_3_235b_a22b_instruct_2507', 'qwen-3-235b-a22b-instruct-2507')
else:
model = model_part.replace('qwen_3_235b_a22b_instruct', 'qwen-3-235b-a22b-instruct-2507')
elif 'qwen_3_235b_a22b_thinking' in model_part:
if model_part.endswith('_2507'):
model = model_part.replace('qwen_3_235b_a22b_thinking_2507', 'qwen-3-235b-a22b-thinking-2507')
else:
model = model_part.replace('qwen_3_235b_a22b_thinking', 'qwen-3-235b-a22b-thinking-2507')
elif 'qwen_3_coder_480b' in model_part:
model = model_part.replace('qwen_3_coder_480b', 'qwen-3-coder-480b')
else:
model = model_part.replace('_', '-')
# OpenAI Models
if provider == 'openai':
api_key = os.getenv('OPENAI_API_KEY')
return ChatOpenAI(model=model, api_key=api_key)
# Azure OpenAI Models
elif provider == 'azure':
api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
return ChatAzureOpenAI(model=model, api_key=api_key, azure_endpoint=azure_endpoint)
# Google Models
elif provider == 'google':
api_key = os.getenv('GOOGLE_API_KEY')
return ChatGoogle(model=model, api_key=api_key)
# Mistral Models
elif provider == 'mistral':
api_key = os.getenv('MISTRAL_API_KEY')
base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
mistral_map = {
'large': 'mistral-large-latest',
'medium': 'mistral-medium-latest',
'small': 'mistral-small-latest',
'codestral': 'codestral-latest',
'pixtral-large': 'pixtral-large-latest',
}
normalized_model_part = model_part.replace('_', '-')
resolved_model = mistral_map.get(normalized_model_part, model.replace('_', '-'))
return ChatMistral(model=resolved_model, api_key=api_key, base_url=base_url)
# OCI Models
elif provider == 'oci':
# OCI requires more complex configuration that can't be easily inferred from env vars
# Users should use ChatOCIRaw directly with proper configuration
raise ValueError('OCI models require manual configuration. Use ChatOCIRaw directly with your OCI credentials.')
# Cerebras Models
elif provider == 'cerebras':
api_key = os.getenv('CEREBRAS_API_KEY')
return ChatCerebras(model=model, api_key=api_key)
# Browser Use Models
elif provider == 'bu':
# Handle bu_latest -> bu-latest conversion (need to prepend 'bu-' back)
model = f'bu-{model_part.replace("_", "-")}'
api_key = os.getenv('BROWSER_USE_API_KEY')
return ChatBrowserUse(model=model, api_key=api_key)
else:
available_providers = ['openai', 'azure', 'google', 'oci', 'cerebras', 'bu']
raise ValueError(f"Unknown provider: '{provider}'. Available providers: {', '.join(available_providers)}")
# Pre-configured model instances (lazy loaded via __getattr__)
def __getattr__(name: str) -> 'BaseChatModel':
"""Create model instances on demand with API keys from environment."""
# Handle chat classes first
if name == 'ChatOpenAI':
return ChatOpenAI # type: ignore
elif name == 'ChatAzureOpenAI':
return ChatAzureOpenAI # type: ignore
elif name == 'ChatGoogle':
return ChatGoogle # type: ignore
elif name == 'ChatMistral':
return ChatMistral # type: ignore
elif name == 'ChatOCIRaw':
if not OCI_AVAILABLE:
raise ImportError('OCI integration not available. Install with: pip install "browser-use[oci]"')
return ChatOCIRaw # type: ignore
elif name == 'ChatCerebras':
return ChatCerebras # type: ignore
elif name == 'ChatBrowserUse':
return ChatBrowserUse # type: ignore
# Handle model instances - these are the main use case
try:
return get_llm_by_name(name)
except ValueError:
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
# Export all classes and preconfigured instances, conditionally including ChatOCIRaw
__all__ = [
'ChatOpenAI',
'ChatAzureOpenAI',
'ChatGoogle',
'ChatMistral',
'ChatCerebras',
'ChatBrowserUse',
]
if OCI_AVAILABLE:
__all__.append('ChatOCIRaw')
__all__ += [
'get_llm_by_name',
# OpenAI instances - created on demand
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4_1_mini',
'openai_o1',
'openai_o1_mini',
'openai_o1_pro',
'openai_o3',
'openai_o3_mini',
'openai_o3_pro',
'openai_o4_mini',
'openai_gpt_5',
'openai_gpt_5_mini',
'openai_gpt_5_nano',
# Azure instances - created on demand
'azure_gpt_4o',
'azure_gpt_4o_mini',
'azure_gpt_4_1_mini',
'azure_o1',
'azure_o1_mini',
'azure_o1_pro',
'azure_o3',
'azure_o3_mini',
'azure_o3_pro',
'azure_gpt_5',
'azure_gpt_5_mini',
# Google instances - created on demand
'google_gemini_2_0_flash',
'google_gemini_2_0_pro',
'google_gemini_2_5_pro',
'google_gemini_2_5_flash',
'google_gemini_2_5_flash_lite',
# Mistral instances - created on demand
'mistral_large',
'mistral_medium',
'mistral_small',
'codestral',
'pixtral_large',
# Cerebras instances - created on demand
'cerebras_llama3_1_8b',
'cerebras_llama3_3_70b',
'cerebras_gpt_oss_120b',
'cerebras_llama_4_scout_17b_16e_instruct',
'cerebras_llama_4_maverick_17b_128e_instruct',
'cerebras_qwen_3_32b',
'cerebras_qwen_3_235b_a22b_instruct_2507',
'cerebras_qwen_3_235b_a22b_thinking_2507',
'cerebras_qwen_3_coder_480b',
# Browser Use instances - created on demand
'bu_latest',
'bu_1_0',
]
# NOTE: OCI backend is optional. The try/except ImportError and conditional __all__ are required
# so this module can be imported without browser-use[oci] installed.
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/exceptions.py | browser_use/llm/exceptions.py | class ModelError(Exception):
pass
class ModelProviderError(ModelError):
"""Exception raised when a model provider returns an error."""
def __init__(
self,
message: str,
status_code: int = 502,
model: str | None = None,
):
super().__init__(message)
self.message = message
self.status_code = status_code
self.model = model
class ModelRateLimitError(ModelProviderError):
"""Exception raised when a model provider returns a rate limit error."""
def __init__(
self,
message: str,
status_code: int = 429,
model: str | None = None,
):
super().__init__(message, status_code, model)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/__init__.py | browser_use/llm/__init__.py | """
We have switched all of our code from langchain to openai.types.chat.chat_completion_message_param.
For easier transition we have
"""
from typing import TYPE_CHECKING
# Lightweight imports that are commonly used
from browser_use.llm.base import BaseChatModel
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
UserMessage,
)
from browser_use.llm.messages import (
ContentPartImageParam as ContentImage,
)
from browser_use.llm.messages import (
ContentPartRefusalParam as ContentRefusal,
)
from browser_use.llm.messages import (
ContentPartTextParam as ContentText,
)
# Type stubs for lazy imports
if TYPE_CHECKING:
from browser_use.llm.anthropic.chat import ChatAnthropic
from browser_use.llm.aws.chat_anthropic import ChatAnthropicBedrock
from browser_use.llm.aws.chat_bedrock import ChatAWSBedrock
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.cerebras.chat import ChatCerebras
from browser_use.llm.deepseek.chat import ChatDeepSeek
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.groq.chat import ChatGroq
from browser_use.llm.mistral.chat import ChatMistral
from browser_use.llm.oci_raw.chat import ChatOCIRaw
from browser_use.llm.ollama.chat import ChatOllama
from browser_use.llm.openai.chat import ChatOpenAI
from browser_use.llm.openrouter.chat import ChatOpenRouter
from browser_use.llm.vercel.chat import ChatVercel
# Type stubs for model instances - enables IDE autocomplete
openai_gpt_4o: ChatOpenAI
openai_gpt_4o_mini: ChatOpenAI
openai_gpt_4_1_mini: ChatOpenAI
openai_o1: ChatOpenAI
openai_o1_mini: ChatOpenAI
openai_o1_pro: ChatOpenAI
openai_o3: ChatOpenAI
openai_o3_mini: ChatOpenAI
openai_o3_pro: ChatOpenAI
openai_o4_mini: ChatOpenAI
openai_gpt_5: ChatOpenAI
openai_gpt_5_mini: ChatOpenAI
openai_gpt_5_nano: ChatOpenAI
azure_gpt_4o: ChatAzureOpenAI
azure_gpt_4o_mini: ChatAzureOpenAI
azure_gpt_4_1_mini: ChatAzureOpenAI
azure_o1: ChatAzureOpenAI
azure_o1_mini: ChatAzureOpenAI
azure_o1_pro: ChatAzureOpenAI
azure_o3: ChatAzureOpenAI
azure_o3_mini: ChatAzureOpenAI
azure_o3_pro: ChatAzureOpenAI
azure_gpt_5: ChatAzureOpenAI
azure_gpt_5_mini: ChatAzureOpenAI
google_gemini_2_0_flash: ChatGoogle
google_gemini_2_0_pro: ChatGoogle
google_gemini_2_5_pro: ChatGoogle
google_gemini_2_5_flash: ChatGoogle
google_gemini_2_5_flash_lite: ChatGoogle
# Models are imported on-demand via __getattr__
# Lazy imports mapping for heavy chat models
_LAZY_IMPORTS = {
'ChatAnthropic': ('browser_use.llm.anthropic.chat', 'ChatAnthropic'),
'ChatAnthropicBedrock': ('browser_use.llm.aws.chat_anthropic', 'ChatAnthropicBedrock'),
'ChatAWSBedrock': ('browser_use.llm.aws.chat_bedrock', 'ChatAWSBedrock'),
'ChatAzureOpenAI': ('browser_use.llm.azure.chat', 'ChatAzureOpenAI'),
'ChatBrowserUse': ('browser_use.llm.browser_use.chat', 'ChatBrowserUse'),
'ChatCerebras': ('browser_use.llm.cerebras.chat', 'ChatCerebras'),
'ChatDeepSeek': ('browser_use.llm.deepseek.chat', 'ChatDeepSeek'),
'ChatGoogle': ('browser_use.llm.google.chat', 'ChatGoogle'),
'ChatGroq': ('browser_use.llm.groq.chat', 'ChatGroq'),
'ChatMistral': ('browser_use.llm.mistral.chat', 'ChatMistral'),
'ChatOCIRaw': ('browser_use.llm.oci_raw.chat', 'ChatOCIRaw'),
'ChatOllama': ('browser_use.llm.ollama.chat', 'ChatOllama'),
'ChatOpenAI': ('browser_use.llm.openai.chat', 'ChatOpenAI'),
'ChatOpenRouter': ('browser_use.llm.openrouter.chat', 'ChatOpenRouter'),
'ChatVercel': ('browser_use.llm.vercel.chat', 'ChatVercel'),
}
# Cache for model instances - only created when accessed
_model_cache: dict[str, 'BaseChatModel'] = {}
def __getattr__(name: str):
"""Lazy import mechanism for heavy chat model imports and model instances."""
if name in _LAZY_IMPORTS:
module_path, attr_name = _LAZY_IMPORTS[name]
try:
from importlib import import_module
module = import_module(module_path)
attr = getattr(module, attr_name)
return attr
except ImportError as e:
raise ImportError(f'Failed to import {name} from {module_path}: {e}') from e
# Check cache first for model instances
if name in _model_cache:
return _model_cache[name]
# Try to get model instances from models module on-demand
try:
from browser_use.llm.models import __getattr__ as models_getattr
attr = models_getattr(name)
# Cache in our clean cache dict
_model_cache[name] = attr
return attr
except (AttributeError, ImportError):
pass
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
__all__ = [
# Message types -> for easier transition from langchain
'BaseMessage',
'UserMessage',
'SystemMessage',
'AssistantMessage',
# Content parts with better names
'ContentText',
'ContentRefusal',
'ContentImage',
# Chat models
'BaseChatModel',
'ChatOpenAI',
'ChatBrowserUse',
'ChatDeepSeek',
'ChatGoogle',
'ChatAnthropic',
'ChatAnthropicBedrock',
'ChatAWSBedrock',
'ChatGroq',
'ChatMistral',
'ChatAzureOpenAI',
'ChatOCIRaw',
'ChatOllama',
'ChatOpenRouter',
'ChatVercel',
'ChatCerebras',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/messages.py | browser_use/llm/messages.py | """
This implementation is based on the OpenAI types, while removing all the parts that are not needed for Browser Use.
"""
# region - Content parts
from typing import Literal, Union
from pydantic import BaseModel
def _truncate(text: str, max_length: int = 50) -> str:
"""Truncate text to max_length characters, adding ellipsis if truncated."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + '...'
def _format_image_url(url: str, max_length: int = 50) -> str:
"""Format image URL for display, truncating if necessary."""
if url.startswith('data:'):
# Base64 image
media_type = url.split(';')[0].split(':')[1] if ';' in url else 'image'
return f'<base64 {media_type}>'
else:
# Regular URL
return _truncate(url, max_length)
class ContentPartTextParam(BaseModel):
text: str
type: Literal['text'] = 'text'
def __str__(self) -> str:
return f'Text: {_truncate(self.text)}'
def __repr__(self) -> str:
return f'ContentPartTextParam(text={_truncate(self.text)})'
class ContentPartRefusalParam(BaseModel):
refusal: str
type: Literal['refusal'] = 'refusal'
def __str__(self) -> str:
return f'Refusal: {_truncate(self.refusal)}'
def __repr__(self) -> str:
return f'ContentPartRefusalParam(refusal={_truncate(repr(self.refusal), 50)})'
SupportedImageMediaType = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
class ImageURL(BaseModel):
url: str
"""Either a URL of the image or the base64 encoded image data."""
detail: Literal['auto', 'low', 'high'] = 'auto'
"""Specifies the detail level of the image.
Learn more in the
[Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
# needed for Anthropic
media_type: SupportedImageMediaType = 'image/png'
def __str__(self) -> str:
url_display = _format_image_url(self.url)
return f'๐ผ๏ธ Image[{self.media_type}, detail={self.detail}]: {url_display}'
def __repr__(self) -> str:
url_repr = _format_image_url(self.url, 30)
return f'ImageURL(url={repr(url_repr)}, detail={repr(self.detail)}, media_type={repr(self.media_type)})'
class ContentPartImageParam(BaseModel):
image_url: ImageURL
type: Literal['image_url'] = 'image_url'
def __str__(self) -> str:
return str(self.image_url)
def __repr__(self) -> str:
return f'ContentPartImageParam(image_url={repr(self.image_url)})'
class Function(BaseModel):
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
def __str__(self) -> str:
args_preview = _truncate(self.arguments, 80)
return f'{self.name}({args_preview})'
def __repr__(self) -> str:
args_repr = _truncate(repr(self.arguments), 50)
return f'Function(name={repr(self.name)}, arguments={args_repr})'
class ToolCall(BaseModel):
id: str
"""The ID of the tool call."""
function: Function
"""The function that the model called."""
type: Literal['function'] = 'function'
"""The type of the tool. Currently, only `function` is supported."""
def __str__(self) -> str:
return f'ToolCall[{self.id}]: {self.function}'
def __repr__(self) -> str:
return f'ToolCall(id={repr(self.id)}, function={repr(self.function)})'
# endregion
# region - Message types
class _MessageBase(BaseModel):
"""Base class for all message types"""
role: Literal['user', 'system', 'assistant']
cache: bool = False
"""Whether to cache this message. This is only applicable when using Anthropic models.
"""
class UserMessage(_MessageBase):
role: Literal['user'] = 'user'
"""The role of the messages author, in this case `user`."""
content: str | list[ContentPartTextParam | ContentPartImageParam]
"""The contents of the user message."""
name: str | None = None
"""An optional name for the participant.
Provides the model information to differentiate between participants of the same
role.
"""
@property
def text(self) -> str:
"""
Automatically parse the text inside content, whether it's a string or a list of content parts.
"""
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
return '\n'.join([part.text for part in self.content if part.type == 'text'])
else:
return ''
def __str__(self) -> str:
return f'UserMessage(content={self.text})'
def __repr__(self) -> str:
return f'UserMessage(content={repr(self.text)})'
class SystemMessage(_MessageBase):
	"""A system-authored message; content is restricted to text parts."""

	role: Literal['system'] = 'system'
	# Message payload: a plain string or a list of text content parts.
	content: str | list[ContentPartTextParam]
	# Optional participant name.
	name: str | None = None

	@property
	def text(self) -> str:
		"""Flatten the content into a single plain-text string."""
		if isinstance(self.content, list):
			return '\n'.join(part.text for part in self.content if part.type == 'text')
		if isinstance(self.content, str):
			return self.content
		return ''

	def __str__(self) -> str:
		return f'SystemMessage(content={self.text})'

	def __repr__(self) -> str:
		return f'SystemMessage(content={repr(self.text)})'
class AssistantMessage(_MessageBase):
	"""A model-authored message; may carry text, refusals and tool calls."""

	role: Literal['assistant'] = 'assistant'
	# Message payload: plain string, list of text/refusal parts, or None (tool-calls only).
	content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None
	# Optional participant name.
	name: str | None = None
	# The refusal message by the assistant, if any.
	refusal: str | None = None
	# Tool calls generated by the model, such as function calls.
	tool_calls: list[ToolCall] = []

	@property
	def text(self) -> str:
		"""Flatten the content into plain text; refusal parts are tagged inline."""
		if isinstance(self.content, str):
			return self.content
		if isinstance(self.content, list):
			# Content parts are only ever 'text' or 'refusal' per the type union.
			return ''.join(
				part.text if part.type == 'text' else f'[Refusal] {part.refusal}' for part in self.content
			)
		return ''

	def __str__(self) -> str:
		return f'AssistantMessage(content={self.text})'

	def __repr__(self) -> str:
		return f'AssistantMessage(content={repr(self.text)})'
BaseMessage = Union[UserMessage, SystemMessage, AssistantMessage]
# endregion
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/base.py | browser_use/llm/base.py | """
We have switched all of our code from langchain to openai.types.chat.chat_completion_message_param.
For easier transition we have
"""
from typing import Any, Protocol, TypeVar, overload, runtime_checkable
from pydantic import BaseModel
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@runtime_checkable
class BaseChatModel(Protocol):
	"""Structural interface every chat-model wrapper must satisfy.

	Implementations expose an async ``ainvoke`` that returns either raw text
	or, when ``output_format`` is a pydantic model class, a parsed instance
	of that class.
	"""

	# Flag tracked by implementations; presumably set once API keys have been
	# validated — confirm against concrete providers.
	_verified_api_keys: bool = False
	# Provider-specific model identifier.
	model: str

	@property
	def provider(self) -> str: ...

	@property
	def name(self) -> str: ...

	@property
	def model_name(self) -> str:
		# for legacy support
		return self.model

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]: ...

	@classmethod
	def __get_pydantic_core_schema__(
		cls,
		source_type: type,
		handler: Any,
	) -> Any:
		"""
		Allow this Protocol to be used in Pydantic models -> very useful to typesafe the agent settings for example.
		Returns a schema that allows any object (since this is a Protocol).
		"""
		# Imported lazily so merely defining the protocol does not require pydantic_core.
		from pydantic_core import core_schema

		# Return a schema that accepts any object for Protocol types
		return core_schema.any_schema()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/oci_raw/serializer.py | browser_use/llm/oci_raw/serializer.py | """
Message serializer for OCI Raw API integration.
This module handles the conversion between browser-use message formats
and the OCI Raw API message format using proper OCI SDK models.
"""
from oci.generative_ai_inference.models import ImageContent, ImageUrl, Message, TextContent
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
SystemMessage,
UserMessage,
)
class OCIRawMessageSerializer:
	"""
	Serializer for converting between browser-use message types and OCI Raw API message formats.
	Uses proper OCI SDK model objects as shown in the working example.
	Supports both:
	- GenericChatRequest (Meta, xAI models) - uses messages array
	- CohereChatRequest (Cohere models) - uses single message string
	"""

	@staticmethod
	def _is_base64_image(url: str) -> bool:
		"""Check if the URL is a base64 encoded image (a data: URI)."""
		return url.startswith('data:image/')

	@staticmethod
	def _parse_base64_url(url: str) -> str:
		"""Parse a base64 data URL (data:image/png;base64,<data>) and return the base64 payload.

		Raises:
			ValueError: If the URL is not a base64 image URL or is malformed.
		"""
		if not OCIRawMessageSerializer._is_base64_image(url):
			raise ValueError(f'Not a base64 image URL: {url}')
		# Extract the base64 data from data:image/png;base64,<data>
		try:
			header, data = url.split(',', 1)
			return data
		except ValueError:
			raise ValueError(f'Invalid base64 image URL format: {url}')

	@staticmethod
	def _text_content(text: str) -> TextContent:
		"""Build an OCI TextContent part from a plain string."""
		part = TextContent()
		part.text = text
		return part

	@staticmethod
	def _create_image_content(part: ContentPartImageParam) -> ImageContent:
		"""Convert ContentPartImageParam to OCI ImageContent.

		OCI accepts both regular URLs and base64 data URLs verbatim, so the URL
		is passed through unchanged in either case (the previous if/else had
		two identical branches).
		"""
		return ImageContent(image_url=ImageUrl(url=part.image_url.url))

	@staticmethod
	def _serialize_parts(parts) -> list:
		"""Convert browser-use content parts into OCI content objects.

		Text and refusal parts become TextContent (refusals tagged inline);
		image parts become ImageContent. Unknown part types are skipped.
		"""
		contents = []
		for part in parts:
			if part.type == 'text':
				contents.append(OCIRawMessageSerializer._text_content(part.text))
			elif part.type == 'image_url':
				contents.append(OCIRawMessageSerializer._create_image_content(part))
			elif part.type == 'refusal':
				contents.append(OCIRawMessageSerializer._text_content(f'[Refusal] {part.refusal}'))
		return contents

	@staticmethod
	def _assign_content(oci_message: Message, content) -> None:
		"""Populate oci_message.content from a browser-use message's content.

		Leaves the message without content when the input is None or when a
		part list serializes to nothing; the caller then drops the message.
		"""
		if isinstance(content, str):
			oci_message.content = [OCIRawMessageSerializer._text_content(content)]
		elif isinstance(content, list):
			contents = OCIRawMessageSerializer._serialize_parts(content)
			if contents:
				oci_message.content = contents

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
		"""
		Serialize a list of browser-use messages to OCI Raw API Message objects.
		Args:
			messages: List of browser-use messages
		Returns:
			List of OCI Message objects; messages that end up without content are dropped
		"""
		oci_messages = []
		for message in messages:
			oci_message = Message()
			if isinstance(message, UserMessage):
				oci_message.role = 'USER'
				OCIRawMessageSerializer._assign_content(oci_message, message.content)
			elif isinstance(message, SystemMessage):
				oci_message.role = 'SYSTEM'
				OCIRawMessageSerializer._assign_content(oci_message, message.content)
			elif isinstance(message, AssistantMessage):
				oci_message.role = 'ASSISTANT'
				OCIRawMessageSerializer._assign_content(oci_message, message.content)
			else:
				# Fallback for any message format issues
				oci_message.role = 'USER'
				oci_message.content = [OCIRawMessageSerializer._text_content(str(message))]
			# Only append messages that have content
			if getattr(oci_message, 'content', None):
				oci_messages.append(oci_message)
		return oci_messages

	@staticmethod
	def _cohere_text(content) -> str:
		"""Flatten one message's content to plain text for the Cohere format.

		Image parts are replaced with short placeholders to avoid massive token
		usage from base64 data URIs; refusal parts are tagged inline.
		"""
		if isinstance(content, str):
			return content
		if isinstance(content, list):
			text_parts = []
			for part in content:
				if part.type == 'text':
					text_parts.append(part.text)
				elif part.type == 'image_url':
					# Cohere may not support images in all models, use a short placeholder
					if part.image_url.url.startswith('data:image/'):
						text_parts.append('[Image: base64_data]')
					else:
						text_parts.append('[Image: external_url]')
				elif part.type == 'refusal':
					text_parts.append(f'[Refusal] {part.refusal}')
			return ' '.join(text_parts)
		# None (assistant with tool calls only) or anything unexpected
		return ''

	@staticmethod
	def serialize_messages_for_cohere(messages: list[BaseMessage]) -> str:
		"""
		Serialize messages for Cohere models which expect a single message string.
		Cohere models use CohereChatRequest.message (string) instead of messages array.
		We combine all messages into a single conversation string.
		Args:
			messages: List of browser-use messages
		Returns:
			Single string containing the conversation
		"""
		conversation_parts = []
		for message in messages:
			if isinstance(message, UserMessage):
				conversation_parts.append(f'User: {OCIRawMessageSerializer._cohere_text(message.content)}')
			elif isinstance(message, SystemMessage):
				conversation_parts.append(f'System: {OCIRawMessageSerializer._cohere_text(message.content)}')
			elif isinstance(message, AssistantMessage):
				conversation_parts.append(f'Assistant: {OCIRawMessageSerializer._cohere_text(message.content)}')
			else:
				# Fallback
				conversation_parts.append(f'User: {str(message)}')
		return '\n\n'.join(conversation_parts)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/oci_raw/chat.py | browser_use/llm/oci_raw/chat.py | """
OCI Raw API chat model integration for browser-use.
This module provides direct integration with Oracle Cloud Infrastructure's
Generative AI service using raw API calls without Langchain dependencies.
"""
import asyncio
import json
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import oci
from oci.generative_ai_inference import GenerativeAiInferenceClient
from oci.generative_ai_inference.models import (
BaseChatRequest,
ChatDetails,
CohereChatRequest,
GenericChatRequest,
OnDemandServingMode,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from .serializer import OCIRawMessageSerializer
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOCIRaw(BaseChatModel):
"""
A direct OCI Raw API integration for browser-use that bypasses Langchain.
This class provides a browser-use compatible interface for OCI GenAI models
using direct API calls to Oracle Cloud Infrastructure.
Args:
model_id: The OCI GenAI model OCID
service_endpoint: The OCI service endpoint URL
compartment_id: The OCI compartment OCID
provider: The model provider (e.g., "meta", "cohere", "xai")
temperature: Temperature for response generation (0.0-2.0) - supported by all providers
max_tokens: Maximum tokens in response - supported by all providers
frequency_penalty: Frequency penalty for response generation - supported by Meta and Cohere only
presence_penalty: Presence penalty for response generation - supported by Meta only
top_p: Top-p sampling parameter - supported by all providers
top_k: Top-k sampling parameter - supported by Cohere and xAI only
auth_type: Authentication type (e.g., "API_KEY")
auth_profile: Authentication profile name
timeout: Request timeout in seconds
"""
# Model configuration
model_id: str
service_endpoint: str
compartment_id: str
provider: str = 'meta'
# Model parameters
temperature: float | None = 1.0
max_tokens: int | None = 600
frequency_penalty: float | None = 0.0
presence_penalty: float | None = 0.0
top_p: float | None = 0.75
top_k: int | None = 0 # Used by Cohere models
# Authentication
auth_type: str = 'API_KEY'
auth_profile: str = 'DEFAULT'
# Client configuration
timeout: float = 60.0
# Static properties
@property
def provider_name(self) -> str:
	"""Identifier of this LLM integration (distinct from the model's provider field)."""
	return 'oci-raw'
@property
def name(self) -> str:
	"""Telemetry-friendly model name, shortened when the OCID is very long."""
	# Return a shorter name for telemetry (max 100 chars)
	if len(self.model_id) > 90:
		# Extract the model name from the OCID
		parts = self.model_id.split('.')
		if len(parts) >= 4:
			return f'oci-{self.provider}-{parts[3]}'  # e.g., "oci-meta-us-chicago-1"
		else:
			return f'oci-{self.provider}-model'
	return self.model_id
@property
def model(self) -> str:
	"""Full OCI model OCID (satisfies the BaseChatModel interface)."""
	return self.model_id
@property
def model_name(self) -> str:
	"""Telemetry-friendly model name (max ~100 chars).

	Delegates to ``name``, which implements the identical OCID-shortening
	logic; previously this body was a copy-paste duplicate of ``name``.
	"""
	return self.name
def _uses_cohere_format(self) -> bool:
	"""True when the configured provider requires the Cohere chat request shape."""
	normalized = self.provider.lower()
	return normalized == 'cohere'
def _get_supported_parameters(self) -> dict[str, bool]:
	"""Return a feature map of which sampling parameters the current provider accepts."""
	# Baseline: everything supported (also the fallback for unknown providers).
	all_supported = {
		'temperature': True,
		'max_tokens': True,
		'frequency_penalty': True,
		'presence_penalty': True,
		'top_p': True,
		'top_k': True,
	}
	# Per-provider restrictions, expressed as overrides of the baseline.
	per_provider = {
		'meta': dict(all_supported, top_k=False),
		'cohere': dict(all_supported, presence_penalty=False),
		'xai': dict(all_supported, frequency_penalty=False, presence_penalty=False),
	}
	return per_provider.get(self.provider.lower(), all_supported)
def _get_oci_client(self) -> GenerativeAiInferenceClient:
	"""Lazily create and cache the OCI GenerativeAiInferenceClient.

	Supports API_KEY (config file), INSTANCE_PRINCIPAL and RESOURCE_PRINCIPAL
	authentication. Any unrecognized auth_type falls back to API_KEY — the
	previous implementation had the API_KEY branch duplicated verbatim as the
	fallback, which is now merged into one branch.
	"""
	if not hasattr(self, '_client'):
		# Shared client settings for every auth mode.
		client_kwargs = {
			'service_endpoint': self.service_endpoint,
			'retry_strategy': oci.retry.NoneRetryStrategy(),
			'timeout': (10, 240),  # (connect, read) seconds
		}
		if self.auth_type == 'INSTANCE_PRINCIPAL':
			signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
			self._client = GenerativeAiInferenceClient(config={}, signer=signer, **client_kwargs)
		elif self.auth_type == 'RESOURCE_PRINCIPAL':
			signer = oci.auth.signers.get_resource_principals_signer()
			self._client = GenerativeAiInferenceClient(config={}, signer=signer, **client_kwargs)
		else:
			# API_KEY and any unknown auth_type: read credentials from ~/.oci/config
			config = oci.config.from_file('~/.oci/config', self.auth_profile)
			self._client = GenerativeAiInferenceClient(config=config, **client_kwargs)
	return self._client
def _extract_usage(self, response) -> ChatInvokeUsage | None:
	"""Pull token-usage metadata out of an OCI response; None when absent.

	Usage is best-effort telemetry, so any extraction error yields None
	rather than failing the call.
	"""
	try:
		# The response is the direct OCI response object, not a dict.
		if not hasattr(response, 'data') or not hasattr(response.data, 'chat_response'):
			return None
		chat_response = response.data.chat_response
		if not hasattr(chat_response, 'usage'):
			return None
		usage = chat_response.usage
		return ChatInvokeUsage(
			prompt_tokens=getattr(usage, 'prompt_tokens', 0),
			prompt_cached_tokens=None,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			completion_tokens=getattr(usage, 'completion_tokens', 0),
			total_tokens=getattr(usage, 'total_tokens', 0),
		)
	except Exception:
		return None
def _extract_content(self, response) -> str:
	"""Extract the text content from an OCI chat response.

	Handles both the Cohere format (direct ``.text`` attribute) and the
	generic format (``.choices[0].message.content`` parts) used by Meta/xAI.

	Raises:
		ModelProviderError: If the response shape is unrecognized or extraction fails.
	"""
	try:
		# The response is the direct OCI response object, not a dict
		if not hasattr(response, 'data'):
			raise ModelProviderError(message='Invalid response format: no data attribute', status_code=500, model=self.name)
		chat_response = response.data.chat_response
		# Handle different response types based on provider
		if hasattr(chat_response, 'text'):
			# Cohere response format - has direct text attribute
			return chat_response.text or ''
		elif hasattr(chat_response, 'choices') and chat_response.choices:
			# Generic response format - has choices array (Meta, xAI)
			choice = chat_response.choices[0]
			message = choice.message
			content_parts = message.content
			# Extract text from content parts
			text_parts = []
			for part in content_parts:
				if hasattr(part, 'text'):
					text_parts.append(part.text)
			return '\n'.join(text_parts) if text_parts else ''
		else:
			raise ModelProviderError(
				message=f'Unsupported response format: {type(chat_response).__name__}', status_code=500, model=self.name
			)
	except ModelProviderError:
		# Bug fix: our own descriptive errors raised above were previously caught
		# by the blanket handler below and re-wrapped with a generic message.
		raise
	except Exception as e:
		raise ModelProviderError(
			message=f'Failed to extract content from response: {str(e)}', status_code=500, model=self.name
		) from e
async def _make_request(self, messages: list[BaseMessage]):
	"""Build the provider-appropriate chat request and execute it.

	Cohere models get a CohereChatRequest with a single flattened message
	string; all other providers get a GenericChatRequest with a messages
	array. The blocking OCI SDK call runs in a worker thread.

	Raises:
		ModelRateLimitError: On HTTP 429 from OCI.
		ModelProviderError: On any other OCI error.
	"""
	# Create chat request based on provider type
	if self._uses_cohere_format():
		# Cohere models use CohereChatRequest with single message string
		message_text = OCIRawMessageSerializer.serialize_messages_for_cohere(messages)
		chat_request = CohereChatRequest()
		chat_request.message = message_text
		chat_request.max_tokens = self.max_tokens
		chat_request.temperature = self.temperature
		chat_request.frequency_penalty = self.frequency_penalty
		chat_request.top_p = self.top_p
		chat_request.top_k = self.top_k
	else:
		# Meta, xAI and other models use GenericChatRequest with messages array
		oci_messages = OCIRawMessageSerializer.serialize_messages(messages)
		chat_request = GenericChatRequest()
		chat_request.api_format = BaseChatRequest.API_FORMAT_GENERIC
		chat_request.messages = oci_messages
		chat_request.max_tokens = self.max_tokens
		chat_request.temperature = self.temperature
		chat_request.top_p = self.top_p
		# Provider-specific parameters
		provider = self.provider.lower()
		if provider == 'meta':
			# Meta models support frequency_penalty and presence_penalty
			chat_request.frequency_penalty = self.frequency_penalty
			chat_request.presence_penalty = self.presence_penalty
		elif provider == 'xai':
			# xAI models support top_k but not frequency_penalty or presence_penalty
			chat_request.top_k = self.top_k
		else:
			# Default: include all parameters for unknown providers
			chat_request.frequency_penalty = self.frequency_penalty
			chat_request.presence_penalty = self.presence_penalty
	# Create serving mode and chat details
	serving_mode = OnDemandServingMode(model_id=self.model_id)
	chat_details = ChatDetails()
	chat_details.serving_mode = serving_mode
	chat_details.chat_request = chat_request
	chat_details.compartment_id = self.compartment_id

	# Make the request in a thread to avoid blocking the event loop
	def _sync_request():
		try:
			client = self._get_oci_client()
			return client.chat(chat_details)  # Return the raw response object
		except Exception as e:
			# Handle OCI-specific exceptions
			status_code = getattr(e, 'status', 500)
			if status_code == 429:
				raise ModelRateLimitError(message=f'Rate limit exceeded: {str(e)}', model=self.name) from e
			raise ModelProviderError(message=str(e), status_code=status_code, model=self.name) from e

	# get_running_loop() is the correct (non-deprecated) call inside a coroutine,
	# replacing asyncio.get_event_loop() which is deprecated in this context.
	loop = asyncio.get_running_loop()
	return await loop.run_in_executor(None, _sync_request)
@overload
async def ainvoke(
	self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...

@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

async def ainvoke(
	self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
	"""
	Invoke the OCI GenAI model with the given messages using raw API.
	Args:
		messages: List of chat messages
		output_format: Optional Pydantic model class for structured output
	Returns:
		Either a string response or an instance of output_format
	Raises:
		ModelRateLimitError: On provider rate limiting.
		ModelProviderError: On provider errors or unparseable structured output.
	"""
	try:
		if output_format is None:
			# Plain text response
			response = await self._make_request(messages)
			return ChatInvokeCompletion(
				completion=self._extract_content(response),
				usage=self._extract_usage(response),
			)
		# For structured output, add JSON schema instructions
		optimized_schema = SchemaOptimizer.create_optimized_json_schema(output_format)
		system_instruction = f"""
You must respond with ONLY a valid JSON object that matches this exact schema:
{json.dumps(optimized_schema, indent=2)}
IMPORTANT:
- Your response must be ONLY the JSON object, no additional text
- The JSON must be valid and parseable
- All required fields must be present
- No extra fields are allowed
- Use proper JSON syntax with double quotes
"""
		from browser_use.llm.messages import SystemMessage

		modified_messages = list(messages)
		if modified_messages and getattr(modified_messages[0], 'role', None) == 'system':
			# Bug fix: list.copy() is shallow, so mutating the first message's
			# .content in place (as before) permanently appended the schema to
			# the caller's own SystemMessage on every invocation. Build a fresh
			# message instead of mutating the caller's object.
			existing = modified_messages[0].content
			existing_text = existing if isinstance(existing, str) else str(existing)
			modified_messages[0] = SystemMessage(content=existing_text + '\n\n' + system_instruction)
		else:
			# Insert new system message at the beginning
			modified_messages.insert(0, SystemMessage(content=system_instruction))
		response = await self._make_request(modified_messages)
		response_text = self._extract_content(response)
		# Clean and parse the JSON response
		try:
			cleaned_text = response_text.strip()
			# Remove markdown code blocks if present
			if cleaned_text.startswith('```json'):
				cleaned_text = cleaned_text[7:]
			if cleaned_text.startswith('```'):
				cleaned_text = cleaned_text[3:]
			if cleaned_text.endswith('```'):
				cleaned_text = cleaned_text[:-3]
			cleaned_text = cleaned_text.strip()
			# If surrounded by prose, carve out the outermost {...} span
			if not cleaned_text.startswith('{'):
				start_idx = cleaned_text.find('{')
				end_idx = cleaned_text.rfind('}')
				if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
					cleaned_text = cleaned_text[start_idx : end_idx + 1]
			parsed_data = json.loads(cleaned_text)
			parsed = output_format.model_validate(parsed_data)
			return ChatInvokeCompletion(
				completion=parsed,
				usage=self._extract_usage(response),
			)
		except (json.JSONDecodeError, ValueError) as e:
			raise ModelProviderError(
				message=f'Failed to parse structured output: {str(e)}. Response was: {response_text[:200]}...',
				status_code=500,
				model=self.name,
			) from e
	except ModelRateLimitError:
		# Re-raise rate limit errors as-is
		raise
	except ModelProviderError:
		# Re-raise provider errors as-is
		raise
	except Exception as e:
		# Wrap anything unexpected in a provider error
		raise ModelProviderError(
			message=f'Unexpected error: {str(e)}',
			status_code=500,
			model=self.name,
		) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/oci_raw/__init__.py | browser_use/llm/oci_raw/__init__.py | """
OCI Raw API integration for browser-use.
This module provides direct integration with Oracle Cloud Infrastructure's
Generative AI service using the raw API endpoints, without Langchain dependencies.
"""
from .chat import ChatOCIRaw
__all__ = ['ChatOCIRaw']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openai/responses_serializer.py | browser_use/llm/openai/responses_serializer.py | """Serializer for converting messages to OpenAI Responses API input format."""
from typing import overload
from openai.types.responses.easy_input_message_param import EasyInputMessageParam
from openai.types.responses.response_input_image_param import ResponseInputImageParam
from openai.types.responses.response_input_message_content_list_param import (
ResponseInputMessageContentListParam,
)
from openai.types.responses.response_input_text_param import ResponseInputTextParam
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
UserMessage,
)
class ResponsesAPIMessageSerializer:
	"""Converts browser-use message objects into OpenAI Responses API input params."""

	@staticmethod
	def _serialize_content_part_text(part: ContentPartTextParam) -> ResponseInputTextParam:
		"""Map a text part onto the Responses API 'input_text' shape."""
		return ResponseInputTextParam(type='input_text', text=part.text)

	@staticmethod
	def _serialize_content_part_image(part: ContentPartImageParam) -> ResponseInputImageParam:
		"""Map an image part onto the Responses API 'input_image' shape."""
		return ResponseInputImageParam(
			type='input_image',
			image_url=part.image_url.url,
			detail=part.image_url.detail,
		)

	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | ResponseInputMessageContentListParam:
		"""Serialize user content; both text and image parts are allowed."""
		if isinstance(content, str):
			return content
		converters = {
			'text': ResponsesAPIMessageSerializer._serialize_content_part_text,
			'image_url': ResponsesAPIMessageSerializer._serialize_content_part_image,
		}
		return [converters[item.type](item) for item in content if item.type in converters]

	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str | ResponseInputMessageContentListParam:
		"""Serialize system content; only text parts are kept."""
		if isinstance(content, str):
			return content
		return [
			ResponsesAPIMessageSerializer._serialize_content_part_text(item) for item in content if item.type == 'text'
		]

	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str | ResponseInputMessageContentListParam | None:
		"""Serialize assistant content; refusal parts are folded into text (the
		Responses API has no refusal part type). None passes through."""
		if content is None or isinstance(content, str):
			return content
		return [
			ResponsesAPIMessageSerializer._serialize_content_part_text(item)
			if item.type == 'text'
			else ResponseInputTextParam(text=f'[Refusal: {item.refusal}]', type='input_text')
			for item in content
		]

	@overload
	@staticmethod
	def serialize(message: UserMessage) -> EasyInputMessageParam: ...
	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> EasyInputMessageParam: ...
	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> EasyInputMessageParam: ...

	@staticmethod
	def serialize(message: BaseMessage) -> EasyInputMessageParam:
		"""Serialize one browser-use message into a Responses API input param."""
		if isinstance(message, UserMessage):
			payload = ResponsesAPIMessageSerializer._serialize_user_content(message.content)
			return EasyInputMessageParam(role='user', content=payload)
		if isinstance(message, SystemMessage):
			# The Responses API uses 'developer' for system prompts in some contexts,
			# but 'system' is also accepted via EasyInputMessageParam.
			payload = ResponsesAPIMessageSerializer._serialize_system_content(message.content)
			return EasyInputMessageParam(role='system', content=payload)
		if isinstance(message, AssistantMessage):
			payload = ResponsesAPIMessageSerializer._serialize_assistant_content(message.content)
			if payload is None:
				# No content: surface tool calls as text so context survives, else ''.
				if message.tool_calls:
					payload = '\n'.join(
						f'[Tool call: {tc.function.name}({tc.function.arguments})]' for tc in message.tool_calls
					)
				else:
					payload = ''
			return EasyInputMessageParam(role='assistant', content=payload)
		raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[EasyInputMessageParam]:
		"""Serialize every message, preserving order."""
		return [ResponsesAPIMessageSerializer.serialize(msg) for msg in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openai/serializer.py | browser_use/llm/openai/serializer.py | from typing import overload
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartRefusalParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageFunctionToolCallParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_content_part_image_param import ImageURL
from openai.types.chat.chat_completion_message_function_tool_call_param import Function
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class OpenAIMessageSerializer:
"""Serializer for converting between custom message types and OpenAI message param types."""
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
	"""Wrap a text part in the Chat Completions 'text' param shape."""
	return ChatCompletionContentPartTextParam(type='text', text=part.text)
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam:
	"""Wrap an image part, preserving its URL and detail level."""
	url_payload = ImageURL(url=part.image_url.url, detail=part.image_url.detail)
	return ChatCompletionContentPartImageParam(type='image_url', image_url=url_payload)
@staticmethod
def _serialize_content_part_refusal(part: ContentPartRefusalParam) -> ChatCompletionContentPartRefusalParam:
	"""Wrap a refusal part in the Chat Completions 'refusal' param shape."""
	return ChatCompletionContentPartRefusalParam(type='refusal', refusal=part.refusal)
@staticmethod
def _serialize_user_content(
	content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
	"""Serialize content for user messages (text and images allowed)."""
	if isinstance(content, str):
		return content
	# The content union only admits 'text' and 'image_url' parts.
	return [
		OpenAIMessageSerializer._serialize_content_part_text(item)
		if item.type == 'text'
		else OpenAIMessageSerializer._serialize_content_part_image(item)
		for item in content
	]
@staticmethod
def _serialize_system_content(
	content: str | list[ContentPartTextParam],
) -> str | list[ChatCompletionContentPartTextParam]:
	"""Serialize content for system messages (text only)."""
	if isinstance(content, str):
		return content
	return [OpenAIMessageSerializer._serialize_content_part_text(item) for item in content if item.type == 'text']
@staticmethod
def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None:
"""Serialize content for assistant messages (text and refusal allowed)."""
if content is None:
return None
if isinstance(content, str):
return content
serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] = []
for part in content:
if part.type == 'text':
serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
elif part.type == 'refusal':
serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_refusal(part))
return serialized_parts
@staticmethod
def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageFunctionToolCallParam:
return ChatCompletionMessageFunctionToolCallParam(
id=tool_call.id,
function=Function(name=tool_call.function.name, arguments=tool_call.function.arguments),
type='function',
)
# endregion
# region - Serialize overloads
@overload
@staticmethod
def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...
@staticmethod
def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
"""Serialize a custom message to an OpenAI message param."""
if isinstance(message, UserMessage):
user_result: ChatCompletionUserMessageParam = {
'role': 'user',
'content': OpenAIMessageSerializer._serialize_user_content(message.content),
}
if message.name is not None:
user_result['name'] = message.name
return user_result
elif isinstance(message, SystemMessage):
system_result: ChatCompletionSystemMessageParam = {
'role': 'system',
'content': OpenAIMessageSerializer._serialize_system_content(message.content),
}
if message.name is not None:
system_result['name'] = message.name
return system_result
elif isinstance(message, AssistantMessage):
# Handle content serialization
content = None
if message.content is not None:
content = OpenAIMessageSerializer._serialize_assistant_content(message.content)
assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}
# Only add content if it's not None
if content is not None:
assistant_result['content'] = content
if message.name is not None:
assistant_result['name'] = message.name
if message.refusal is not None:
assistant_result['refusal'] = message.refusal
if message.tool_calls:
assistant_result['tool_calls'] = [OpenAIMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]
return assistant_result
else:
raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
return [OpenAIMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openai/chat.py | browser_use/llm/openai/chat.py | from collections.abc import Iterable, Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat import ChatCompletionContentPartTextParam
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared.chat_model import ChatModel
from openai.types.shared_params.reasoning_effort import ReasoningEffort
from openai.types.shared_params.response_format_json_schema import JSONSchema, ResponseFormatJSONSchema
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenAI(BaseChatModel):
    """
    A wrapper around AsyncOpenAI that implements the BaseLLM protocol.

    This class accepts all AsyncOpenAI parameters while adding model
    and temperature parameters for the LLM interface (if temperature is not `None`).
    """

    # Model configuration
    model: ChatModel | str

    # Model params
    temperature: float | None = 0.2
    frequency_penalty: float | None = 0.3  # this avoids infinite generation of \t for models like 4.1-mini
    reasoning_effort: ReasoningEffort = 'low'
    seed: int | None = None
    service_tier: Literal['auto', 'default', 'flex', 'priority', 'scale'] | None = None
    top_p: float | None = None
    add_schema_to_system_prompt: bool = False  # Add JSON schema to system prompt instead of using response_format
    dont_force_structured_output: bool = False  # If True, the model will not be forced to output a structured output
    remove_min_items_from_schema: bool = (
        False  # If True, remove minItems from JSON schema (for compatibility with some providers)
    )
    remove_defaults_from_schema: bool = (
        False  # If True, remove default values from JSON schema (for compatibility with some providers)
    )

    # Client initialization parameters
    api_key: str | None = None
    organization: str | None = None
    project: str | None = None
    base_url: str | httpx.URL | None = None
    websocket_base_url: str | httpx.URL | None = None
    timeout: float | httpx.Timeout | None = None
    max_retries: int = 5  # Increase default retries for automation reliability
    default_headers: Mapping[str, str] | None = None
    default_query: Mapping[str, object] | None = None
    http_client: httpx.AsyncClient | None = None
    _strict_response_validation: bool = False

    max_completion_tokens: int | None = 4096

    # Model names matched (by substring, see ainvoke) to decide whether to send
    # reasoning_effort and drop sampling params.
    reasoning_models: list[ChatModel | str] | None = field(
        default_factory=lambda: [
            'o4-mini',
            'o3',
            'o3-mini',
            'o1',
            'o1-pro',
            'o3-pro',
            'gpt-5',
            'gpt-5-mini',
            'gpt-5-nano',
        ]
    )

    # Static
    @property
    def provider(self) -> str:
        return 'openai'

    def _get_client_params(self) -> dict[str, Any]:
        """Prepare client parameters dictionary."""
        # Define base client params
        base_params = {
            'api_key': self.api_key,
            'organization': self.organization,
            'project': self.project,
            'base_url': self.base_url,
            'websocket_base_url': self.websocket_base_url,
            'timeout': self.timeout,
            'max_retries': self.max_retries,
            'default_headers': self.default_headers,
            'default_query': self.default_query,
            '_strict_response_validation': self._strict_response_validation,
        }

        # Create client_params dict with non-None values so the SDK's own defaults apply
        client_params = {k: v for k, v in base_params.items() if v is not None}

        # Add http_client if provided
        if self.http_client is not None:
            client_params['http_client'] = self.http_client

        return client_params

    def get_client(self) -> AsyncOpenAI:
        """
        Returns an AsyncOpenAI client.

        Note: a fresh client is constructed on every call (no caching here).

        Returns:
            AsyncOpenAI: An instance of the AsyncOpenAI client.
        """
        client_params = self._get_client_params()
        return AsyncOpenAI(**client_params)

    @property
    def name(self) -> str:
        return str(self.model)

    def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
        """Map the ChatCompletion usage block onto ChatInvokeUsage; None if usage is absent."""
        if response.usage is not None:
            completion_tokens = response.usage.completion_tokens
            completion_token_details = response.usage.completion_tokens_details
            if completion_token_details is not None:
                reasoning_tokens = completion_token_details.reasoning_tokens
                if reasoning_tokens is not None:
                    # NOTE(review): per OpenAI's API reference, reasoning_tokens is a
                    # breakdown of completion_tokens, so adding it here may double-count
                    # reasoning tokens — confirm this is intentional.
                    completion_tokens += reasoning_tokens
            usage = ChatInvokeUsage(
                prompt_tokens=response.usage.prompt_tokens,
                prompt_cached_tokens=response.usage.prompt_tokens_details.cached_tokens
                if response.usage.prompt_tokens_details is not None
                else None,
                prompt_cache_creation_tokens=None,
                prompt_image_tokens=None,
                # Completion
                completion_tokens=completion_tokens,
                total_tokens=response.usage.total_tokens,
            )
        else:
            usage = None
        return usage

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """
        Invoke the model with the given messages.

        Args:
            messages: List of chat messages
            output_format: Optional Pydantic model class for structured output

        Returns:
            Either a string response or an instance of output_format

        Raises:
            ModelRateLimitError: on provider rate limiting.
            ModelProviderError: on any other provider/connection failure or
                when structured output cannot be parsed.
        """
        openai_messages = OpenAIMessageSerializer.serialize_messages(messages)

        try:
            model_params: dict[str, Any] = {}

            if self.temperature is not None:
                model_params['temperature'] = self.temperature

            if self.frequency_penalty is not None:
                model_params['frequency_penalty'] = self.frequency_penalty

            if self.max_completion_tokens is not None:
                model_params['max_completion_tokens'] = self.max_completion_tokens

            if self.top_p is not None:
                model_params['top_p'] = self.top_p

            if self.seed is not None:
                model_params['seed'] = self.seed

            if self.service_tier is not None:
                model_params['service_tier'] = self.service_tier

            # NOTE(review): this is a substring containment test, so e.g. 'o3' matches
            # any model name containing 'o3' — confirm the loose matching is intended.
            if self.reasoning_models and any(str(m).lower() in str(self.model).lower() for m in self.reasoning_models):
                model_params['reasoning_effort'] = self.reasoning_effort
                # Reasoning models reject these sampling params, so strip them.
                model_params.pop('temperature', None)
                model_params.pop('frequency_penalty', None)

            if output_format is None:
                # Return string response
                response = await self.get_client().chat.completions.create(
                    model=self.model,
                    messages=openai_messages,
                    **model_params,
                )

                usage = self._get_usage(response)
                return ChatInvokeCompletion(
                    completion=response.choices[0].message.content or '',
                    usage=usage,
                    stop_reason=response.choices[0].finish_reason if response.choices else None,
                )

            else:
                response_format: JSONSchema = {
                    'name': 'agent_output',
                    'strict': True,
                    'schema': SchemaOptimizer.create_optimized_json_schema(
                        output_format,
                        remove_min_items=self.remove_min_items_from_schema,
                        remove_defaults=self.remove_defaults_from_schema,
                    ),
                }

                # Add JSON schema to system prompt if requested
                if self.add_schema_to_system_prompt and openai_messages and openai_messages[0]['role'] == 'system':
                    schema_text = f'\n<json_schema>\n{response_format}\n</json_schema>'
                    if isinstance(openai_messages[0]['content'], str):
                        openai_messages[0]['content'] += schema_text
                    elif isinstance(openai_messages[0]['content'], Iterable):
                        # Multi-part system content: append the schema as one more text part.
                        openai_messages[0]['content'] = list(openai_messages[0]['content']) + [
                            ChatCompletionContentPartTextParam(text=schema_text, type='text')
                        ]

                if self.dont_force_structured_output:
                    response = await self.get_client().chat.completions.create(
                        model=self.model,
                        messages=openai_messages,
                        **model_params,
                    )
                else:
                    # Return structured response
                    response = await self.get_client().chat.completions.create(
                        model=self.model,
                        messages=openai_messages,
                        response_format=ResponseFormatJSONSchema(json_schema=response_format, type='json_schema'),
                        **model_params,
                    )

                if response.choices[0].message.content is None:
                    raise ModelProviderError(
                        message='Failed to parse structured output from model response',
                        status_code=500,
                        model=self.name,
                    )

                usage = self._get_usage(response)

                parsed = output_format.model_validate_json(response.choices[0].message.content)

                return ChatInvokeCompletion(
                    completion=parsed,
                    usage=usage,
                    stop_reason=response.choices[0].finish_reason if response.choices else None,
                )

        except RateLimitError as e:
            raise ModelRateLimitError(message=e.message, model=self.name) from e

        except APIConnectionError as e:
            raise ModelProviderError(message=str(e), model=self.name) from e

        except APIStatusError as e:
            raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e

        except Exception as e:
            raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openai/like.py | browser_use/llm/openai/like.py | from dataclasses import dataclass
from browser_use.llm.openai.chat import ChatOpenAI
@dataclass
class ChatOpenAILike(ChatOpenAI):
    """
    A class to interact with any provider that exposes the OpenAI API schema.

    All behavior is inherited from ChatOpenAI; only the model field is narrowed
    to a plain string, since arbitrary providers are not limited to OpenAI's
    ChatModel names.

    Args:
        model (str): The name of the model to use.
    """

    model: str
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/browser_use/chat.py | browser_use/llm/browser_use/chat.py | """
ChatBrowserUse - Client for browser-use cloud API
This wraps the BaseChatModel protocol and sends requests to the browser-use cloud API
for optimized browser automation LLM inference.
"""
import asyncio
import logging
import os
import random
from typing import Any, TypeVar, overload
import httpx
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.observability import observe
T = TypeVar('T', bound=BaseModel)
logger = logging.getLogger(__name__)
# HTTP status codes that should trigger a retry
RETRYABLE_STATUS_CODES = {429, 500, 502, 503, 504}
class ChatBrowserUse(BaseChatModel):
    """
    Client for browser-use cloud API.

    This sends requests to the browser-use cloud API which uses optimized models
    and prompts for browser automation tasks.

    Usage:
        agent = Agent(
            task="Find the number of stars of the browser-use repo",
            llm=ChatBrowserUse(model='bu-latest'),
        )
    """

    def __init__(
        self,
        model: str = 'bu-latest',
        api_key: str | None = None,
        base_url: str | None = None,
        timeout: float = 120.0,
        max_retries: int = 5,
        retry_base_delay: float = 1.0,
        retry_max_delay: float = 60.0,
        **kwargs,
    ):
        """
        Initialize ChatBrowserUse client.

        Args:
            model: Model name to use. Options:
                - 'bu-latest' or 'bu-1-0': Default model
                - 'browser-use/bu-30b-a3b-preview': Browser Use Open Source Model
            api_key: API key for browser-use cloud. Defaults to BROWSER_USE_API_KEY env var.
            base_url: Base URL for the API. Defaults to BROWSER_USE_LLM_URL env var or production URL.
            timeout: Request timeout in seconds.
            max_retries: Maximum number of retries for transient errors (default: 5).
            retry_base_delay: Base delay in seconds for exponential backoff (default: 1.0).
            retry_max_delay: Maximum delay in seconds between retries (default: 60.0).

        Raises:
            ValueError: if the model name is not recognized or no API key is available.
        """
        # Validate model name - allow bu-* and browser-use/* patterns
        valid_models = ['bu-latest', 'bu-1-0']
        is_valid = model in valid_models or model.startswith('browser-use/')

        if not is_valid:
            raise ValueError(f"Invalid model: '{model}'. Must be one of {valid_models} or start with 'browser-use/'")

        # Normalize bu-latest to bu-1-0 for default models
        if model == 'bu-latest':
            self.model = 'bu-1-0'
        else:
            self.model = model

        self.fast = False
        self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY')
        self.base_url = base_url or os.getenv('BROWSER_USE_LLM_URL', 'https://llm.api.browser-use.com')
        self.timeout = timeout
        self.max_retries = max_retries
        self.retry_base_delay = retry_base_delay
        self.retry_max_delay = retry_max_delay

        if not self.api_key:
            raise ValueError(
                'You need to set the BROWSER_USE_API_KEY environment variable. '
                'Get your key at https://cloud.browser-use.com/new-api-key'
            )

    @property
    def provider(self) -> str:
        return 'browser-use'

    @property
    def name(self) -> str:
        return self.model

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, request_type: str = 'browser_agent', **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T], request_type: str = 'browser_agent', **kwargs: Any
    ) -> ChatInvokeCompletion[T]: ...

    @observe(name='chat_browser_use_ainvoke')
    async def ainvoke(
        self,
        messages: list[BaseMessage],
        output_format: type[T] | None = None,
        request_type: str = 'browser_agent',
        **kwargs: Any,
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """
        Send request to browser-use cloud API.

        Retries transient failures (HTTP 429/5xx, timeouts, connection errors)
        with exponential backoff plus jitter before giving up.

        Args:
            messages: List of messages to send
            output_format: Expected output format (Pydantic model)
            request_type: Type of request - 'browser_agent' or 'judge'
            **kwargs: Additional arguments, including:
                - session_id: Session ID for sticky routing (same session → same container)

        Returns:
            ChatInvokeCompletion with structured response and usage info
        """
        # Get ANONYMIZED_TELEMETRY setting from config
        from browser_use.config import CONFIG

        anonymized_telemetry = CONFIG.ANONYMIZED_TELEMETRY

        # Extract session_id from kwargs for sticky routing
        session_id = kwargs.get('session_id')

        # Prepare request payload
        payload: dict[str, Any] = {
            'model': self.model,
            'messages': [self._serialize_message(msg) for msg in messages],
            'fast': self.fast,
            'request_type': request_type,
            'anonymized_telemetry': anonymized_telemetry,
        }

        # Add session_id for sticky routing if provided
        if session_id:
            payload['session_id'] = session_id

        # Add output format schema if provided
        if output_format is not None:
            payload['output_format'] = output_format.model_json_schema()

        last_error: Exception | None = None

        # Retry loop with exponential backoff; `break` on success feeds the
        # for/else below (the else only runs when all retries are exhausted).
        for attempt in range(self.max_retries):
            try:
                result = await self._make_request(payload)
                break
            except httpx.HTTPStatusError as e:
                last_error = e
                status_code = e.response.status_code

                # Check if this is a retryable error
                if status_code in RETRYABLE_STATUS_CODES and attempt < self.max_retries - 1:
                    # Exponential backoff capped at retry_max_delay, plus up to 10% jitter
                    delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
                    jitter = random.uniform(0, delay * 0.1)
                    total_delay = delay + jitter

                    logger.warning(
                        f'⚠️ Got {status_code} error, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
                    )
                    await asyncio.sleep(total_delay)
                    continue

                # Non-retryable HTTP error or exhausted retries
                self._raise_http_error(e)
            except (httpx.TimeoutException, httpx.ConnectError) as e:
                last_error = e
                # Network errors are retryable
                if attempt < self.max_retries - 1:
                    delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
                    jitter = random.uniform(0, delay * 0.1)
                    total_delay = delay + jitter

                    error_type = 'timeout' if isinstance(e, httpx.TimeoutException) else 'connection error'
                    logger.warning(
                        f'⚠️ Got {error_type}, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
                    )
                    await asyncio.sleep(total_delay)
                    continue

                # Exhausted retries
                if isinstance(e, httpx.TimeoutException):
                    raise ValueError(f'Request timed out after {self.timeout}s (retried {self.max_retries} times)')
                raise ValueError(f'Failed to connect to browser-use API after {self.max_retries} attempts: {e}')
            except Exception as e:
                raise ValueError(f'Failed to connect to browser-use API: {e}')
        else:
            # Loop completed without break (all retries exhausted)
            if last_error is not None:
                if isinstance(last_error, httpx.HTTPStatusError):
                    self._raise_http_error(last_error)
                raise ValueError(f'Request failed after {self.max_retries} attempts: {last_error}')
            raise RuntimeError('Retry loop completed without return or exception')

        # Parse response - server returns structured data as dict
        if output_format is not None:
            # Server returns structured data as a dict, validate it
            completion_data = result['completion']
            logger.debug(
                f'📥 Got structured data from service: {list(completion_data.keys()) if isinstance(completion_data, dict) else type(completion_data)}'
            )

            # Convert action dicts to ActionModel instances if needed
            # llm-use returns dicts to avoid validation with empty ActionModel
            if isinstance(completion_data, dict) and 'action' in completion_data:
                actions = completion_data['action']
                if actions and isinstance(actions[0], dict):
                    from typing import get_args

                    # Get ActionModel type from output_format
                    action_model_type = get_args(output_format.model_fields['action'].annotation)[0]
                    # Convert dicts to ActionModel instances
                    completion_data['action'] = [action_model_type.model_validate(action_dict) for action_dict in actions]

            completion = output_format.model_validate(completion_data)
        else:
            completion = result['completion']

        # Parse usage info
        usage = None
        if 'usage' in result and result['usage'] is not None:
            from browser_use.llm.views import ChatInvokeUsage

            usage = ChatInvokeUsage(**result['usage'])

        return ChatInvokeCompletion(
            completion=completion,
            usage=usage,
        )

    async def _make_request(self, payload: dict) -> dict:
        """Make a single API request.

        Raises:
            httpx.HTTPStatusError: on any non-2xx response (via raise_for_status).
        """
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                f'{self.base_url}/v1/chat/completions',
                json=payload,
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    'Content-Type': 'application/json',
                },
            )
            response.raise_for_status()
            return response.json()

    def _raise_http_error(self, e: httpx.HTTPStatusError) -> None:
        """Raise appropriate ModelProviderError for HTTP errors. Always raises."""
        error_detail = ''
        try:
            error_data = e.response.json()
            error_detail = error_data.get('detail', str(e))
        except Exception:
            # Response body was not JSON; fall back to the exception text.
            error_detail = str(e)

        status_code = e.response.status_code

        if status_code == 401:
            raise ModelProviderError(message=f'Invalid API key. {error_detail}', status_code=401, model=self.name)
        elif status_code == 402:
            raise ModelProviderError(message=f'Insufficient credits. {error_detail}', status_code=402, model=self.name)
        elif status_code == 429:
            raise ModelRateLimitError(message=f'Rate limit exceeded. {error_detail}', status_code=429, model=self.name)
        elif status_code in {500, 502, 503, 504}:
            raise ModelProviderError(message=f'Server error. {error_detail}', status_code=status_code, model=self.name)
        else:
            raise ModelProviderError(message=f'API request failed: {error_detail}', status_code=status_code, model=self.name)

    def _serialize_message(self, message: BaseMessage) -> dict:
        """Serialize a message to JSON format (role and content only)."""
        # Handle Union types by checking the actual message type
        msg_dict = message.model_dump()
        return {
            'role': msg_dict['role'],
            'content': msg_dict['content'],
        }
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/browser_use/__init__.py | browser_use/llm/browser_use/__init__.py | from browser_use.llm.browser_use.chat import ChatBrowserUse
__all__ = ['ChatBrowserUse']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openrouter/serializer.py | browser_use/llm/openrouter/serializer.py | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class OpenRouterMessageSerializer:
    """
    Serializer for converting between custom message types and OpenRouter message formats.

    OpenRouter exposes an OpenAI-compatible API, so serialization is delegated
    wholesale to the OpenAI serializer.
    """

    @staticmethod
    def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
        """
        Serialize a list of browser_use messages to OpenRouter-compatible messages.

        Args:
            messages: List of browser_use messages

        Returns:
            List of OpenRouter-compatible messages (identical to OpenAI format)
        """
        # OpenRouter's wire format matches OpenAI's exactly — reuse that serializer verbatim.
        serialized: list[ChatCompletionMessageParam] = OpenAIMessageSerializer.serialize_messages(messages)
        return serialized
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/openrouter/chat.py | browser_use/llm/openrouter/chat.py | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openrouter.serializer import OpenRouterMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenRouter(BaseChatModel):
    """
    A wrapper around OpenRouter's chat API, which provides access to various LLM models
    through a unified OpenAI-compatible interface.

    This class implements the BaseChatModel protocol for OpenRouter's API.
    """

    # Model configuration
    model: str

    # Model params — these are request-level sampling options, passed per call in
    # ainvoke(); they must NOT be given to the AsyncOpenAI client constructor.
    temperature: float | None = None
    top_p: float | None = None
    seed: int | None = None

    # Client initialization parameters
    api_key: str | None = None
    http_referer: str | None = None  # OpenRouter specific parameter for tracking
    base_url: str | httpx.URL = 'https://openrouter.ai/api/v1'
    timeout: float | httpx.Timeout | None = None
    max_retries: int = 10
    default_headers: Mapping[str, str] | None = None
    default_query: Mapping[str, object] | None = None
    http_client: httpx.AsyncClient | None = None
    _strict_response_validation: bool = False
    extra_body: dict[str, Any] | None = None

    # Static
    @property
    def provider(self) -> str:
        return 'openrouter'

    def _get_client_params(self) -> dict[str, Any]:
        """Prepare client parameters dictionary.

        Bug fix: previously this dict also contained 'top_p' and 'seed', which the
        AsyncOpenAI constructor does not accept — setting either attribute made
        get_client() raise TypeError. They are sampling params and are already
        sent per-request in ainvoke(), so they are excluded here.
        """
        # Define base client params (constructor-accepted keys only)
        base_params = {
            'api_key': self.api_key,
            'base_url': self.base_url,
            'timeout': self.timeout,
            'max_retries': self.max_retries,
            'default_headers': self.default_headers,
            'default_query': self.default_query,
            '_strict_response_validation': self._strict_response_validation,
        }

        # Create client_params dict with non-None values so SDK defaults apply
        client_params = {k: v for k, v in base_params.items() if v is not None}

        # Add http_client if provided
        if self.http_client is not None:
            client_params['http_client'] = self.http_client

        return client_params

    def get_client(self) -> AsyncOpenAI:
        """
        Returns an AsyncOpenAI client configured for OpenRouter.

        The client is created lazily on first call and cached on the instance.

        Returns:
            AsyncOpenAI: An instance of the AsyncOpenAI client with OpenRouter base URL.
        """
        if not hasattr(self, '_client'):
            client_params = self._get_client_params()
            self._client = AsyncOpenAI(**client_params)
        return self._client

    @property
    def name(self) -> str:
        return str(self.model)

    def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
        """Extract usage information from the OpenRouter response; None if absent."""
        if response.usage is None:
            return None

        # prompt_tokens_details is optional in OpenRouter responses; read defensively.
        prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
        cached_tokens = prompt_details.cached_tokens if prompt_details else None

        return ChatInvokeUsage(
            prompt_tokens=response.usage.prompt_tokens,
            prompt_cached_tokens=cached_tokens,
            prompt_cache_creation_tokens=None,
            prompt_image_tokens=None,
            # Completion
            completion_tokens=response.usage.completion_tokens,
            total_tokens=response.usage.total_tokens,
        )

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """
        Invoke the model with the given messages through OpenRouter.

        Args:
            messages: List of chat messages
            output_format: Optional Pydantic model class for structured output

        Returns:
            Either a string response or an instance of output_format

        Raises:
            ModelRateLimitError: on provider rate limiting.
            ModelProviderError: on any other provider/connection failure or
                when structured output cannot be parsed.
        """
        openrouter_messages = OpenRouterMessageSerializer.serialize_messages(messages)

        # Set up extra headers for OpenRouter (HTTP-Referer enables app attribution)
        extra_headers = {}
        if self.http_referer:
            extra_headers['HTTP-Referer'] = self.http_referer

        try:
            if output_format is None:
                # Return string response
                response = await self.get_client().chat.completions.create(
                    model=self.model,
                    messages=openrouter_messages,
                    temperature=self.temperature,
                    top_p=self.top_p,
                    seed=self.seed,
                    extra_headers=extra_headers,
                    **(self.extra_body or {}),
                )

                usage = self._get_usage(response)
                return ChatInvokeCompletion(
                    completion=response.choices[0].message.content or '',
                    usage=usage,
                )
            else:
                # Create a JSON schema for structured output
                schema = SchemaOptimizer.create_optimized_json_schema(output_format)
                response_format_schema: JSONSchema = {
                    'name': 'agent_output',
                    'strict': True,
                    'schema': schema,
                }

                # Return structured response
                response = await self.get_client().chat.completions.create(
                    model=self.model,
                    messages=openrouter_messages,
                    temperature=self.temperature,
                    top_p=self.top_p,
                    seed=self.seed,
                    response_format=ResponseFormatJSONSchema(
                        json_schema=response_format_schema,
                        type='json_schema',
                    ),
                    extra_headers=extra_headers,
                    **(self.extra_body or {}),
                )

                if response.choices[0].message.content is None:
                    raise ModelProviderError(
                        message='Failed to parse structured output from model response',
                        status_code=500,
                        model=self.name,
                    )

                usage = self._get_usage(response)

                parsed = output_format.model_validate_json(response.choices[0].message.content)

                return ChatInvokeCompletion(
                    completion=parsed,
                    usage=usage,
                )

        except RateLimitError as e:
            raise ModelRateLimitError(message=e.message, model=self.name) from e

        except APIConnectionError as e:
            raise ModelProviderError(message=str(e), model=self.name) from e

        except APIStatusError as e:
            raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e

        except Exception as e:
            raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/aws/serializer.py | browser_use/llm/aws/serializer.py | import base64
import json
import re
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class AWSBedrockMessageSerializer:
"""Serializer for converting between custom message types and AWS Bedrock message format."""
@staticmethod
def _is_base64_image(url: str) -> bool:
"""Check if the URL is a base64 encoded image."""
return url.startswith('data:image/')
@staticmethod
def _is_url_image(url: str) -> bool:
"""Check if the URL is a regular HTTP/HTTPS image URL."""
return url.startswith(('http://', 'https://')) and any(
url.lower().endswith(ext) for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp']
)
@staticmethod
def _parse_base64_url(url: str) -> tuple[str, bytes]:
"""Parse a base64 data URL to extract format and raw bytes."""
# Format: data:image/jpeg;base64,<data>
if not url.startswith('data:'):
raise ValueError(f'Invalid base64 URL: {url}')
header, data = url.split(',', 1)
# Extract format from mime type
mime_match = re.search(r'image/(\w+)', header)
if mime_match:
format_name = mime_match.group(1).lower()
# Map common formats
format_mapping = {'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png', 'gif': 'gif', 'webp': 'webp'}
image_format = format_mapping.get(format_name, 'jpeg')
else:
image_format = 'jpeg' # Default format
# Decode base64 data
try:
image_bytes = base64.b64decode(data)
except Exception as e:
raise ValueError(f'Failed to decode base64 image data: {e}')
return image_format, image_bytes
@staticmethod
def _download_and_convert_image(url: str) -> tuple[str, bytes]:
"""Download an image from URL and convert to base64 bytes."""
try:
import httpx
except ImportError:
raise ImportError('httpx not available. Please install it to use URL images with AWS Bedrock.')
try:
response = httpx.get(url, timeout=30)
response.raise_for_status()
# Detect format from content type or URL
content_type = response.headers.get('content-type', '').lower()
if 'jpeg' in content_type or url.lower().endswith(('.jpg', '.jpeg')):
image_format = 'jpeg'
elif 'png' in content_type or url.lower().endswith('.png'):
image_format = 'png'
elif 'gif' in content_type or url.lower().endswith('.gif'):
image_format = 'gif'
elif 'webp' in content_type or url.lower().endswith('.webp'):
image_format = 'webp'
else:
image_format = 'jpeg' # Default format
return image_format, response.content
except Exception as e:
raise ValueError(f'Failed to download image from {url}: {e}')
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> dict[str, Any]:
"""Convert a text content part to AWS Bedrock format."""
return {'text': part.text}
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> dict[str, Any]:
"""Convert an image content part to AWS Bedrock format."""
url = part.image_url.url
if AWSBedrockMessageSerializer._is_base64_image(url):
# Handle base64 encoded images
image_format, image_bytes = AWSBedrockMessageSerializer._parse_base64_url(url)
elif AWSBedrockMessageSerializer._is_url_image(url):
# Download and convert URL images
image_format, image_bytes = AWSBedrockMessageSerializer._download_and_convert_image(url)
else:
raise ValueError(f'Unsupported image URL format: {url}')
return {
'image': {
'format': image_format,
'source': {
'bytes': image_bytes,
},
}
}
@staticmethod
def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> list[dict[str, Any]]:
"""Serialize content for user messages."""
if isinstance(content, str):
return [{'text': content}]
content_blocks: list[dict[str, Any]] = []
for part in content:
if part.type == 'text':
content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))
elif part.type == 'image_url':
content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_image(part))
return content_blocks
@staticmethod
def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> list[dict[str, Any]]:
"""Serialize content for system messages."""
if isinstance(content, str):
return [{'text': content}]
content_blocks: list[dict[str, Any]] = []
for part in content:
if part.type == 'text':
content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))
return content_blocks
@staticmethod
def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> list[dict[str, Any]]:
"""Serialize content for assistant messages."""
if content is None:
return []
if isinstance(content, str):
return [{'text': content}]
content_blocks: list[dict[str, Any]] = []
for part in content:
if part.type == 'text':
content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))
# Skip refusal content parts - AWS Bedrock doesn't need them
return content_blocks
@staticmethod
def _serialize_tool_call(tool_call: ToolCall) -> dict[str, Any]:
"""Convert a tool call to AWS Bedrock format."""
try:
arguments = json.loads(tool_call.function.arguments)
except json.JSONDecodeError:
# If arguments aren't valid JSON, wrap them
arguments = {'arguments': tool_call.function.arguments}
return {
'toolUse': {
'toolUseId': tool_call.id,
'name': tool_call.function.name,
'input': arguments,
}
}
# region - Serialize overloads
@overload
@staticmethod
def serialize(message: UserMessage) -> dict[str, Any]: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> SystemMessage: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> dict[str, Any]: ...
@staticmethod
def serialize(message: BaseMessage) -> dict[str, Any] | SystemMessage:
"""Serialize a custom message to AWS Bedrock format."""
if isinstance(message, UserMessage):
return {
'role': 'user',
'content': AWSBedrockMessageSerializer._serialize_user_content(message.content),
}
elif isinstance(message, SystemMessage):
# System messages are handled separately in AWS Bedrock
return message
elif isinstance(message, AssistantMessage):
content_blocks: list[dict[str, Any]] = []
# Add content blocks if present
if message.content is not None:
content_blocks.extend(AWSBedrockMessageSerializer._serialize_assistant_content(message.content))
# Add tool use blocks if present
if message.tool_calls:
for tool_call in message.tool_calls:
content_blocks.append(AWSBedrockMessageSerializer._serialize_tool_call(tool_call))
# AWS Bedrock requires at least one content block
if not content_blocks:
content_blocks = [{'text': ''}]
return {
'role': 'assistant',
'content': content_blocks,
}
else:
raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]:
"""
Serialize a list of messages, extracting any system message.
Returns:
Tuple of (bedrock_messages, system_message) where system_message is extracted
from any SystemMessage in the list.
"""
bedrock_messages: list[dict[str, Any]] = []
system_message: list[dict[str, Any]] | None = None
for message in messages:
if isinstance(message, SystemMessage):
# Extract system message content
system_message = AWSBedrockMessageSerializer._serialize_system_content(message.content)
else:
# Serialize and add to regular messages
serialized = AWSBedrockMessageSerializer.serialize(message)
bedrock_messages.append(serialized)
return bedrock_messages, system_message
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/aws/chat_anthropic.py | browser_use/llm/aws/chat_anthropic.py | import json
from collections.abc import Mapping
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, TypeVar, overload
from anthropic import (
APIConnectionError,
APIStatusError,
AsyncAnthropicBedrock,
RateLimitError,
omit,
)
from anthropic.types import CacheControlEphemeralParam, Message, ToolParam
from anthropic.types.text_block import TextBlock
from anthropic.types.tool_choice_tool_param import ToolChoiceToolParam
from pydantic import BaseModel
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer
from browser_use.llm.aws.chat_bedrock import ChatAWSBedrock
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
if TYPE_CHECKING:
from boto3.session import Session # pyright: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAnthropicBedrock(ChatAWSBedrock):
	"""
	AWS Bedrock Anthropic Claude chat model.

	This is a convenience class that provides Claude-specific defaults
	for the AWS Bedrock service. It inherits all functionality from
	ChatAWSBedrock but sets Anthropic Claude as the default model.

	Unlike the parent class, requests go through the official
	``AsyncAnthropicBedrock`` client rather than boto3.
	"""

	# Anthropic Claude specific defaults
	model: str = 'anthropic.claude-3-5-sonnet-20240620-v1:0'
	max_tokens: int = 8192  # output-token cap sent with every request
	temperature: float | None = None  # sampling temperature; None = provider default
	top_p: float | None = None  # nucleus sampling
	top_k: int | None = None  # top-k sampling
	stop_sequences: list[str] | None = None  # extra stop sequences
	seed: int | None = None  # NOTE(review): forwarded as-is — confirm Bedrock/Anthropic honors 'seed'

	# AWS credentials and configuration
	aws_access_key: str | None = None
	aws_secret_key: str | None = None
	aws_session_token: str | None = None  # only needed with temporary credentials
	aws_region: str | None = None
	session: 'Session | None' = None  # optional boto3 session; takes precedence over the fields above

	# Client initialization parameters
	max_retries: int = 10
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None

	@property
	def provider(self) -> str:
		"""Identifier of this chat backend."""
		return 'anthropic_bedrock'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary for Bedrock."""
		client_params: dict[str, Any] = {}

		if self.session:
			# A boto3 session wins: pull credentials and region from it.
			credentials = self.session.get_credentials()
			client_params.update(
				{
					'aws_access_key': credentials.access_key,
					'aws_secret_key': credentials.secret_key,
					'aws_session_token': credentials.token,
					'aws_region': self.session.region_name,
				}
			)
		else:
			# Use individual credentials
			if self.aws_access_key:
				client_params['aws_access_key'] = self.aws_access_key
			if self.aws_secret_key:
				client_params['aws_secret_key'] = self.aws_secret_key
			if self.aws_region:
				client_params['aws_region'] = self.aws_region
			if self.aws_session_token:
				client_params['aws_session_token'] = self.aws_session_token

		# Add optional parameters
		if self.max_retries:
			client_params['max_retries'] = self.max_retries
		if self.default_headers:
			client_params['default_headers'] = self.default_headers
		if self.default_query:
			client_params['default_query'] = self.default_query

		return client_params

	def _get_client_params_for_invoke(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary for invoke (only non-None values are sent)."""
		client_params: dict[str, Any] = {}

		if self.temperature is not None:
			client_params['temperature'] = self.temperature
		if self.max_tokens is not None:
			client_params['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			client_params['top_p'] = self.top_p
		if self.top_k is not None:
			client_params['top_k'] = self.top_k
		if self.seed is not None:
			client_params['seed'] = self.seed
		if self.stop_sequences is not None:
			client_params['stop_sequences'] = self.stop_sequences

		return client_params

	def get_client(self) -> AsyncAnthropicBedrock:
		"""
		Returns an AsyncAnthropicBedrock client.

		A fresh client instance is constructed on every call.

		Returns:
			AsyncAnthropicBedrock: An instance of the AsyncAnthropicBedrock client.
		"""
		client_params = self._get_client_params()
		return AsyncAnthropicBedrock(**client_params)

	@property
	def name(self) -> str:
		"""Model identifier used for logging and error reporting."""
		return str(self.model)

	def _get_usage(self, response: Message) -> ChatInvokeUsage | None:
		"""Extract usage information from the response."""
		usage = ChatInvokeUsage(
			prompt_tokens=response.usage.input_tokens
			+ (
				response.usage.cache_read_input_tokens or 0
			),  # Anthropic reports cache-read tokens separately; add them to get the effective prompt size
			completion_tokens=response.usage.output_tokens,
			# NOTE(review): total_tokens omits cache_read_input_tokens, so it can be
			# smaller than prompt_tokens + completion_tokens — confirm this is intended.
			total_tokens=response.usage.input_tokens + response.usage.output_tokens,
			prompt_cached_tokens=response.usage.cache_read_input_tokens,
			prompt_cache_creation_tokens=response.usage.cache_creation_input_tokens,
			prompt_image_tokens=None,
		)
		return usage

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke Claude on Bedrock.

		Args:
			messages: Chat history to send; system messages are split out by the serializer.
			output_format: Optional Pydantic model. When given, structured output is
				obtained by forcing a tool call whose input schema mirrors the model.
			**kwargs: Accepted for interface compatibility; not used here.

		Returns:
			ChatInvokeCompletion wrapping either the raw text or a validated
			``output_format`` instance, plus token usage.

		Raises:
			ModelProviderError: on connection/API errors or missing tool output.
			ModelRateLimitError: on rate-limit responses.
		"""
		anthropic_messages, system_prompt = AnthropicMessageSerializer.serialize_messages(messages)

		try:
			if output_format is None:
				# Normal completion without structured output
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					system=system_prompt or omit,
					**self._get_client_params_for_invoke(),
				)

				usage = self._get_usage(response)

				# Extract text from the first content block
				first_content = response.content[0]
				if isinstance(first_content, TextBlock):
					response_text = first_content.text
				else:
					# If it's not a text block, convert to string
					response_text = str(first_content)

				return ChatInvokeCompletion(
					completion=response_text,
					usage=usage,
				)
			else:
				# Use tool calling for structured output
				# Create a tool that represents the output format
				tool_name = output_format.__name__
				schema = output_format.model_json_schema()

				# Remove title from schema if present (Anthropic doesn't like it in parameters)
				if 'title' in schema:
					del schema['title']

				tool = ToolParam(
					name=tool_name,
					description=f'Extract information in the format of {tool_name}',
					input_schema=schema,
					cache_control=CacheControlEphemeralParam(type='ephemeral'),
				)

				# Force the model to use this tool
				tool_choice = ToolChoiceToolParam(type='tool', name=tool_name)

				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					tools=[tool],
					system=system_prompt or omit,
					tool_choice=tool_choice,
					**self._get_client_params_for_invoke(),
				)

				usage = self._get_usage(response)

				# Extract the tool use block
				for content_block in response.content:
					if hasattr(content_block, 'type') and content_block.type == 'tool_use':
						# Parse the tool input as the structured output
						try:
							return ChatInvokeCompletion(completion=output_format.model_validate(content_block.input), usage=usage)
						except Exception as e:
							# If validation fails, try to parse it as JSON first
							if isinstance(content_block.input, str):
								data = json.loads(content_block.input)
								return ChatInvokeCompletion(
									completion=output_format.model_validate(data),
									usage=usage,
								)
							raise e

				# If no tool use block found, raise an error
				raise ValueError('Expected tool use in response but none found')

		except APIConnectionError as e:
			raise ModelProviderError(message=e.message, model=self.name) from e
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/aws/__init__.py | browser_use/llm/aws/__init__.py | from typing import TYPE_CHECKING
# Type stubs for lazy imports
if TYPE_CHECKING:
from browser_use.llm.aws.chat_anthropic import ChatAnthropicBedrock
from browser_use.llm.aws.chat_bedrock import ChatAWSBedrock
# Lazy imports mapping for AWS chat models
_LAZY_IMPORTS = {
	'ChatAnthropicBedrock': ('browser_use.llm.aws.chat_anthropic', 'ChatAnthropicBedrock'),
	'ChatAWSBedrock': ('browser_use.llm.aws.chat_bedrock', 'ChatAWSBedrock'),
}


def __getattr__(name: str):
	"""Resolve AWS chat model classes on first access (PEP 562 module __getattr__)."""
	if name not in _LAZY_IMPORTS:
		raise AttributeError(f"module '{__name__}' has no attribute '{name}'")

	module_path, attr_name = _LAZY_IMPORTS[name]
	try:
		from importlib import import_module

		attr = getattr(import_module(module_path), attr_name)
	except ImportError as e:
		raise ImportError(f'Failed to import {name} from {module_path}: {e}') from e

	# Cache in module globals so later lookups bypass __getattr__ entirely.
	globals()[name] = attr
	return attr


__all__ = [
	'ChatAWSBedrock',
	'ChatAnthropicBedrock',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/aws/chat_bedrock.py | browser_use/llm/aws/chat_bedrock.py | import json
from dataclasses import dataclass
from os import getenv
from typing import TYPE_CHECKING, Any, TypeVar, overload
from pydantic import BaseModel
from browser_use.llm.aws.serializer import AWSBedrockMessageSerializer
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
if TYPE_CHECKING:
from boto3 import client as AwsClient # type: ignore
from boto3.session import Session # type: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAWSBedrock(BaseChatModel):
	"""
	AWS Bedrock chat model supporting multiple providers (Anthropic, Meta, etc.).

	This class provides access to various models via AWS Bedrock,
	supporting both text generation and structured output via tool calling.

	To use this model, you need to either:
	1. Set the following environment variables:
		- AWS_ACCESS_KEY_ID
		- AWS_SECRET_ACCESS_KEY
		- AWS_SESSION_TOKEN (only required when using temporary credentials)
		- AWS_REGION
	2. Or provide a boto3 Session object
	3. Or use AWS SSO authentication
	"""

	# Model configuration
	model: str = 'anthropic.claude-3-5-sonnet-20240620-v1:0'
	max_tokens: int | None = 4096  # output-token cap for the Converse call
	temperature: float | None = None  # sampling temperature; None = provider default
	top_p: float | None = None  # nucleus sampling
	seed: int | None = None  # NOTE(review): 'seed' is not a documented Converse inferenceConfig field — verify
	stop_sequences: list[str] | None = None  # custom stop sequences

	# AWS credentials and configuration
	aws_access_key_id: str | None = None
	aws_secret_access_key: str | None = None
	aws_session_token: str | None = None  # only needed with temporary credentials
	aws_region: str | None = None
	aws_sso_auth: bool = False  # when True, rely on the ambient SSO/default credential chain
	session: 'Session | None' = None  # optional boto3 session; takes precedence over everything above

	# Request parameters
	request_params: dict[str, Any] | None = None  # extra kwargs merged into the Converse request body

	# Static
	@property
	def provider(self) -> str:
		"""Identifier of this chat backend."""
		return 'aws_bedrock'

	def _get_client(self) -> 'AwsClient':  # type: ignore
		"""Get the AWS Bedrock client.

		Credential resolution order: boto3 session > SSO/default chain (if
		``aws_sso_auth``) > explicit keys from fields or environment variables.

		Raises:
			ImportError: if boto3 is not installed.
			ModelProviderError: if no usable credentials are found.
		"""
		try:
			from boto3 import client as AwsClient  # type: ignore
		except ImportError:
			raise ImportError(
				'`boto3` not installed. Please install using `pip install browser-use[aws] or pip install browser-use[all]`'
			)

		if self.session:
			return self.session.client('bedrock-runtime')

		# Get credentials from environment or instance parameters
		access_key = self.aws_access_key_id or getenv('AWS_ACCESS_KEY_ID')
		secret_key = self.aws_secret_access_key or getenv('AWS_SECRET_ACCESS_KEY')
		session_token = self.aws_session_token or getenv('AWS_SESSION_TOKEN')
		region = self.aws_region or getenv('AWS_REGION') or getenv('AWS_DEFAULT_REGION')

		if self.aws_sso_auth:
			# Let boto3 resolve credentials via its default chain (SSO, profiles, IAM role, ...)
			return AwsClient(service_name='bedrock-runtime', region_name=region)
		else:
			if not access_key or not secret_key:
				raise ModelProviderError(
					message='AWS credentials not found. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables (and AWS_SESSION_TOKEN if using temporary credentials) or provide a boto3 session.',
					model=self.name,
				)

			return AwsClient(
				service_name='bedrock-runtime',
				region_name=region,
				aws_access_key_id=access_key,
				aws_secret_access_key=secret_key,
				aws_session_token=session_token,
			)

	@property
	def name(self) -> str:
		"""Model identifier used for logging and error reporting."""
		return str(self.model)

	def _get_inference_config(self) -> dict[str, Any]:
		"""Get the inference configuration for the request (only non-None fields are included)."""
		config: dict[str, Any] = {}
		if self.max_tokens is not None:
			config['maxTokens'] = self.max_tokens
		if self.temperature is not None:
			config['temperature'] = self.temperature
		if self.top_p is not None:
			config['topP'] = self.top_p
		if self.stop_sequences is not None:
			config['stopSequences'] = self.stop_sequences
		if self.seed is not None:
			config['seed'] = self.seed
		return config

	def _format_tools_for_request(self, output_format: type[BaseModel]) -> list[dict[str, Any]]:
		"""Format a Pydantic model as a tool for structured output.

		NOTE(review): only 'type' and 'description' are copied per top-level
		property, so nested objects/arrays/enums ($defs, items, nested
		properties) are dropped from the schema sent to Bedrock — confirm this
		is sufficient for the output models used with this class.
		"""
		schema = output_format.model_json_schema()

		# Convert Pydantic schema to Bedrock tool format
		properties: dict[str, Any] = {}
		required: list[str] = []  # placeholder; reassigned from the schema below

		for prop_name, prop_info in schema.get('properties', {}).items():
			properties[prop_name] = {
				'type': prop_info.get('type', 'string'),
				'description': prop_info.get('description', ''),
			}

		# Add required fields
		required = schema.get('required', [])

		return [
			{
				'toolSpec': {
					'name': f'extract_{output_format.__name__.lower()}',
					'description': f'Extract information in the format of {output_format.__name__}',
					'inputSchema': {'json': {'type': 'object', 'properties': properties, 'required': required}},
				}
			}
		]

	def _get_usage(self, response: dict[str, Any]) -> ChatInvokeUsage | None:
		"""Extract usage information from the response; None when the API returned no usage."""
		if 'usage' not in response:
			return None

		usage_data = response['usage']
		return ChatInvokeUsage(
			prompt_tokens=usage_data.get('inputTokens', 0),
			completion_tokens=usage_data.get('outputTokens', 0),
			total_tokens=usage_data.get('totalTokens', 0),
			prompt_cached_tokens=None,  # Bedrock doesn't provide this
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
		)

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the AWS Bedrock model with the given messages.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ImportError: if boto3/botocore is not installed.
			ModelProviderError: on API errors, invalid responses, or failed validation.
			ModelRateLimitError: on throttling responses.
		"""
		try:
			from botocore.exceptions import ClientError  # type: ignore
		except ImportError:
			raise ImportError(
				'`boto3` not installed. Please install using `pip install browser-use[aws] or pip install browser-use[all]`'
			)

		bedrock_messages, system_message = AWSBedrockMessageSerializer.serialize_messages(messages)

		try:
			# Prepare the request body
			body: dict[str, Any] = {}

			if system_message:
				body['system'] = system_message

			inference_config = self._get_inference_config()
			if inference_config:
				body['inferenceConfig'] = inference_config

			# Handle structured output via tool calling
			if output_format is not None:
				tools = self._format_tools_for_request(output_format)
				body['toolConfig'] = {'tools': tools}

			# Add any additional request parameters
			if self.request_params:
				body.update(self.request_params)

			# Filter out None values
			body = {k: v for k, v in body.items() if v is not None}

			# Make the API call
			client = self._get_client()
			response = client.converse(modelId=self.model, messages=bedrock_messages, **body)

			usage = self._get_usage(response)

			# Extract the response content
			if 'output' in response and 'message' in response['output']:
				message = response['output']['message']
				content = message.get('content', [])

				if output_format is None:
					# Return text response: join all text blocks with newlines
					text_content = []
					for item in content:
						if 'text' in item:
							text_content.append(item['text'])

					response_text = '\n'.join(text_content) if text_content else ''
					return ChatInvokeCompletion(
						completion=response_text,
						usage=usage,
					)
				else:
					# Handle structured output from tool calls
					for item in content:
						if 'toolUse' in item:
							tool_use = item['toolUse']
							tool_input = tool_use.get('input', {})

							try:
								# Validate and return the structured output
								return ChatInvokeCompletion(
									completion=output_format.model_validate(tool_input),
									usage=usage,
								)
							except Exception as e:
								# If validation fails, try to parse as JSON first
								if isinstance(tool_input, str):
									try:
										data = json.loads(tool_input)
										return ChatInvokeCompletion(
											completion=output_format.model_validate(data),
											usage=usage,
										)
									except json.JSONDecodeError:
										pass

								raise ModelProviderError(
									message=f'Failed to validate structured output: {str(e)}',
									model=self.name,
								) from e

					# If no tool use found but output_format was requested
					raise ModelProviderError(
						message='Expected structured output but no tool use found in response',
						model=self.name,
					)

			# If no valid content found
			if output_format is None:
				return ChatInvokeCompletion(
					completion='',
					usage=usage,
				)
			else:
				raise ModelProviderError(
					message='No valid content found in response',
					model=self.name,
				)

		except ClientError as e:
			error_code = e.response.get('Error', {}).get('Code', 'Unknown')
			error_message = e.response.get('Error', {}).get('Message', str(e))

			if error_code in ['ThrottlingException', 'TooManyRequestsException']:
				raise ModelRateLimitError(message=error_message, model=self.name) from e
			else:
				raise ModelProviderError(message=error_message, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/vercel/serializer.py | browser_use/llm/vercel/serializer.py | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class VercelMessageSerializer:
	"""Message serializer for the Vercel AI Gateway.

	The gateway speaks the OpenAI-compatible chat API, so serialization is
	delegated wholesale to :class:`OpenAIMessageSerializer`.
	"""

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
		"""Convert browser_use messages to the OpenAI wire format used by Vercel.

		Args:
			messages: List of browser_use messages

		Returns:
			List of Vercel AI Gateway-compatible messages (identical to OpenAI format)
		"""
		return OpenAIMessageSerializer.serialize_messages(messages)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/vercel/chat.py | browser_use/llm/vercel/chat.py | import json
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeAlias, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage, ContentPartTextParam, SystemMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.vercel.serializer import VercelMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)

# Known model identifiers routable through the Vercel AI Gateway,
# provider-prefixed as '<provider>/<model>'. This alias exists for editor and
# type-checker completions only; ChatVercel.model also accepts arbitrary strings.
ChatVercelModel: TypeAlias = Literal[
	'alibaba/qwen-3-14b',
	'alibaba/qwen-3-235b',
	'alibaba/qwen-3-30b',
	'alibaba/qwen-3-32b',
	'alibaba/qwen3-coder',
	'alibaba/qwen3-coder-30b-a3b',
	'alibaba/qwen3-coder-plus',
	'alibaba/qwen3-max',
	'alibaba/qwen3-max-preview',
	'alibaba/qwen3-next-80b-a3b-instruct',
	'alibaba/qwen3-next-80b-a3b-thinking',
	'alibaba/qwen3-vl-instruct',
	'alibaba/qwen3-vl-thinking',
	'amazon/nova-lite',
	'amazon/nova-micro',
	'amazon/nova-pro',
	'amazon/titan-embed-text-v2',
	'anthropic/claude-3-haiku',
	'anthropic/claude-3-opus',
	'anthropic/claude-3.5-haiku',
	'anthropic/claude-3.5-sonnet',
	'anthropic/claude-3.5-sonnet-20240620',
	'anthropic/claude-3.7-sonnet',
	'anthropic/claude-haiku-4.5',
	'anthropic/claude-opus-4',
	'anthropic/claude-opus-4.1',
	'anthropic/claude-sonnet-4',
	'anthropic/claude-sonnet-4.5',
	'cohere/command-a',
	'cohere/command-r',
	'cohere/command-r-plus',
	'cohere/embed-v4.0',
	'deepseek/deepseek-r1',
	'deepseek/deepseek-r1-distill-llama-70b',
	'deepseek/deepseek-v3',
	'deepseek/deepseek-v3.1',
	'deepseek/deepseek-v3.1-base',
	'deepseek/deepseek-v3.1-terminus',
	'deepseek/deepseek-v3.2-exp',
	'deepseek/deepseek-v3.2-exp-thinking',
	'google/gemini-2.0-flash',
	'google/gemini-2.0-flash-lite',
	'google/gemini-2.5-flash',
	'google/gemini-2.5-flash-image',
	'google/gemini-2.5-flash-image-preview',
	'google/gemini-2.5-flash-lite',
	'google/gemini-2.5-flash-lite-preview-09-2025',
	'google/gemini-2.5-flash-preview-09-2025',
	'google/gemini-2.5-pro',
	'google/gemini-embedding-001',
	'google/gemma-2-9b',
	'google/text-embedding-005',
	'google/text-multilingual-embedding-002',
	'inception/mercury-coder-small',
	'meituan/longcat-flash-chat',
	'meituan/longcat-flash-thinking',
	'meta/llama-3-70b',
	'meta/llama-3-8b',
	'meta/llama-3.1-70b',
	'meta/llama-3.1-8b',
	'meta/llama-3.2-11b',
	'meta/llama-3.2-1b',
	'meta/llama-3.2-3b',
	'meta/llama-3.2-90b',
	'meta/llama-3.3-70b',
	'meta/llama-4-maverick',
	'meta/llama-4-scout',
	'mistral/codestral',
	'mistral/codestral-embed',
	'mistral/devstral-small',
	'mistral/magistral-medium',
	'mistral/magistral-medium-2506',
	'mistral/magistral-small',
	'mistral/magistral-small-2506',
	'mistral/ministral-3b',
	'mistral/ministral-8b',
	'mistral/mistral-embed',
	'mistral/mistral-large',
	'mistral/mistral-medium',
	'mistral/mistral-small',
	'mistral/mixtral-8x22b-instruct',
	'mistral/pixtral-12b',
	'mistral/pixtral-large',
	'moonshotai/kimi-k2',
	'moonshotai/kimi-k2-0905',
	'moonshotai/kimi-k2-turbo',
	'morph/morph-v3-fast',
	'morph/morph-v3-large',
	'openai/gpt-3.5-turbo',
	'openai/gpt-3.5-turbo-instruct',
	'openai/gpt-4-turbo',
	'openai/gpt-4.1',
	'openai/gpt-4.1-mini',
	'openai/gpt-4.1-nano',
	'openai/gpt-4o',
	'openai/gpt-4o-mini',
	'openai/gpt-5',
	'openai/gpt-5-codex',
	'openai/gpt-5-mini',
	'openai/gpt-5-nano',
	'openai/gpt-5-pro',
	'openai/gpt-oss-120b',
	'openai/gpt-oss-20b',
	'openai/o1',
	'openai/o3',
	'openai/o3-mini',
	'openai/o4-mini',
	'openai/text-embedding-3-large',
	'openai/text-embedding-3-small',
	'openai/text-embedding-ada-002',
	'perplexity/sonar',
	'perplexity/sonar-pro',
	'perplexity/sonar-reasoning',
	'perplexity/sonar-reasoning-pro',
	'stealth/sonoma-dusk-alpha',
	'stealth/sonoma-sky-alpha',
	'vercel/v0-1.0-md',
	'vercel/v0-1.5-md',
	'voyage/voyage-3-large',
	'voyage/voyage-3.5',
	'voyage/voyage-3.5-lite',
	'voyage/voyage-code-2',
	'voyage/voyage-code-3',
	'voyage/voyage-finance-2',
	'voyage/voyage-law-2',
	'xai/grok-2',
	'xai/grok-2-vision',
	'xai/grok-3',
	'xai/grok-3-fast',
	'xai/grok-3-mini',
	'xai/grok-3-mini-fast',
	'xai/grok-4',
	'xai/grok-4-fast-non-reasoning',
	'xai/grok-4-fast-reasoning',
	'xai/grok-code-fast-1',
	'zai/glm-4.5',
	'zai/glm-4.5-air',
	'zai/glm-4.5v',
	'zai/glm-4.6',
]
@dataclass
class ChatVercel(BaseChatModel):
"""
A wrapper around Vercel AI Gateway's API, which provides OpenAI-compatible access
to various LLM models with features like rate limiting, caching, and monitoring.
Examples:
```python
from browser_use import Agent, ChatVercel
llm = ChatVercel(model='openai/gpt-4o', api_key='your_vercel_api_key')
agent = Agent(task='Your task here', llm=llm)
```
Args:
model: The model identifier
api_key: Your Vercel API key
base_url: The Vercel AI Gateway endpoint (defaults to https://ai-gateway.vercel.sh/v1)
temperature: Sampling temperature (0-2)
max_tokens: Maximum tokens to generate
reasoning_models: List of reasoning model patterns (e.g., 'o1', 'gpt-oss') that need
prompt-based JSON extraction. Auto-detects common reasoning models by default.
timeout: Request timeout in seconds
max_retries: Maximum number of retries for failed requests
provider_options: Provider routing options for the gateway. Use this to control which
providers are used and in what order. Example: {'gateway': {'order': ['vertex', 'anthropic']}}
"""
# Model configuration
model: ChatVercelModel | str
# Model params
temperature: float | None = None
max_tokens: int | None = None
top_p: float | None = None
reasoning_models: list[str] | None = field(
default_factory=lambda: [
'o1',
'o3',
'o4',
'gpt-oss',
'deepseek-r1',
'qwen3-next-80b-a3b-thinking',
]
)
# Client initialization parameters
api_key: str | None = None
base_url: str | httpx.URL = 'https://ai-gateway.vercel.sh/v1'
timeout: float | httpx.Timeout | None = None
max_retries: int = 5
default_headers: Mapping[str, str] | None = None
default_query: Mapping[str, object] | None = None
http_client: httpx.AsyncClient | None = None
_strict_response_validation: bool = False
provider_options: dict[str, Any] | None = None
# Static
@property
def provider(self) -> str:
return 'vercel'
def _get_client_params(self) -> dict[str, Any]:
"""Prepare client parameters dictionary."""
base_params = {
'api_key': self.api_key,
'base_url': self.base_url,
'timeout': self.timeout,
'max_retries': self.max_retries,
'default_headers': self.default_headers,
'default_query': self.default_query,
'_strict_response_validation': self._strict_response_validation,
}
client_params = {k: v for k, v in base_params.items() if v is not None}
if self.http_client is not None:
client_params['http_client'] = self.http_client
return client_params
def get_client(self) -> AsyncOpenAI:
"""
Returns an AsyncOpenAI client configured for Vercel AI Gateway.
Returns:
AsyncOpenAI: An instance of the AsyncOpenAI client with Vercel base URL.
"""
if not hasattr(self, '_client'):
client_params = self._get_client_params()
self._client = AsyncOpenAI(**client_params)
return self._client
@property
def name(self) -> str:
return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
"""Extract usage information from the Vercel response."""
if response.usage is None:
return None
prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
cached_tokens = prompt_details.cached_tokens if prompt_details else None
return ChatInvokeUsage(
prompt_tokens=response.usage.prompt_tokens,
prompt_cached_tokens=cached_tokens,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
completion_tokens=response.usage.completion_tokens,
total_tokens=response.usage.total_tokens,
)
def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
"""
Convert a Pydantic model to a Gemini-compatible schema.
This function removes unsupported properties like 'additionalProperties' and resolves
$ref references that Gemini doesn't support.
"""
# Handle $defs and $ref resolution
if '$defs' in schema:
defs = schema.pop('$defs')
def resolve_refs(obj: Any) -> Any:
if isinstance(obj, dict):
if '$ref' in obj:
ref = obj.pop('$ref')
ref_name = ref.split('/')[-1]
if ref_name in defs:
# Replace the reference with the actual definition
resolved = defs[ref_name].copy()
# Merge any additional properties from the reference
for key, value in obj.items():
if key != '$ref':
resolved[key] = value
return resolve_refs(resolved)
return obj
else:
# Recursively process all dictionary values
return {k: resolve_refs(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [resolve_refs(item) for item in obj]
return obj
schema = resolve_refs(schema)
# Remove unsupported properties
def clean_schema(obj: Any) -> Any:
if isinstance(obj, dict):
# Remove unsupported properties
cleaned = {}
for key, value in obj.items():
if key not in ['additionalProperties', 'title', 'default']:
cleaned_value = clean_schema(value)
# Handle empty object properties - Gemini doesn't allow empty OBJECT types
if (
key == 'properties'
and isinstance(cleaned_value, dict)
and len(cleaned_value) == 0
and isinstance(obj.get('type', ''), str)
and obj.get('type', '').upper() == 'OBJECT'
):
# Convert empty object to have at least one property
cleaned['properties'] = {'_placeholder': {'type': 'string'}}
else:
cleaned[key] = cleaned_value
# If this is an object type with empty properties, add a placeholder
if (
isinstance(cleaned.get('type', ''), str)
and cleaned.get('type', '').upper() == 'OBJECT'
and 'properties' in cleaned
and isinstance(cleaned['properties'], dict)
and len(cleaned['properties']) == 0
):
cleaned['properties'] = {'_placeholder': {'type': 'string'}}
# Also remove 'title' from the required list if it exists
if 'required' in cleaned and isinstance(cleaned.get('required'), list):
cleaned['required'] = [p for p in cleaned['required'] if p != 'title']
return cleaned
elif isinstance(obj, list):
return [clean_schema(item) for item in obj]
return obj
return clean_schema(schema)
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
"""
Invoke the model with the given messages through Vercel AI Gateway.
Args:
messages: List of chat messages
output_format: Optional Pydantic model class for structured output
Returns:
Either a string response or an instance of output_format
"""
vercel_messages = VercelMessageSerializer.serialize_messages(messages)
try:
model_params: dict[str, Any] = {}
if self.temperature is not None:
model_params['temperature'] = self.temperature
if self.max_tokens is not None:
model_params['max_tokens'] = self.max_tokens
if self.top_p is not None:
model_params['top_p'] = self.top_p
if self.provider_options:
model_params['extra_body'] = {'providerOptions': self.provider_options}
if output_format is None:
# Return string response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
**model_params,
)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=response.choices[0].message.content or '',
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
else:
is_google_model = self.model.startswith('google/')
is_anthropic_model = self.model.startswith('anthropic/')
is_reasoning_model = self.reasoning_models and any(
str(pattern).lower() in str(self.model).lower() for pattern in self.reasoning_models
)
if is_google_model or is_anthropic_model or is_reasoning_model:
modified_messages = [m.model_copy(deep=True) for m in messages]
schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
json_instruction = f'\n\nIMPORTANT: You must respond with ONLY a valid JSON object (no markdown, no code blocks, no explanations) that exactly matches this schema:\n{json.dumps(schema, indent=2)}'
instruction_added = False
if modified_messages and modified_messages[0].role == 'system':
if isinstance(modified_messages[0].content, str):
modified_messages[0].content += json_instruction
instruction_added = True
elif isinstance(modified_messages[0].content, list):
modified_messages[0].content.append(ContentPartTextParam(text=json_instruction))
instruction_added = True
elif modified_messages and modified_messages[-1].role == 'user':
if isinstance(modified_messages[-1].content, str):
modified_messages[-1].content += json_instruction
instruction_added = True
elif isinstance(modified_messages[-1].content, list):
modified_messages[-1].content.append(ContentPartTextParam(text=json_instruction))
instruction_added = True
if not instruction_added:
modified_messages.insert(0, SystemMessage(content=json_instruction))
vercel_messages = VercelMessageSerializer.serialize_messages(modified_messages)
request_params = model_params.copy()
if self.provider_options:
request_params['extra_body'] = {'providerOptions': self.provider_options}
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
**request_params,
)
content = response.choices[0].message.content if response.choices else None
if not content:
raise ModelProviderError(
message='No response from model',
status_code=500,
model=self.name,
)
try:
text = content.strip()
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
parsed_data = json.loads(text)
parsed = output_format.model_validate(parsed_data)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
except (json.JSONDecodeError, ValueError) as e:
raise ModelProviderError(
message=f'Failed to parse JSON response: {str(e)}. Raw response: {content[:200]}',
status_code=500,
model=self.name,
) from e
else:
schema = SchemaOptimizer.create_optimized_json_schema(output_format)
response_format_schema: JSONSchema = {
'name': 'agent_output',
'strict': True,
'schema': schema,
}
request_params = model_params.copy()
if self.provider_options:
request_params['extra_body'] = {'providerOptions': self.provider_options}
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
response_format=ResponseFormatJSONSchema(
json_schema=response_format_schema,
type='json_schema',
),
**request_params,
)
content = response.choices[0].message.content if response.choices else None
if not content:
raise ModelProviderError(
message='Failed to parse structured output from model response - empty or null content',
status_code=500,
model=self.name,
)
usage = self._get_usage(response)
parsed = output_format.model_validate_json(content)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
except RateLimitError as e:
raise ModelRateLimitError(message=e.message, model=self.name) from e
except APIConnectionError as e:
raise ModelProviderError(message=str(e), model=self.name) from e
except APIStatusError as e:
raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/vercel/__init__.py | browser_use/llm/vercel/__init__.py | from browser_use.llm.vercel.chat import ChatVercel
__all__ = ['ChatVercel']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_mistral_schema.py | browser_use/llm/tests/test_mistral_schema.py | from pydantic import BaseModel, Field
from browser_use.llm.mistral.schema import MistralSchemaOptimizer
class NestedExample(BaseModel):
code: str = Field(..., min_length=2, max_length=4, pattern='[A-Z]+')
description: str
class RootExample(BaseModel):
item: NestedExample
email: str = Field(..., json_schema_extra={'format': 'email'})
def test_mistral_schema_strips_unsupported_keywords():
schema = MistralSchemaOptimizer.create_mistral_compatible_schema(RootExample)
def _assert_no_banned_keys(obj):
if isinstance(obj, dict):
for key, value in obj.items():
assert key not in {'minLength', 'maxLength', 'pattern', 'format'}
_assert_no_banned_keys(value)
elif isinstance(obj, list):
for item in obj:
_assert_no_banned_keys(item)
_assert_no_banned_keys(schema)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_single_step.py | browser_use/llm/tests/test_single_step.py | import logging
import os
import tempfile
import pytest
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.agent.service import Agent
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import DOMSelectorMap, EnhancedDOMTreeNode, NodeType, SerializedDOMState, SimplifiedNode
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.anthropic.chat import ChatAnthropic
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.base import BaseChatModel
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.groq.chat import ChatGroq
# Optional OCI import
try:
from browser_use.llm.oci_raw.chat import ChatOCIRaw
OCI_AVAILABLE = True
except ImportError:
ChatOCIRaw = None
OCI_AVAILABLE = False
from browser_use.llm.openai.chat import ChatOpenAI
# Set logging level to INFO for this module
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def _check_oci_credentials() -> bool:
"""Check if OCI credentials are available."""
if not OCI_AVAILABLE:
return False
try:
import oci
oci.config.from_file('~/.oci/config', 'DEFAULT')
return True
except Exception:
return False
def create_mock_state_message(temp_dir: str):
"""Create a mock state message with a single clickable element."""
# Create a mock DOM element with a single clickable button
mock_button = EnhancedDOMTreeNode(
node_id=1,
backend_node_id=1,
node_type=NodeType.ELEMENT_NODE,
node_name='button',
node_value='Click Me',
attributes={'id': 'test-button'},
is_scrollable=False,
is_visible=True,
absolute_position=None,
session_id=None,
target_id='ABCD1234ABCD1234ABCD1234ABCD1234ABCD1234',
frame_id=None,
content_document=None,
shadow_root_type=None,
shadow_roots=None,
parent_node=None,
children_nodes=None,
ax_node=None,
snapshot_node=None,
)
# Create selector map (keyed by backend_node_id)
selector_map: DOMSelectorMap = {mock_button.backend_node_id: mock_button}
# Create mock tab info with proper target_id
mock_tab = TabInfo(
target_id='ABCD1234ABCD1234ABCD1234ABCD1234ABCD1234',
url='https://example.com',
title='Test Page',
)
dom_state = SerializedDOMState(
_root=SimplifiedNode(
original_node=mock_button,
children=[],
should_display=True,
is_interactive=True,
),
selector_map=selector_map,
)
# Create mock browser state with required selector_map
mock_browser_state = BrowserStateSummary(
dom_state=dom_state, # Using the actual DOM element
url='https://example.com',
title='Test Page',
tabs=[mock_tab],
screenshot='', # Empty screenshot
pixels_above=0,
pixels_below=0,
)
# Create file system using the provided temp directory
mock_file_system = FileSystem(temp_dir)
# Create the agent message prompt
agent_prompt = AgentMessagePrompt(
browser_state_summary=mock_browser_state,
file_system=mock_file_system, # Now using actual FileSystem instance
agent_history_description='', # Empty history
read_state_description='', # Empty read state
task='Click the button on the page',
include_attributes=['id'],
step_info=None,
page_filtered_actions=None,
max_clickable_elements_length=40000,
sensitive_data=None,
)
# Override the clickable_elements_to_string method to return our simple element
dom_state.llm_representation = lambda include_attributes=None: '[1]<button id="test-button">Click Me</button>'
# Get the formatted message
message = agent_prompt.get_user_message(use_vision=False)
return message
# Pytest parameterized version
@pytest.mark.parametrize(
'llm_class,model_name',
[
(ChatGroq, 'meta-llama/llama-4-maverick-17b-128e-instruct'),
(ChatGoogle, 'gemini-2.0-flash-exp'),
(ChatOpenAI, 'gpt-4.1-mini'),
(ChatAnthropic, 'claude-3-5-sonnet-latest'),
(ChatAzureOpenAI, 'gpt-4.1-mini'),
pytest.param(
ChatOCIRaw,
{
'model_id': os.getenv('OCI_MODEL_ID', 'placeholder'),
'service_endpoint': os.getenv(
'OCI_SERVICE_ENDPOINT', 'https://inference.generativeai.us-chicago-1.oci.oraclecloud.com'
),
'compartment_id': os.getenv('OCI_COMPARTMENT_ID', 'placeholder'),
'provider': 'meta',
'temperature': 0.7,
'max_tokens': 800,
'frequency_penalty': 0.0,
'presence_penalty': 0.0,
'top_p': 0.9,
'auth_type': 'API_KEY',
'auth_profile': 'DEFAULT',
},
marks=pytest.mark.skipif(
not _check_oci_credentials() or not os.getenv('OCI_MODEL_ID') or not os.getenv('OCI_COMPARTMENT_ID'),
reason='OCI credentials or environment variables not available',
),
),
],
)
async def test_single_step_parametrized(llm_class, model_name):
"""Test single step with different LLM providers using pytest parametrize."""
if isinstance(model_name, dict):
# Handle ChatOCIRaw which requires keyword arguments
llm = llm_class(**model_name)
else:
llm = llm_class(model=model_name)
agent = Agent(task='Click the button on the page', llm=llm)
# Create temporary directory that will stay alive during the test
with tempfile.TemporaryDirectory() as temp_dir:
# Create mock state message
mock_message = create_mock_state_message(temp_dir)
agent.message_manager._set_message_with_type(mock_message, 'state')
messages = agent.message_manager.get_messages()
# Test with simple question
response = await llm.ainvoke(messages, agent.AgentOutput)
# Additional validation for OCI Raw
if ChatOCIRaw is not None and isinstance(llm, ChatOCIRaw):
# Verify OCI Raw generates proper Agent actions
assert response.completion.action is not None
assert len(response.completion.action) > 0
# Basic assertions to ensure response is valid
assert response.completion is not None
assert response.usage is not None
assert response.usage.total_tokens > 0
async def test_single_step():
"""Original test function that tests all models in a loop."""
# Create a list of models to test
models: list[BaseChatModel] = [
ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct'),
ChatGoogle(model='gemini-2.0-flash-exp'),
ChatOpenAI(model='gpt-4.1'),
ChatAnthropic(model='claude-3-5-sonnet-latest'), # Using haiku for cost efficiency
ChatAzureOpenAI(model='gpt-4o-mini'),
]
for llm in models:
print(f'\n{"=" * 60}')
print(f'Testing with model: {llm.provider} - {llm.model}')
print(f'{"=" * 60}\n')
agent = Agent(task='Click the button on the page', llm=llm)
# Create temporary directory that will stay alive during the test
with tempfile.TemporaryDirectory() as temp_dir:
# Create mock state message
mock_message = create_mock_state_message(temp_dir)
# Print the mock message content to see what it looks like
print('Mock state message:')
print(mock_message.content)
print('\n' + '=' * 50 + '\n')
agent.message_manager._set_message_with_type(mock_message, 'state')
messages = agent.message_manager.get_messages()
# Test with simple question
try:
response = await llm.ainvoke(messages, agent.AgentOutput)
logger.info(f'Response from {llm.provider}: {response.completion}')
logger.info(f'Actions: {str(response.completion.action)}')
except Exception as e:
logger.error(f'Error with {llm.provider}: {type(e).__name__}: {str(e)}')
print(f'\n{"=" * 60}\n')
if __name__ == '__main__':
import asyncio
asyncio.run(test_single_step())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_gemini_image.py | browser_use/llm/tests/test_gemini_image.py | import asyncio
import base64
import io
import random
from PIL import Image, ImageDraw, ImageFont
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.google.serializer import GoogleMessageSerializer
from browser_use.llm.messages import (
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
SystemMessage,
UserMessage,
)
def create_random_text_image(text: str = 'hello world', width: int = 4000, height: int = 4000) -> str:
# Create image with random background color
bg_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
image = Image.new('RGB', (width, height), bg_color)
draw = ImageDraw.Draw(image)
# Try to use a default font, fallback to default if not available
try:
font = ImageFont.truetype('arial.ttf', 24)
except Exception:
font = ImageFont.load_default()
# Calculate text position to center it
bbox = draw.textbbox((0, 0), text, font=font)
text_width = bbox[2] - bbox[0]
text_height = bbox[3] - bbox[1]
x = (width - text_width) // 2
y = (height - text_height) // 2
# Draw text with contrasting color
text_color = (255 - bg_color[0], 255 - bg_color[1], 255 - bg_color[2])
draw.text((x, y), text, fill=text_color, font=font)
# Convert to base64
buffer = io.BytesIO()
image.save(buffer, format='JPEG')
img_data = base64.b64encode(buffer.getvalue()).decode()
return f'data:image/jpeg;base64,{img_data}'
async def test_gemini_image_vision():
"""Test Gemini's ability to see and describe images."""
# Create the LLM
llm = ChatGoogle(model='gemini-2.0-flash-exp')
# Create a random image with text
image_data_url = create_random_text_image('Hello Gemini! Can you see this text?')
# Create messages with image
messages: list[BaseMessage] = [
SystemMessage(content='You are a helpful assistant that can see and describe images.'),
UserMessage(
content=[
ContentPartTextParam(text='What do you see in this image? Please describe the text and any visual elements.'),
ContentPartImageParam(image_url=ImageURL(url=image_data_url)),
]
),
]
# Serialize messages for Google format
serializer = GoogleMessageSerializer()
formatted_messages, system_message = serializer.serialize_messages(messages)
print('Testing Gemini image vision...')
print(f'System message: {system_message}')
# Make the API call
try:
response = await llm.ainvoke(messages)
print('\n=== Gemini Response ===')
print(response.completion)
print(response.usage)
print('=======================')
except Exception as e:
print(f'Error calling Gemini: {e}')
print(f'Error type: {type(e)}')
if __name__ == '__main__':
asyncio.run(test_gemini_image_vision())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_chat_models.py | browser_use/llm/tests/test_chat_models.py | import os
import pytest
from pydantic import BaseModel
from browser_use.llm import ChatAnthropic, ChatGoogle, ChatGroq, ChatOpenAI, ChatOpenRouter
from browser_use.llm.messages import ContentPartTextParam
# Optional OCI import
try:
from examples.models.oci_models import xai_llm
OCI_MODELS_AVAILABLE = True
except ImportError:
xai_llm = None
OCI_MODELS_AVAILABLE = False
class CapitalResponse(BaseModel):
"""Structured response for capital question"""
country: str
capital: str
class TestChatModels:
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
UserMessage,
)
"""Test suite for all chat model implementations"""
# Test Constants
SYSTEM_MESSAGE = SystemMessage(content=[ContentPartTextParam(text='You are a helpful assistant.', type='text')])
FRANCE_QUESTION = UserMessage(content='What is the capital of France? Answer in one word.')
FRANCE_ANSWER = AssistantMessage(content='Paris')
GERMANY_QUESTION = UserMessage(content='What is the capital of Germany? Answer in one word.')
# Expected values
EXPECTED_GERMANY_CAPITAL = 'berlin'
EXPECTED_FRANCE_COUNTRY = 'france'
EXPECTED_FRANCE_CAPITAL = 'paris'
# Test messages for conversation
CONVERSATION_MESSAGES: list[BaseMessage] = [
SYSTEM_MESSAGE,
FRANCE_QUESTION,
FRANCE_ANSWER,
GERMANY_QUESTION,
]
# Test messages for structured output
STRUCTURED_MESSAGES: list[BaseMessage] = [UserMessage(content='What is the capital of France?')]
# OpenAI Tests
@pytest.fixture
def openrouter_chat(self):
"""Provides an initialized ChatOpenRouter client for tests."""
if not os.getenv('OPENROUTER_API_KEY'):
pytest.skip('OPENROUTER_API_KEY not set')
return ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)
@pytest.mark.asyncio
async def test_openai_ainvoke_normal(self):
"""Test normal text response from OpenAI"""
# Skip if no API key
if not os.getenv('OPENAI_API_KEY'):
pytest.skip('OPENAI_API_KEY not set')
chat = ChatOpenAI(model='gpt-4o-mini', temperature=0)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_openai_ainvoke_structured(self):
"""Test structured output from OpenAI"""
# Skip if no API key
if not os.getenv('OPENAI_API_KEY'):
pytest.skip('OPENAI_API_KEY not set')
chat = ChatOpenAI(model='gpt-4o-mini', temperature=0)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# Anthropic Tests
@pytest.mark.asyncio
async def test_anthropic_ainvoke_normal(self):
"""Test normal text response from Anthropic"""
# Skip if no API key
if not os.getenv('ANTHROPIC_API_KEY'):
pytest.skip('ANTHROPIC_API_KEY not set')
chat = ChatAnthropic(model='claude-3-5-haiku-latest', max_tokens=100, temperature=0)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_anthropic_ainvoke_structured(self):
"""Test structured output from Anthropic"""
# Skip if no API key
if not os.getenv('ANTHROPIC_API_KEY'):
pytest.skip('ANTHROPIC_API_KEY not set')
chat = ChatAnthropic(model='claude-3-5-haiku-latest', max_tokens=100, temperature=0)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# Google Gemini Tests
@pytest.mark.asyncio
async def test_google_ainvoke_normal(self):
"""Test normal text response from Google Gemini"""
# Skip if no API key
if not os.getenv('GOOGLE_API_KEY'):
pytest.skip('GOOGLE_API_KEY not set')
chat = ChatGoogle(model='gemini-2.0-flash', api_key=os.getenv('GOOGLE_API_KEY'), temperature=0)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_google_ainvoke_structured(self):
"""Test structured output from Google Gemini"""
# Skip if no API key
if not os.getenv('GOOGLE_API_KEY'):
pytest.skip('GOOGLE_API_KEY not set')
chat = ChatGoogle(model='gemini-2.0-flash', api_key=os.getenv('GOOGLE_API_KEY'), temperature=0)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# Google Gemini with Vertex AI Tests
@pytest.mark.asyncio
async def test_google_vertex_ainvoke_normal(self):
"""Test normal text response from Google Gemini via Vertex AI"""
# Skip if no project ID
if not os.getenv('GOOGLE_CLOUD_PROJECT'):
pytest.skip('GOOGLE_CLOUD_PROJECT not set')
chat = ChatGoogle(
model='gemini-2.0-flash',
vertexai=True,
project=os.getenv('GOOGLE_CLOUD_PROJECT'),
location='us-central1',
temperature=0,
)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_google_vertex_ainvoke_structured(self):
"""Test structured output from Google Gemini via Vertex AI"""
# Skip if no project ID
if not os.getenv('GOOGLE_CLOUD_PROJECT'):
pytest.skip('GOOGLE_CLOUD_PROJECT not set')
chat = ChatGoogle(
model='gemini-2.0-flash',
vertexai=True,
project=os.getenv('GOOGLE_CLOUD_PROJECT'),
location='us-central1',
temperature=0,
)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# Groq Tests
@pytest.mark.asyncio
async def test_groq_ainvoke_normal(self):
"""Test normal text response from Groq"""
# Skip if no API key
if not os.getenv('GROQ_API_KEY'):
pytest.skip('GROQ_API_KEY not set')
chat = ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct', temperature=0)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_groq_ainvoke_structured(self):
"""Test structured output from Groq"""
# Skip if no API key
if not os.getenv('GROQ_API_KEY'):
pytest.skip('GROQ_API_KEY not set')
chat = ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct', temperature=0)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# OpenRouter Tests
@pytest.mark.asyncio
async def test_openrouter_ainvoke_normal(self):
"""Test normal text response from OpenRouter"""
# Skip if no API key
if not os.getenv('OPENROUTER_API_KEY'):
pytest.skip('OPENROUTER_API_KEY not set')
chat = ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)
response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_openrouter_ainvoke_structured(self):
"""Test structured output from OpenRouter"""
# Skip if no API key
if not os.getenv('OPENROUTER_API_KEY'):
pytest.skip('OPENROUTER_API_KEY not set')
chat = ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)
response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
# OCI Raw Tests
@pytest.fixture
def oci_raw_chat(self):
"""Provides an initialized ChatOCIRaw client for tests."""
# Skip if OCI models not available
if not OCI_MODELS_AVAILABLE:
pytest.skip('OCI models not available - install with pip install "browser-use[oci]"')
# Skip if OCI credentials not available - check for config file existence
try:
import oci
oci.config.from_file('~/.oci/config', 'DEFAULT')
except Exception:
pytest.skip('OCI credentials not available')
# Skip if using placeholder config
if xai_llm and hasattr(xai_llm, 'compartment_id') and 'example' in xai_llm.compartment_id.lower():
pytest.skip('OCI model using placeholder configuration - set real credentials')
return xai_llm # xai or cohere
@pytest.mark.asyncio
async def test_oci_raw_ainvoke_normal(self, oci_raw_chat):
"""Test normal text response from OCI Raw"""
response = await oci_raw_chat.ainvoke(self.CONVERSATION_MESSAGES)
completion = response.completion
assert isinstance(completion, str)
assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()
@pytest.mark.asyncio
async def test_oci_raw_ainvoke_structured(self, oci_raw_chat):
"""Test structured output from OCI Raw"""
response = await oci_raw_chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
completion = response.completion
assert isinstance(completion, CapitalResponse)
assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_anthropic_cache.py | browser_use/llm/tests/test_anthropic_cache.py | import logging
from typing import cast
from browser_use.agent.service import Agent
from browser_use.llm.anthropic.chat import ChatAnthropic
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer, NonSystemMessage
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
Function,
ImageURL,
SystemMessage,
ToolCall,
UserMessage,
)
logger = logging.getLogger(__name__)
class TestAnthropicCache:
"""Comprehensive test for Anthropic cache serialization."""
def test_cache_basic_functionality(self):
"""Test basic cache functionality for all message types."""
# Test cache with different message types
messages: list[BaseMessage] = [
SystemMessage(content='System message!', cache=True),
UserMessage(content='User message!', cache=True),
AssistantMessage(content='Assistant message!', cache=False),
]
anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages)
assert len(anthropic_messages) == 2
assert isinstance(system_message, list)
assert isinstance(anthropic_messages[0]['content'], list)
assert isinstance(anthropic_messages[1]['content'], str)
# Test cache with assistant message
agent_messages: list[BaseMessage] = [
SystemMessage(content='System message!'),
UserMessage(content='User message!'),
AssistantMessage(content='Assistant message!', cache=True),
]
anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(agent_messages)
assert isinstance(system_message, str)
assert isinstance(anthropic_messages[0]['content'], str)
assert isinstance(anthropic_messages[1]['content'], list)
def test_cache_with_tool_calls(self):
"""Test cache functionality with tool calls."""
tool_call = ToolCall(id='test_id', function=Function(name='test_function', arguments='{"arg": "value"}'))
# Assistant with tool calls and cache
assistant_with_tools = AssistantMessage(content='Assistant with tools', tool_calls=[tool_call], cache=True)
messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_with_tools])
assert len(messages) == 1
assert isinstance(messages[0]['content'], list)
# Should have both text and tool_use blocks
assert len(messages[0]['content']) >= 2
def test_cache_with_images(self):
"""Test cache functionality with image content."""
user_with_image = UserMessage(
content=[
ContentPartTextParam(text='Here is an image:', type='text'),
ContentPartImageParam(image_url=ImageURL(url='https://example.com/image.jpg'), type='image_url'),
],
cache=True,
)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_with_image])
assert len(messages) == 1
assert isinstance(messages[0]['content'], list)
assert len(messages[0]['content']) == 2
def test_cache_with_base64_images(self):
"""Test cache functionality with base64 images."""
base64_url = 'data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='
user_with_base64 = UserMessage(
content=[
ContentPartTextParam(text='Base64 image:', type='text'),
ContentPartImageParam(image_url=ImageURL(url=base64_url), type='image_url'),
],
cache=True,
)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_with_base64])
assert len(messages) == 1
assert isinstance(messages[0]['content'], list)
def test_cache_content_types(self):
"""Test different content types with cache."""
# String content with cache should become list
user_string_cached = UserMessage(content='String message', cache=True)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_string_cached])
assert isinstance(messages[0]['content'], list)
# String content without cache should remain string
user_string_no_cache = UserMessage(content='String message', cache=False)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_string_no_cache])
assert isinstance(messages[0]['content'], str)
# List content maintains list format regardless of cache
user_list_cached = UserMessage(content=[ContentPartTextParam(text='List message', type='text')], cache=True)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_list_cached])
assert isinstance(messages[0]['content'], list)
user_list_no_cache = UserMessage(content=[ContentPartTextParam(text='List message', type='text')], cache=False)
messages, _ = AnthropicMessageSerializer.serialize_messages([user_list_no_cache])
assert isinstance(messages[0]['content'], list)
def test_assistant_cache_empty_content(self):
"""Test AssistantMessage with empty content and cache."""
# With cache
assistant_empty_cached = AssistantMessage(content=None, cache=True)
messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_empty_cached])
assert len(messages) == 1
assert isinstance(messages[0]['content'], list)
# Without cache
assistant_empty_no_cache = AssistantMessage(content=None, cache=False)
messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_empty_no_cache])
assert len(messages) == 1
assert isinstance(messages[0]['content'], str)
def test_mixed_cache_scenarios(self):
"""Test various combinations of cached and non-cached messages."""
messages_list: list[BaseMessage] = [
SystemMessage(content='System with cache', cache=True),
UserMessage(content='User with cache', cache=True),
AssistantMessage(content='Assistant without cache', cache=False),
UserMessage(content='User without cache', cache=False),
AssistantMessage(content='Assistant with cache', cache=True),
]
serialized_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages_list)
# Check system message is cached (becomes list)
assert isinstance(system_message, list)
# Check serialized messages
assert len(serialized_messages) == 4
# User with cache should be string (cache was cleaned to False by _clean_cache_messages)
# Only the last message with cache=True remains cached
assert isinstance(serialized_messages[0]['content'], str)
# Assistant without cache should be string
assert isinstance(serialized_messages[1]['content'], str)
# User without cache should be string
assert isinstance(serialized_messages[2]['content'], str)
# Assistant with cache should be list (this is the last cached message)
assert isinstance(serialized_messages[3]['content'], list)
def test_system_message_cache_behavior(self):
"""Test SystemMessage specific cache behavior."""
# With cache
system_cached = SystemMessage(content='System message with cache', cache=True)
result = AnthropicMessageSerializer.serialize(system_cached)
assert isinstance(result, SystemMessage)
# Test serialization to string format
serialized_content = AnthropicMessageSerializer._serialize_content_to_str(result.content, use_cache=True)
assert isinstance(serialized_content, list)
# Without cache
system_no_cache = SystemMessage(content='System message without cache', cache=False)
result = AnthropicMessageSerializer.serialize(system_no_cache)
assert isinstance(result, SystemMessage)
serialized_content = AnthropicMessageSerializer._serialize_content_to_str(result.content, use_cache=False)
assert isinstance(serialized_content, str)
def test_agent_messages_integration(self):
"""Test integration with actual agent messages."""
agent = Agent(task='Hello, world!', llm=ChatAnthropic(''))
messages = agent.message_manager.get_messages()
anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages)
# System message should be properly handled
assert system_message is not None
def test_cache_cleaning_last_message_only(self):
"""Test that only the last cache=True message remains cached."""
# Create multiple messages with cache=True
messages_list: list[BaseMessage] = [
UserMessage(content='First user message', cache=True),
AssistantMessage(content='First assistant message', cache=True),
UserMessage(content='Second user message', cache=True),
AssistantMessage(content='Second assistant message', cache=False),
UserMessage(content='Third user message', cache=True), # This should be the only one cached
]
# Test the cleaning method directly (only accepts non-system messages)
normal_messages = cast(list[NonSystemMessage], [msg for msg in messages_list if not isinstance(msg, SystemMessage)])
cleaned_messages = AnthropicMessageSerializer._clean_cache_messages(normal_messages)
# Verify only the last cache=True message remains cached
assert not cleaned_messages[0].cache # First user message should be uncached
assert not cleaned_messages[1].cache # First assistant message should be uncached
assert not cleaned_messages[2].cache # Second user message should be uncached
assert not cleaned_messages[3].cache # Second assistant message was already uncached
assert cleaned_messages[4].cache # Third user message should remain cached
# Test through serialize_messages
serialized_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages_list)
# Count how many messages have list content (indicating caching)
cached_content_count = sum(1 for msg in serialized_messages if isinstance(msg['content'], list))
# Only one message should have cached content
assert cached_content_count == 1
# The last message should be the cached one
assert isinstance(serialized_messages[-1]['content'], list)
def test_cache_cleaning_with_system_message(self):
"""Test that system messages are not affected by cache cleaning logic."""
messages_list: list[BaseMessage] = [
SystemMessage(content='System message', cache=True), # System messages are handled separately
UserMessage(content='First user message', cache=True),
AssistantMessage(content='Assistant message', cache=True), # This should be the only normal message cached
]
# Test through serialize_messages to see the full integration
serialized_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages_list)
# System message should be cached
assert isinstance(system_message, list)
# Only one normal message should have cached content (the last one)
cached_content_count = sum(1 for msg in serialized_messages if isinstance(msg['content'], list))
assert cached_content_count == 1
# The last message should be the cached one
assert isinstance(serialized_messages[-1]['content'], list)
def test_cache_cleaning_no_cached_messages(self):
"""Test that messages without cache=True are not affected."""
normal_messages_list = [
UserMessage(content='User message 1', cache=False),
AssistantMessage(content='Assistant message 1', cache=False),
UserMessage(content='User message 2', cache=False),
]
cleaned_messages = AnthropicMessageSerializer._clean_cache_messages(normal_messages_list)
# All messages should remain uncached
for msg in cleaned_messages:
assert not msg.cache
def test_max_4_cache_blocks(self):
"""Test that the max number of cache blocks is 4."""
agent = Agent(task='Hello, world!', llm=ChatAnthropic(''))
messages = agent.message_manager.get_messages()
anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages)
logger.info(anthropic_messages)
logger.info(system_message)
def test_cache_only_last_block_in_message(self):
"""Test that only the LAST block in a message gets cache_control when cache=True."""
# Test UserMessage with multiple text parts
user_msg = UserMessage(
content=[
ContentPartTextParam(text='Part 1', type='text'),
ContentPartTextParam(text='Part 2', type='text'),
ContentPartTextParam(text='Part 3', type='text'),
],
cache=True,
)
serialized = AnthropicMessageSerializer.serialize(user_msg)
assert isinstance(serialized['content'], list)
content_blocks = serialized['content']
# Count blocks with cache_control
# Note: content_blocks are dicts at runtime despite type annotations
cache_count = sum(1 for block in content_blocks if block.get('cache_control') is not None) # type: ignore[attr-defined]
assert cache_count == 1, f'Expected 1 cache_control block, got {cache_count}'
# Verify it's the last block
assert content_blocks[-1].get('cache_control') is not None # type: ignore[attr-defined]
assert content_blocks[0].get('cache_control') is None # type: ignore[attr-defined]
assert content_blocks[1].get('cache_control') is None # type: ignore[attr-defined]
def test_cache_only_last_tool_call(self):
"""Test that only the LAST tool_use block gets cache_control."""
tool_calls = [
ToolCall(id='id1', function=Function(name='func1', arguments='{"arg": "1"}')),
ToolCall(id='id2', function=Function(name='func2', arguments='{"arg": "2"}')),
ToolCall(id='id3', function=Function(name='func3', arguments='{"arg": "3"}')),
]
assistant_msg = AssistantMessage(content=None, tool_calls=tool_calls, cache=True)
serialized = AnthropicMessageSerializer.serialize(assistant_msg)
assert isinstance(serialized['content'], list)
content_blocks = serialized['content']
# Count tool_use blocks with cache_control
# Note: content_blocks are dicts at runtime despite type annotations
cache_count = sum(1 for block in content_blocks if block.get('cache_control') is not None) # type: ignore[attr-defined]
assert cache_count == 1, f'Expected 1 cache_control block, got {cache_count}'
# Verify it's the last tool_use block
assert content_blocks[-1].get('cache_control') is not None # type: ignore[attr-defined]
assert content_blocks[0].get('cache_control') is None # type: ignore[attr-defined]
assert content_blocks[1].get('cache_control') is None # type: ignore[attr-defined]
def test_cache_assistant_with_content_and_tools(self):
"""Test AssistantMessage with both content and tool calls - only last tool gets cache."""
tool_call = ToolCall(id='test_id', function=Function(name='test_function', arguments='{"arg": "value"}'))
assistant_msg = AssistantMessage(
content=[
ContentPartTextParam(text='Text part 1', type='text'),
ContentPartTextParam(text='Text part 2', type='text'),
],
tool_calls=[tool_call],
cache=True,
)
serialized = AnthropicMessageSerializer.serialize(assistant_msg)
assert isinstance(serialized['content'], list)
content_blocks = serialized['content']
# Should have 2 text blocks + 1 tool_use block = 3 blocks total
assert len(content_blocks) == 3
# Only the last block (tool_use) should have cache_control
# Note: content_blocks are dicts at runtime despite type annotations
cache_count = sum(1 for block in content_blocks if block.get('cache_control') is not None) # type: ignore[attr-defined]
assert cache_count == 1, f'Expected 1 cache_control block, got {cache_count}'
assert content_blocks[-1].get('cache_control') is not None # type: ignore[attr-defined] # Last tool_use block
assert content_blocks[0].get('cache_control') is None # type: ignore[attr-defined] # First text block
assert content_blocks[1].get('cache_control') is None # type: ignore[attr-defined] # Second text block
if __name__ == '__main__':
test_instance = TestAnthropicCache()
test_instance.test_cache_basic_functionality()
test_instance.test_cache_with_tool_calls()
test_instance.test_cache_with_images()
test_instance.test_cache_with_base64_images()
test_instance.test_cache_content_types()
test_instance.test_assistant_cache_empty_content()
test_instance.test_mixed_cache_scenarios()
test_instance.test_system_message_cache_behavior()
test_instance.test_agent_messages_integration()
test_instance.test_cache_cleaning_last_message_only()
test_instance.test_cache_cleaning_with_system_message()
test_instance.test_cache_cleaning_no_cached_messages()
test_instance.test_max_4_cache_blocks()
test_instance.test_cache_only_last_block_in_message()
test_instance.test_cache_only_last_tool_call()
test_instance.test_cache_assistant_with_content_and_tools()
print('All cache tests passed!')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/tests/test_groq_loop.py | browser_use/llm/tests/test_groq_loop.py | import asyncio
from browser_use.llm import ContentText
from browser_use.llm.groq.chat import ChatGroq
from browser_use.llm.messages import SystemMessage, UserMessage
llm = ChatGroq(
model='meta-llama/llama-4-maverick-17b-128e-instruct',
temperature=0.5,
)
# llm = ChatOpenAI(model='gpt-4.1-mini')
async def main():
from pydantic import BaseModel
from browser_use.tokens.service import TokenCost
tk = TokenCost().register_llm(llm)
class Output(BaseModel):
reasoning: str
answer: str
message = [
SystemMessage(content='You are a helpful assistant that can answer questions and help with tasks.'),
UserMessage(
content=[
ContentText(
text=r"Why is the sky blue? write exactly this into reasoning make sure to output ' with exactly like in the input : "
),
ContentText(
text="""
The user's request is to find the lowest priced women's plus size one piece swimsuit in color black with a customer rating of at least 5 on Kohls.com. I am currently on the homepage of Kohls. The page has a search bar and various category links. To begin, I need to navigate to the women's section and search for swimsuits. I will start by clicking on the 'Women' category link."""
),
]
),
]
for i in range(10):
print('-' * 50)
print(f'start loop {i}')
response = await llm.ainvoke(message, output_format=Output)
completion = response.completion
print(f'start reasoning: {completion.reasoning}')
print(f'answer: {completion.answer}')
print('-' * 50)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/azure/chat.py | browser_use/llm/azure/chat.py | import os
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, RateLimitError
from openai import AsyncAzureOpenAI as AsyncAzureOpenAIClient
from openai.types.responses import Response
from openai.types.shared import ChatModel
from pydantic import BaseModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.like import ChatOpenAILike
from browser_use.llm.openai.responses_serializer import ResponsesAPIMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
# List of models that only support the Responses API
RESPONSES_API_ONLY_MODELS: list[str] = [
'gpt-5.1-codex',
'gpt-5.1-codex-mini',
'gpt-5.1-codex-max',
'gpt-5-codex',
'codex-mini-latest',
'computer-use-preview',
]
@dataclass
class ChatAzureOpenAI(ChatOpenAILike):
"""
A class for to interact with any provider using the OpenAI API schema.
Args:
model (str): The name of the OpenAI model to use. Defaults to "not-provided".
api_key (Optional[str]): The API key to use. Defaults to "not-provided".
use_responses_api (bool): If True, use the Responses API instead of Chat Completions API.
This is required for certain models like gpt-5.1-codex-mini on Azure OpenAI with
api_version >= 2025-03-01-preview. Set to 'auto' to automatically detect based on model.
"""
# Model configuration
model: str | ChatModel
# Client initialization parameters
api_key: str | None = None
api_version: str | None = '2024-12-01-preview'
azure_endpoint: str | None = None
azure_deployment: str | None = None
base_url: str | None = None
azure_ad_token: str | None = None
azure_ad_token_provider: Any | None = None
default_headers: dict[str, str] | None = None
default_query: dict[str, Any] | None = None
# Responses API support
use_responses_api: bool | str = 'auto' # True, False, or 'auto'
client: AsyncAzureOpenAIClient | None = None
@property
def provider(self) -> str:
return 'azure'
def _get_client_params(self) -> dict[str, Any]:
_client_params: dict[str, Any] = {}
self.api_key = self.api_key or os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
self.azure_endpoint = self.azure_endpoint or os.getenv('AZURE_OPENAI_ENDPOINT')
self.azure_deployment = self.azure_deployment or os.getenv('AZURE_OPENAI_DEPLOYMENT')
params_mapping = {
'api_key': self.api_key,
'api_version': self.api_version,
'organization': self.organization,
'azure_endpoint': self.azure_endpoint,
'azure_deployment': self.azure_deployment,
'base_url': self.base_url,
'azure_ad_token': self.azure_ad_token,
'azure_ad_token_provider': self.azure_ad_token_provider,
'http_client': self.http_client,
}
if self.default_headers is not None:
_client_params['default_headers'] = self.default_headers
if self.default_query is not None:
_client_params['default_query'] = self.default_query
_client_params.update({k: v for k, v in params_mapping.items() if v is not None})
return _client_params
def get_client(self) -> AsyncAzureOpenAIClient:
"""
Returns an asynchronous OpenAI client.
Returns:
AsyncAzureOpenAIClient: An instance of the asynchronous OpenAI client.
"""
if self.client:
return self.client
_client_params: dict[str, Any] = self._get_client_params()
if self.http_client:
_client_params['http_client'] = self.http_client
else:
# Create a new async HTTP client with custom limits
_client_params['http_client'] = httpx.AsyncClient(
limits=httpx.Limits(max_connections=20, max_keepalive_connections=6)
)
self.client = AsyncAzureOpenAIClient(**_client_params)
return self.client
def _should_use_responses_api(self) -> bool:
"""Determine if the Responses API should be used based on model and settings."""
if isinstance(self.use_responses_api, bool):
return self.use_responses_api
# Auto-detect: use Responses API for models that require it
model_lower = str(self.model).lower()
for responses_only_model in RESPONSES_API_ONLY_MODELS:
if responses_only_model.lower() in model_lower:
return True
return False
def _get_usage_from_responses(self, response: Response) -> ChatInvokeUsage | None:
"""Extract usage information from a Responses API response."""
if response.usage is None:
return None
# Get cached tokens from input_tokens_details if available
cached_tokens = None
if response.usage.input_tokens_details is not None:
cached_tokens = getattr(response.usage.input_tokens_details, 'cached_tokens', None)
return ChatInvokeUsage(
prompt_tokens=response.usage.input_tokens,
prompt_cached_tokens=cached_tokens,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
completion_tokens=response.usage.output_tokens,
total_tokens=response.usage.total_tokens,
)
async def _ainvoke_responses_api(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
"""
Invoke the model using the Responses API.
This is used for models that require the Responses API (e.g., gpt-5.1-codex-mini)
or when use_responses_api is explicitly set to True.
"""
# Serialize messages to Responses API input format
input_messages = ResponsesAPIMessageSerializer.serialize_messages(messages)
try:
model_params: dict[str, Any] = {
'model': self.model,
'input': input_messages,
}
if self.temperature is not None:
model_params['temperature'] = self.temperature
if self.max_completion_tokens is not None:
model_params['max_output_tokens'] = self.max_completion_tokens
if self.top_p is not None:
model_params['top_p'] = self.top_p
if self.service_tier is not None:
model_params['service_tier'] = self.service_tier
# Handle reasoning models
if self.reasoning_models and any(str(m).lower() in str(self.model).lower() for m in self.reasoning_models):
# For reasoning models, use reasoning parameter instead of reasoning_effort
model_params['reasoning'] = {'effort': self.reasoning_effort}
model_params.pop('temperature', None)
if output_format is None:
# Return string response
response = await self.get_client().responses.create(**model_params)
usage = self._get_usage_from_responses(response)
return ChatInvokeCompletion(
completion=response.output_text or '',
usage=usage,
stop_reason=response.status if response.status else None,
)
else:
# For structured output, use the text.format parameter
json_schema = SchemaOptimizer.create_optimized_json_schema(
output_format,
remove_min_items=self.remove_min_items_from_schema,
remove_defaults=self.remove_defaults_from_schema,
)
model_params['text'] = {
'format': {
'type': 'json_schema',
'name': 'agent_output',
'strict': True,
'schema': json_schema,
}
}
# Add JSON schema to system prompt if requested
if self.add_schema_to_system_prompt and input_messages and input_messages[0].get('role') == 'system':
schema_text = f'\n<json_schema>\n{json_schema}\n</json_schema>'
content = input_messages[0].get('content', '')
if isinstance(content, str):
input_messages[0]['content'] = content + schema_text
elif isinstance(content, list):
input_messages[0]['content'] = list(content) + [{'type': 'input_text', 'text': schema_text}]
model_params['input'] = input_messages
if self.dont_force_structured_output:
# Remove the text format parameter if not forcing structured output
model_params.pop('text', None)
response = await self.get_client().responses.create(**model_params)
if not response.output_text:
raise ModelProviderError(
message='Failed to parse structured output from model response',
status_code=500,
model=self.name,
)
usage = self._get_usage_from_responses(response)
parsed = output_format.model_validate_json(response.output_text)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=response.status if response.status else None,
)
except RateLimitError as e:
raise ModelRateLimitError(message=e.message, model=self.name) from e
except APIConnectionError as e:
raise ModelProviderError(message=str(e), model=self.name) from e
except APIStatusError as e:
raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
"""
Invoke the model with the given messages.
This method routes to either the Responses API or the Chat Completions API
based on the model and settings.
Args:
messages: List of chat messages
output_format: Optional Pydantic model class for structured output
Returns:
Either a string response or an instance of output_format
"""
if self._should_use_responses_api():
return await self._ainvoke_responses_api(messages, output_format, **kwargs)
else:
# Use the parent class implementation (Chat Completions API)
return await super().ainvoke(messages, output_format, **kwargs)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/groq/serializer.py | browser_use/llm/groq/serializer.py | from typing import overload
from groq.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageParam,
ChatCompletionMessageToolCallParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from groq.types.chat.chat_completion_content_part_image_param import ImageURL
from groq.types.chat.chat_completion_message_tool_call_param import Function
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class GroqMessageSerializer:
"""Serializer for converting between custom message types and OpenAI message param types."""
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
return ChatCompletionContentPartTextParam(text=part.text, type='text')
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam:
return ChatCompletionContentPartImageParam(
image_url=ImageURL(url=part.image_url.url, detail=part.image_url.detail),
type='image_url',
)
@staticmethod
def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
"""Serialize content for user messages (text and images allowed)."""
if isinstance(content, str):
return content
serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] = []
for part in content:
if part.type == 'text':
serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part))
elif part.type == 'image_url':
serialized_parts.append(GroqMessageSerializer._serialize_content_part_image(part))
return serialized_parts
@staticmethod
def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> str:
"""Serialize content for system messages (text only)."""
if isinstance(content, str):
return content
serialized_parts: list[str] = []
for part in content:
if part.type == 'text':
serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part)['text'])
return '\n'.join(serialized_parts)
@staticmethod
def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> str | None:
"""Serialize content for assistant messages (text and refusal allowed)."""
if content is None:
return None
if isinstance(content, str):
return content
serialized_parts: list[str] = []
for part in content:
if part.type == 'text':
serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part)['text'])
return '\n'.join(serialized_parts)
@staticmethod
def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageToolCallParam:
return ChatCompletionMessageToolCallParam(
id=tool_call.id,
function=Function(name=tool_call.function.name, arguments=tool_call.function.arguments),
type='function',
)
# endregion
# region - Serialize overloads
@overload
@staticmethod
def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...
@staticmethod
def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
"""Serialize a custom message to an OpenAI message param."""
if isinstance(message, UserMessage):
user_result: ChatCompletionUserMessageParam = {
'role': 'user',
'content': GroqMessageSerializer._serialize_user_content(message.content),
}
if message.name is not None:
user_result['name'] = message.name
return user_result
elif isinstance(message, SystemMessage):
system_result: ChatCompletionSystemMessageParam = {
'role': 'system',
'content': GroqMessageSerializer._serialize_system_content(message.content),
}
if message.name is not None:
system_result['name'] = message.name
return system_result
elif isinstance(message, AssistantMessage):
# Handle content serialization
content = None
if message.content is not None:
content = GroqMessageSerializer._serialize_assistant_content(message.content)
assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}
# Only add content if it's not None
if content is not None:
assistant_result['content'] = content
if message.name is not None:
assistant_result['name'] = message.name
if message.tool_calls:
assistant_result['tool_calls'] = [GroqMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]
return assistant_result
else:
raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
return [GroqMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/groq/chat.py | browser_use/llm/groq/chat.py | import logging
from dataclasses import dataclass
from typing import Any, Literal, TypeVar, overload
from groq import (
APIError,
APIResponseValidationError,
APIStatusError,
AsyncGroq,
NotGiven,
RateLimitError,
Timeout,
)
from groq.types.chat import ChatCompletion, ChatCompletionToolChoiceOptionParam, ChatCompletionToolParam
from groq.types.chat.completion_create_params import (
ResponseFormatResponseFormatJsonSchema,
ResponseFormatResponseFormatJsonSchemaJsonSchema,
)
from httpx import URL
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel, ChatInvokeCompletion
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.groq.parser import try_parse_groq_failed_generation
from browser_use.llm.groq.serializer import GroqMessageSerializer
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeUsage
GroqVerifiedModels = Literal[
'meta-llama/llama-4-maverick-17b-128e-instruct',
'meta-llama/llama-4-scout-17b-16e-instruct',
'qwen/qwen3-32b',
'moonshotai/kimi-k2-instruct',
'openai/gpt-oss-20b',
'openai/gpt-oss-120b',
]
JsonSchemaModels = [
'meta-llama/llama-4-maverick-17b-128e-instruct',
'meta-llama/llama-4-scout-17b-16e-instruct',
'openai/gpt-oss-20b',
'openai/gpt-oss-120b',
]
ToolCallingModels = [
'moonshotai/kimi-k2-instruct',
]
T = TypeVar('T', bound=BaseModel)
logger = logging.getLogger(__name__)
@dataclass
class ChatGroq(BaseChatModel):
    """
    A wrapper around AsyncGroq that implements the BaseLLM protocol.

    Structured output is produced via forced tool calling for models in
    ToolCallingModels and via JSON-schema response formatting otherwise.
    When Groq rejects a structured generation, the payload is salvaged from
    the error body via try_parse_groq_failed_generation.
    """

    # Model configuration
    model: GroqVerifiedModels | str

    # Model params
    temperature: float | None = None
    service_tier: Literal['auto', 'on_demand', 'flex'] | None = None
    top_p: float | None = None
    seed: int | None = None

    # Client initialization parameters
    api_key: str | None = None
    base_url: str | URL | None = None
    timeout: float | Timeout | NotGiven | None = None
    max_retries: int = 10  # Increase default retries for automation reliability

    def get_client(self) -> AsyncGroq:
        # A fresh client per call; construction is cheap and avoids shared state.
        return AsyncGroq(api_key=self.api_key, base_url=self.base_url, timeout=self.timeout, max_retries=self.max_retries)

    @property
    def provider(self) -> str:
        return 'groq'

    @property
    def name(self) -> str:
        return str(self.model)

    def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
        """Map Groq usage stats onto ChatInvokeUsage; None when the API returned no usage."""
        usage = (
            ChatInvokeUsage(
                prompt_tokens=response.usage.prompt_tokens,
                completion_tokens=response.usage.completion_tokens,
                total_tokens=response.usage.total_tokens,
                prompt_cached_tokens=None,  # Groq doesn't support cached tokens
                prompt_cache_creation_tokens=None,
                prompt_image_tokens=None,
            )
            if response.usage is not None
            else None
        )
        return usage

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """Invoke the model; when output_format is given, parse the reply into that schema.

        Raises:
            ModelRateLimitError: on HTTP 429 responses.
            ModelProviderError: for all other API/validation failures.
        """
        groq_messages = GroqMessageSerializer.serialize_messages(messages)
        try:
            if output_format is None:
                return await self._invoke_regular_completion(groq_messages)
            else:
                return await self._invoke_structured_output(groq_messages, output_format)
        except RateLimitError as e:
            raise ModelRateLimitError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
        except APIResponseValidationError as e:
            raise ModelProviderError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
        except APIStatusError as e:
            if output_format is None:
                raise ModelProviderError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
            else:
                # Groq sometimes rejects structured generations it actually produced;
                # try to salvage the JSON payload from the error body before giving up.
                try:
                    logger.debug(f'Groq failed generation: {e.response.text}; fallback to manual parsing')
                    parsed_response = try_parse_groq_failed_generation(e, output_format)
                    logger.debug('Manual error parsing successful โ')
                    return ChatInvokeCompletion(
                        completion=parsed_response,
                        usage=None,  # because this is a hacky way to get the outputs
                        # TODO: @groq needs to fix their parsers and validators
                    )
                except Exception as _:
                    raise ModelProviderError(message=str(e), status_code=e.response.status_code, model=self.name) from e
        except APIError as e:
            raise ModelProviderError(message=e.message, model=self.name) from e
        except Exception as e:
            raise ModelProviderError(message=str(e), model=self.name) from e

    async def _invoke_regular_completion(self, groq_messages) -> ChatInvokeCompletion[str]:
        """Handle regular completion without structured output."""
        chat_completion = await self.get_client().chat.completions.create(
            messages=groq_messages,
            model=self.model,
            service_tier=self.service_tier,
            temperature=self.temperature,
            top_p=self.top_p,
            seed=self.seed,
        )
        usage = self._get_usage(chat_completion)
        return ChatInvokeCompletion(
            completion=chat_completion.choices[0].message.content or '',
            usage=usage,
        )

    async def _invoke_structured_output(self, groq_messages, output_format: type[T]) -> ChatInvokeCompletion[T]:
        """Handle structured output using either tool calling or JSON schema, then validate."""
        schema = SchemaOptimizer.create_optimized_json_schema(output_format)
        if self.model in ToolCallingModels:
            response = await self._invoke_with_tool_calling(groq_messages, output_format, schema)
        else:
            response = await self._invoke_with_json_schema(groq_messages, output_format, schema)
        if not response.choices[0].message.content:
            raise ModelProviderError(
                message='No content in response',
                status_code=500,
                model=self.name,
            )
        parsed_response = output_format.model_validate_json(response.choices[0].message.content)
        usage = self._get_usage(response)
        return ChatInvokeCompletion(
            completion=parsed_response,
            usage=usage,
        )

    async def _invoke_with_tool_calling(self, groq_messages, output_format: type[T], schema) -> ChatCompletion:
        """Handle structured output using tool calling (tool_choice='required')."""
        tool = ChatCompletionToolParam(
            function={
                'name': output_format.__name__,
                'description': f'Extract information in the format of {output_format.__name__}',
                'parameters': schema,
            },
            type='function',
        )
        tool_choice: ChatCompletionToolChoiceOptionParam = 'required'
        return await self.get_client().chat.completions.create(
            model=self.model,
            messages=groq_messages,
            temperature=self.temperature,
            top_p=self.top_p,
            seed=self.seed,
            tools=[tool],
            tool_choice=tool_choice,
            service_tier=self.service_tier,
        )

    async def _invoke_with_json_schema(self, groq_messages, output_format: type[T], schema) -> ChatCompletion:
        """Handle structured output using response_format=json_schema."""
        return await self.get_client().chat.completions.create(
            model=self.model,
            messages=groq_messages,
            temperature=self.temperature,
            top_p=self.top_p,
            seed=self.seed,
            response_format=ResponseFormatResponseFormatJsonSchema(
                json_schema=ResponseFormatResponseFormatJsonSchemaJsonSchema(
                    name=output_format.__name__,
                    description='Model output schema',
                    schema=schema,
                ),
                type='json_schema',
            ),
            service_tier=self.service_tier,
        )
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/groq/parser.py | browser_use/llm/groq/parser.py | import json
import logging
import re
from typing import TypeVar
from groq import APIStatusError
from pydantic import BaseModel
logger = logging.getLogger(__name__)
T = TypeVar('T', bound=BaseModel)
class ParseFailedGenerationError(Exception):
    """Raised when Groq's failed_generation error payload cannot be salvaged into the output schema."""

    pass
def try_parse_groq_failed_generation(
    error: APIStatusError,
    output_format: type[T],
) -> T:
    """Extract JSON from model output, handling both plain JSON and code-block-wrapped JSON.

    Reads error.body['error']['failed_generation'], strips code fences and
    HTML-like wrapper tags, balances trailing braces, escapes literal control
    characters, then validates the result against output_format.

    Raises:
        ParseFailedGenerationError: when the payload is missing or validation fails.
        ValueError: when the cleaned content still is not valid JSON.
    """
    try:
        content = error.body['error']['failed_generation']  # type: ignore

        # If content is wrapped in code blocks, extract just the JSON part
        if '```' in content:
            # Find the JSON content between code blocks
            content = content.split('```')[1]
            # Remove language identifier if present (e.g., 'json\n')
            if '\n' in content:
                content = content.split('\n', 1)[1]

        # remove html-like tags before the first { and after the last }
        # This handles cases like <|header_start|>assistant<|header_end|> and <function=AgentOutput>
        # Only remove content before { if content doesn't already start with {
        if not content.strip().startswith('{'):
            content = re.sub(r'^.*?(?=\{)', '', content, flags=re.DOTALL)

        # Remove common HTML-like tags and patterns at the end, but be more conservative
        # Look for patterns like </function>, <|header_start|>, etc. after the JSON
        content = re.sub(r'\}(\s*<[^>]*>.*?$)', '}', content, flags=re.DOTALL)
        content = re.sub(r'\}(\s*<\|[^|]*\|>.*?$)', '}', content, flags=re.DOTALL)

        # Handle extra characters after the JSON, including stray braces
        # Find the position of the last } that would close the main JSON object
        content = content.strip()
        if content.endswith('}'):
            # Try to parse and see if we get valid JSON
            try:
                json.loads(content)
            except json.JSONDecodeError:
                # If parsing fails, try to find the correct end of the JSON
                # by counting braces and removing anything after the balanced JSON
                # NOTE(review): brace counting does not skip braces inside string
                # values — presumably acceptable for typical model output; confirm.
                brace_count = 0
                last_valid_pos = -1
                for i, char in enumerate(content):
                    if char == '{':
                        brace_count += 1
                    elif char == '}':
                        brace_count -= 1
                        if brace_count == 0:
                            last_valid_pos = i + 1
                            break
                if last_valid_pos > 0:
                    content = content[:last_valid_pos]

        # Fix control characters in JSON strings before parsing
        # This handles cases where literal control characters appear in JSON values
        content = _fix_control_characters_in_json(content)

        # Parse the cleaned content
        result_dict = json.loads(content)

        # some models occasionally respond with a list containing one dict: https://github.com/browser-use/browser-use/issues/1458
        if isinstance(result_dict, list) and len(result_dict) == 1 and isinstance(result_dict[0], dict):
            result_dict = result_dict[0]

        logger.debug(f'Successfully parsed model output: {result_dict}')
        return output_format.model_validate(result_dict)
    except KeyError as e:
        # failed_generation key missing from the error body
        raise ParseFailedGenerationError(e) from e
    except json.JSONDecodeError as e:
        # content is always bound here: assignment precedes any json.loads call
        logger.warning(f'Failed to parse model output: {content} {str(e)}')
        raise ValueError(f'Could not parse response. {str(e)}')
    except Exception as e:
        raise ParseFailedGenerationError(error.response.text) from e
def _fix_control_characters_in_json(content: str) -> str:
"""Fix control characters in JSON string values to make them valid JSON."""
try:
# First try to parse as-is to see if it's already valid
json.loads(content)
return content
except json.JSONDecodeError:
pass
# More sophisticated approach: only escape control characters inside string values
# while preserving JSON structure formatting
result = []
i = 0
in_string = False
escaped = False
while i < len(content):
char = content[i]
if not in_string:
# Outside of string - check if we're entering a string
if char == '"':
in_string = True
result.append(char)
else:
# Inside string - handle escaping and control characters
if escaped:
# Previous character was backslash, so this character is escaped
result.append(char)
escaped = False
elif char == '\\':
# This is an escape character
result.append(char)
escaped = True
elif char == '"':
# End of string
result.append(char)
in_string = False
elif char == '\n':
# Literal newline inside string - escape it
result.append('\\n')
elif char == '\r':
# Literal carriage return inside string - escape it
result.append('\\r')
elif char == '\t':
# Literal tab inside string - escape it
result.append('\\t')
elif char == '\b':
# Literal backspace inside string - escape it
result.append('\\b')
elif char == '\f':
# Literal form feed inside string - escape it
result.append('\\f')
elif ord(char) < 32:
# Other control characters inside string - convert to unicode escape
result.append(f'\\u{ord(char):04x}')
else:
# Normal character inside string
result.append(char)
i += 1
return ''.join(result)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/ollama/serializer.py | browser_use/llm/ollama/serializer.py | import base64
import json
from typing import Any, overload
from ollama._types import Image, Message
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
ToolCall,
UserMessage,
)
class OllamaMessageSerializer:
    """Serializer for converting between custom message types and Ollama message types."""

    @staticmethod
    def _extract_text_content(content: Any) -> str:
        """Extract text content from message content, ignoring images."""
        if content is None:
            return ''
        if isinstance(content, str):
            return content
        text_parts: list[str] = []
        for part in content:
            if hasattr(part, 'type'):
                if part.type == 'text':
                    text_parts.append(part.text)
                elif part.type == 'refusal':
                    text_parts.append(f'[Refusal] {part.refusal}')
                # Skip image parts as they're handled separately
        return '\n'.join(text_parts)

    @staticmethod
    def _extract_images(content: Any) -> list[Image]:
        """Extract images from message content."""
        if content is None or isinstance(content, str):
            return []
        images: list[Image] = []
        for part in content:
            if hasattr(part, 'type') and part.type == 'image_url':
                url = part.image_url.url
                if url.startswith('data:'):
                    # Handle base64 encoded images
                    # Format: data:image/jpeg;base64,<data>
                    _, data = url.split(',', 1)
                    # Decode base64 to bytes
                    image_bytes = base64.b64decode(data)
                    images.append(Image(value=image_bytes))
                else:
                    # Handle URL images (Ollama will download them)
                    images.append(Image(value=url))
        return images

    @staticmethod
    def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[Message.ToolCall]:
        """Convert browser-use ToolCalls to Ollama ToolCalls."""
        ollama_tool_calls: list[Message.ToolCall] = []
        for tool_call in tool_calls:
            # Parse arguments from JSON string to dict for Ollama
            try:
                arguments_dict = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError:
                # If parsing fails, wrap in a dict
                arguments_dict = {'arguments': tool_call.function.arguments}
            ollama_tool_call = Message.ToolCall(
                function=Message.ToolCall.Function(name=tool_call.function.name, arguments=arguments_dict)
            )
            ollama_tool_calls.append(ollama_tool_call)
        return ollama_tool_calls

    # region - Serialize overloads
    @overload
    @staticmethod
    def serialize(message: UserMessage) -> Message: ...

    @overload
    @staticmethod
    def serialize(message: SystemMessage) -> Message: ...

    @overload
    @staticmethod
    def serialize(message: AssistantMessage) -> Message: ...

    @staticmethod
    def serialize(message: BaseMessage) -> Message:
        """Serialize a custom message to an Ollama Message.

        Raises:
            ValueError: for message types other than User/System/Assistant.
        """
        if isinstance(message, UserMessage):
            text_content = OllamaMessageSerializer._extract_text_content(message.content)
            images = OllamaMessageSerializer._extract_images(message.content)
            ollama_message = Message(
                role='user',
                content=text_content if text_content else None,
            )
            if images:
                ollama_message.images = images
            return ollama_message
        elif isinstance(message, SystemMessage):
            text_content = OllamaMessageSerializer._extract_text_content(message.content)
            return Message(
                role='system',
                content=text_content if text_content else None,
            )
        elif isinstance(message, AssistantMessage):
            # Handle content
            text_content = None
            if message.content is not None:
                text_content = OllamaMessageSerializer._extract_text_content(message.content)
            ollama_message = Message(
                role='assistant',
                content=text_content if text_content else None,
            )
            # Handle tool calls
            if message.tool_calls:
                ollama_message.tool_calls = OllamaMessageSerializer._serialize_tool_calls(message.tool_calls)
            return ollama_message
        else:
            raise ValueError(f'Unknown message type: {type(message)}')

    @staticmethod
    def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
        """Serialize a list of browser_use messages to Ollama Messages."""
        return [OllamaMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/ollama/chat.py | browser_use/llm/ollama/chat.py | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from ollama import AsyncClient as OllamaAsyncClient
from ollama import Options
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.ollama.serializer import OllamaMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOllama(BaseChatModel):
    """
    A wrapper around Ollama's chat model.

    Structured output is requested by passing the pydantic model's JSON schema
    as Ollama's `format` argument and validating the reply into that type.
    """

    model: str

    # # Model params
    # TODO (matic): Why is this commented out?
    # temperature: float | None = None

    # Client initialization parameters
    host: str | None = None
    timeout: float | httpx.Timeout | None = None
    client_params: dict[str, Any] | None = None
    ollama_options: Mapping[str, Any] | Options | None = None

    # Static
    @property
    def provider(self) -> str:
        return 'ollama'

    def _get_client_params(self) -> dict[str, Any]:
        """Prepare client parameters dictionary."""
        return {
            'host': self.host,
            'timeout': self.timeout,
            'client_params': self.client_params,
        }

    def get_client(self) -> OllamaAsyncClient:
        """
        Returns an OllamaAsyncClient client.
        """
        return OllamaAsyncClient(host=self.host, timeout=self.timeout, **self.client_params or {})

    @property
    def name(self) -> str:
        return self.model

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """Invoke the model; when output_format is given, constrain and validate the reply.

        Raises:
            ModelProviderError: on any client or validation failure.
        """
        ollama_messages = OllamaMessageSerializer.serialize_messages(messages)
        try:
            if output_format is None:
                response = await self.get_client().chat(
                    model=self.model,
                    messages=ollama_messages,
                    options=self.ollama_options,
                )
                return ChatInvokeCompletion(completion=response.message.content or '', usage=None)
            # Structured output: constrain generation with the model's JSON schema,
            # then validate the reply into the requested pydantic type.
            # (The original re-checked `output_format is not None` here — dead code,
            # since this branch is only reachable when it is non-None.)
            schema = output_format.model_json_schema()
            response = await self.get_client().chat(
                model=self.model,
                messages=ollama_messages,
                format=schema,
                options=self.ollama_options,
            )
            completion = output_format.model_validate_json(response.message.content or '')
            return ChatInvokeCompletion(completion=completion, usage=None)
        except Exception as e:
            raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/mistral/chat.py | browser_use/llm/mistral/chat.py | from __future__ import annotations
import json
import logging
import os
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, cast, overload
import httpx
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.mistral.schema import MistralSchemaOptimizer
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
logger = logging.getLogger(__name__)
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatMistral(BaseChatModel):
    """Mistral /chat/completions wrapper with schema sanitization.

    Talks to the HTTP API directly via httpx (no mistralai SDK dependency);
    structured output uses strict json_schema response_format with keywords
    Mistral rejects stripped by MistralSchemaOptimizer.
    """

    model: str = 'mistral-medium-latest'

    # Generation params
    temperature: float | None = 0.2
    top_p: float | None = None
    max_tokens: int | None = 4096  # Mistral expects max_tokens (not max_completion_tokens)
    seed: int | None = None
    safe_prompt: bool = False

    # Client params
    api_key: str | None = None  # Falls back to MISTRAL_API_KEY
    base_url: str | httpx.URL = 'https://api.mistral.ai/v1'
    timeout: float | httpx.Timeout | None = None
    max_retries: int = 5
    default_headers: Mapping[str, str] | None = None
    default_query: Mapping[str, object] | None = None
    http_client: httpx.AsyncClient | None = None

    @property
    def provider(self) -> str:
        return 'mistral'

    @property
    def name(self) -> str:
        return str(self.model)

    def _get_api_key(self) -> str:
        """Resolve the API key (instance attribute, then MISTRAL_API_KEY env var).

        Raises:
            ModelProviderError: with status 401 when no key is available.
        """
        key = self.api_key or os.getenv('MISTRAL_API_KEY')
        if not key:
            raise ModelProviderError('Missing Mistral API key', status_code=401, model=self.name)
        return key

    def _get_base_url(self) -> str:
        # MISTRAL_BASE_URL env var overrides the configured base_url;
        # trailing slash stripped so URL joining stays clean.
        return str(os.getenv('MISTRAL_BASE_URL', self.base_url)).rstrip('/')

    def _auth_headers(self) -> dict[str, str]:
        """Build request headers: bearer auth + JSON content type + user-supplied defaults."""
        headers = {
            'Authorization': f'Bearer {self._get_api_key()}',
            'Content-Type': 'application/json',
        }
        if self.default_headers:
            headers.update(self.default_headers)
        return headers

    def _client(self) -> httpx.AsyncClient:
        """Return the user-supplied httpx client, or lazily create and cache one with retries."""
        if self.http_client:
            return self.http_client
        if not hasattr(self, '_cached_client'):
            transport = httpx.AsyncHTTPTransport(retries=self.max_retries)
            client_args: dict[str, Any] = {'transport': transport}
            if self.timeout is not None:
                client_args['timeout'] = self.timeout
            self._cached_client = httpx.AsyncClient(**client_args)
        return self._cached_client

    def _serialize_messages(self, messages: list[BaseMessage]) -> list[dict[str, Any]]:
        """Serialize messages via the OpenAI-compatible serializer into plain dicts for httpx."""
        raw_messages: list[dict[str, Any]] = []
        for msg in OpenAIMessageSerializer.serialize_messages(messages):
            # Serialized items may be pydantic models or already-plain dicts.
            dumper = getattr(msg, 'model_dump', None)
            if callable(dumper):
                raw_messages.append(cast(dict[str, Any], dumper(exclude_none=True)))
            else:
                raw_messages.append(cast(dict[str, Any], msg))  # type: ignore[arg-type]
        return raw_messages

    def _query_params(self) -> dict[str, str] | None:
        # Stringify default_query values, dropping Nones; None means "no params".
        if self.default_query is None:
            return None
        return {k: str(v) for k, v in self.default_query.items() if v is not None}

    def _build_usage(self, usage: dict[str, Any] | None) -> ChatInvokeUsage | None:
        """Map Mistral's usage dict onto ChatInvokeUsage; None when usage is missing/empty."""
        if not usage:
            return None
        return ChatInvokeUsage(
            prompt_tokens=usage.get('prompt_tokens', 0),
            prompt_cached_tokens=None,
            prompt_cache_creation_tokens=None,
            prompt_image_tokens=None,
            completion_tokens=usage.get('completion_tokens', 0),
            total_tokens=usage.get('total_tokens', 0),
        )

    def _extract_content_text(self, choice: dict[str, Any]) -> str:
        """Flatten a choice's message content (string, part list, or dict) into plain text."""
        message = choice.get('message', {})
        content = message.get('content')
        if isinstance(content, list):
            text_parts = []
            for part in content:
                if isinstance(part, dict):
                    if part.get('type') == 'text' and 'text' in part:
                        text_parts.append(part.get('text', ''))
                    elif 'content' in part:
                        text_parts.append(str(part['content']))
            return ''.join(text_parts)
        if isinstance(content, dict):
            return json.dumps(content)
        return content or ''

    def _parse_error(self, response: httpx.Response) -> str:
        """Best-effort extraction of a human-readable error message from an error response."""
        try:
            body = response.json()
            if isinstance(body, dict):
                for key in ('message', 'error', 'detail'):
                    val = body.get(key)
                    if isinstance(val, dict):
                        val = val.get('message') or val.get('detail')
                    if val:
                        return str(val)
        except Exception:
            # Fall back to the raw body text when the error body isn't JSON.
            pass
        return response.text

    async def _post(self, payload: dict[str, Any]) -> dict[str, Any]:
        """POST to /chat/completions and return the decoded JSON body.

        Raises:
            ModelRateLimitError: on HTTP 429.
            ModelProviderError: on other HTTP errors or undecodable bodies.
        """
        url = f'{self._get_base_url()}/chat/completions'
        client = self._client()
        response = await client.post(url, headers=self._auth_headers(), json=payload, params=self._query_params())
        if response.status_code >= 400:
            message = self._parse_error(response)
            if response.status_code == 429:
                raise ModelRateLimitError(message=message, status_code=response.status_code, model=self.name)
            raise ModelProviderError(message=message, status_code=response.status_code, model=self.name)
        try:
            return response.json()
        except Exception as e:
            raise ModelProviderError(message=f'Failed to parse Mistral response: {e}', model=self.name) from e

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """Invoke the model; with output_format set, request strict JSON-schema output and validate it."""
        payload: dict[str, Any] = {
            'model': self.model,
            'messages': self._serialize_messages(messages),
        }
        # Generation params
        if self.temperature is not None:
            payload['temperature'] = self.temperature
        if self.top_p is not None:
            payload['top_p'] = self.top_p
        if self.max_tokens is not None:
            payload['max_tokens'] = self.max_tokens
        if self.seed is not None:
            payload['seed'] = self.seed
        if self.safe_prompt:
            payload['safe_prompt'] = self.safe_prompt
        # Structured output path
        if output_format is not None:
            payload['response_format'] = {
                'type': 'json_schema',
                'json_schema': {
                    'name': 'agent_output',
                    'strict': True,
                    'schema': MistralSchemaOptimizer.create_mistral_compatible_schema(output_format),
                },
            }
        try:
            data = await self._post(payload)
            choices = data.get('choices', [])
            if not choices:
                raise ModelProviderError('Mistral returned no choices', model=self.name)
            content_text = self._extract_content_text(choices[0])
            usage = self._build_usage(data.get('usage'))
            if output_format is None:
                return ChatInvokeCompletion(completion=content_text, usage=usage)
            parsed = output_format.model_validate_json(content_text)
            return ChatInvokeCompletion(completion=parsed, usage=usage)
        except ModelRateLimitError:
            raise
        except ModelProviderError:
            raise
        except Exception as e:
            logger.error(f'Mistral invocation failed: {e}')
            raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/mistral/schema.py | browser_use/llm/mistral/schema.py | """Schema optimizer for Mistral-compatible JSON schemas."""
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
from browser_use.llm.schema import SchemaOptimizer
class MistralSchemaOptimizer:
    """Create JSON schemas that avoid Mistral's unsupported keywords."""

    UNSUPPORTED_KEYWORDS = {'minLength', 'maxLength', 'pattern', 'format'}

    @classmethod
    def create_mistral_compatible_schema(cls, model: type[BaseModel]) -> dict[str, Any]:
        """
        Build a Mistral-safe schema by starting with the standard optimized schema and
        then stripping unsupported validation keywords recursively.
        """
        return cls._strip_unsupported_keywords(SchemaOptimizer.create_optimized_json_schema(model))

    @classmethod
    def _strip_unsupported_keywords(cls, obj: Any) -> Any:
        """Recursively drop unsupported validation keywords from a schema fragment."""
        if isinstance(obj, list):
            return [cls._strip_unsupported_keywords(entry) for entry in obj]
        if isinstance(obj, dict):
            cleaned: dict[str, Any] = {}
            for key, value in obj.items():
                if key in cls.UNSUPPORTED_KEYWORDS:
                    continue
                cleaned[key] = cls._strip_unsupported_keywords(value)
            return cleaned
        return obj
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/mistral/__init__.py | browser_use/llm/mistral/__init__.py | """Mistral chat model integration."""
from browser_use.llm.mistral.chat import ChatMistral
__all__ = ['ChatMistral']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/anthropic/serializer.py | browser_use/llm/anthropic/serializer.py | import json
from typing import overload
from anthropic.types import (
Base64ImageSourceParam,
CacheControlEphemeralParam,
ImageBlockParam,
MessageParam,
TextBlockParam,
ToolUseBlockParam,
URLImageSourceParam,
)
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SupportedImageMediaType,
SystemMessage,
UserMessage,
)
NonSystemMessage = UserMessage | AssistantMessage
class AnthropicMessageSerializer:
"""Serializer for converting between custom message types and Anthropic message param types."""
@staticmethod
def _is_base64_image(url: str) -> bool:
"""Check if the URL is a base64 encoded image."""
return url.startswith('data:image/')
@staticmethod
def _parse_base64_url(url: str) -> tuple[SupportedImageMediaType, str]:
"""Parse a base64 data URL to extract media type and data."""
# Format: data:image/jpeg;base64,<data>
if not url.startswith('data:'):
raise ValueError(f'Invalid base64 URL: {url}')
header, data = url.split(',', 1)
media_type = header.split(';')[0].replace('data:', '')
# Ensure it's a supported media type
supported_types = ['image/jpeg', 'image/png', 'image/gif', 'image/webp']
if media_type not in supported_types:
# Default to jpeg if not recognized
media_type = 'image/jpeg'
return media_type, data # type: ignore
@staticmethod
def _serialize_cache_control(use_cache: bool) -> CacheControlEphemeralParam | None:
"""Serialize cache control."""
if use_cache:
return CacheControlEphemeralParam(type='ephemeral')
return None
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam, use_cache: bool) -> TextBlockParam:
"""Convert a text content part to Anthropic's TextBlockParam."""
return TextBlockParam(
text=part.text, type='text', cache_control=AnthropicMessageSerializer._serialize_cache_control(use_cache)
)
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> ImageBlockParam:
"""Convert an image content part to Anthropic's ImageBlockParam."""
url = part.image_url.url
if AnthropicMessageSerializer._is_base64_image(url):
# Handle base64 encoded images
media_type, data = AnthropicMessageSerializer._parse_base64_url(url)
return ImageBlockParam(
source=Base64ImageSourceParam(
data=data,
media_type=media_type,
type='base64',
),
type='image',
)
else:
# Handle URL images
return ImageBlockParam(source=URLImageSourceParam(url=url, type='url'), type='image')
@staticmethod
def _serialize_content_to_str(
content: str | list[ContentPartTextParam], use_cache: bool = False
) -> list[TextBlockParam] | str:
"""Serialize content to a string."""
cache_control = AnthropicMessageSerializer._serialize_cache_control(use_cache)
if isinstance(content, str):
if cache_control:
return [TextBlockParam(text=content, type='text', cache_control=cache_control)]
else:
return content
serialized_blocks: list[TextBlockParam] = []
for i, part in enumerate(content):
is_last = i == len(content) - 1
if part.type == 'text':
serialized_blocks.append(
AnthropicMessageSerializer._serialize_content_part_text(part, use_cache=use_cache and is_last)
)
return serialized_blocks
@staticmethod
def _serialize_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
use_cache: bool = False,
) -> str | list[TextBlockParam | ImageBlockParam]:
"""Serialize content to Anthropic format."""
if isinstance(content, str):
if use_cache:
return [TextBlockParam(text=content, type='text', cache_control=CacheControlEphemeralParam(type='ephemeral'))]
else:
return content
serialized_blocks: list[TextBlockParam | ImageBlockParam] = []
for i, part in enumerate(content):
is_last = i == len(content) - 1
if part.type == 'text':
serialized_blocks.append(
AnthropicMessageSerializer._serialize_content_part_text(part, use_cache=use_cache and is_last)
)
elif part.type == 'image_url':
serialized_blocks.append(AnthropicMessageSerializer._serialize_content_part_image(part))
return serialized_blocks
@staticmethod
def _serialize_tool_calls_to_content(tool_calls, use_cache: bool = False) -> list[ToolUseBlockParam]:
"""Convert tool calls to Anthropic's ToolUseBlockParam format."""
blocks: list[ToolUseBlockParam] = []
for i, tool_call in enumerate(tool_calls):
# Parse the arguments JSON string to object
try:
input_obj = json.loads(tool_call.function.arguments)
except json.JSONDecodeError:
# If arguments aren't valid JSON, use as string
input_obj = {'arguments': tool_call.function.arguments}
is_last = i == len(tool_calls) - 1
blocks.append(
ToolUseBlockParam(
id=tool_call.id,
input=input_obj,
name=tool_call.function.name,
type='tool_use',
cache_control=AnthropicMessageSerializer._serialize_cache_control(use_cache and is_last),
)
)
return blocks
# region - Serialize overloads
@overload
@staticmethod
def serialize(message: UserMessage) -> MessageParam: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> SystemMessage: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> MessageParam: ...
@staticmethod
def serialize(message: BaseMessage) -> MessageParam | SystemMessage:
	"""Serialize a custom message to an Anthropic MessageParam.

	Note: Anthropic doesn't have a 'system' role. System messages are
	returned unchanged here; `serialize_messages` extracts them and sends
	their content via the API's separate `system` parameter.

	Raises:
		ValueError: for unrecognized message types.
	"""
	if isinstance(message, UserMessage):
		content = AnthropicMessageSerializer._serialize_content(message.content, use_cache=message.cache)
		return MessageParam(role='user', content=content)
	elif isinstance(message, SystemMessage):
		# Anthropic doesn't have system messages in the messages array.
		# Return the message untouched; the caller routes it to the API's
		# top-level `system` parameter.
		return message
	elif isinstance(message, AssistantMessage):
		# Handle content and tool calls
		blocks: list[TextBlockParam | ToolUseBlockParam] = []
		# Add content blocks if present
		if message.content is not None:
			if isinstance(message.content, str):
				# String content: only cache if it's the only/last block (no tool calls)
				blocks.append(
					TextBlockParam(
						text=message.content,
						type='text',
						cache_control=AnthropicMessageSerializer._serialize_cache_control(
							message.cache and not message.tool_calls
						),
					)
				)
			else:
				# Process content parts (text and refusal)
				for i, part in enumerate(message.content):
					# Only the last content block gets the cache marker, and only
					# when no tool-use blocks will follow it.
					is_last_content = (i == len(message.content) - 1) and not message.tool_calls
					if part.type == 'text':
						blocks.append(
							AnthropicMessageSerializer._serialize_content_part_text(
								part, use_cache=message.cache and is_last_content
							)
						)
					# Note: Anthropic doesn't have a specific refusal block type,
					# so refusal parts are currently skipped (text conversion is
					# left commented out below).
					# elif part.type == 'refusal':
					# 	blocks.append(TextBlockParam(text=f'[Refusal] {part.refusal}', type='text'))
		# Add tool use blocks if present
		if message.tool_calls:
			tool_blocks = AnthropicMessageSerializer._serialize_tool_calls_to_content(
				message.tool_calls, use_cache=message.cache
			)
			blocks.extend(tool_blocks)
		# If no content or tool calls, add empty text block
		# (Anthropic requires at least one content block)
		if not blocks:
			blocks.append(
				TextBlockParam(
					text='', type='text', cache_control=AnthropicMessageSerializer._serialize_cache_control(message.cache)
				)
			)
		# If caching is enabled or we have multiple blocks, return blocks as-is
		# Otherwise, simplify single text blocks to plain string
		if message.cache or len(blocks) > 1:
			content = blocks
		else:
			# Only simplify when no caching and single block
			single_block = blocks[0]
			if single_block['type'] == 'text' and not single_block.get('cache_control'):
				content = single_block['text']
			else:
				content = blocks
		return MessageParam(
			role='assistant',
			content=content,
		)
	else:
		raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def _clean_cache_messages(messages: list[NonSystemMessage]) -> list[NonSystemMessage]:
	"""Keep cache=True only on the last message that requested it.

	Because of how Claude prompt caching works, only the final cache
	breakpoint matters, so every earlier cache flag is cleared.

	Args:
		messages: List of non-system messages to clean.

	Returns:
		Deep copies of the messages with at most one cache=True flag; the
		input list is never mutated.
	"""
	if not messages:
		return messages
	copies = [message.model_copy(deep=True) for message in messages]
	# Walk backwards to locate the last message that asked for caching.
	last_cached = next((i for i in range(len(copies) - 1, -1, -1) if copies[i].cache), -1)
	if last_cached == -1:
		return copies
	for position, message in enumerate(copies):
		if position != last_cached:
			message.cache = False
	return copies
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> tuple[list[MessageParam], list[TextBlockParam] | str | None]:
	"""Serialize a message list, pulling out any system message.

	Returns:
		(serialized_messages, system) where ``system`` is the serialized
		content of the last SystemMessage found, or None if there was none.
	"""
	copies = [message.model_copy(deep=True) for message in messages]
	system_message: SystemMessage | None = None
	conversation: list[NonSystemMessage] = []
	for message in copies:
		if isinstance(message, SystemMessage):
			# Anthropic takes the system prompt separately; the last system
			# message in the list wins.
			system_message = message
		else:
			conversation.append(message)
	# Only the final cache=True message should keep its cache flag.
	conversation = AnthropicMessageSerializer._clean_cache_messages(conversation)
	serialized = [AnthropicMessageSerializer.serialize(message) for message in conversation]
	serialized_system: list[TextBlockParam] | str | None = None
	if system_message:
		serialized_system = AnthropicMessageSerializer._serialize_content_to_str(
			system_message.content, use_cache=system_message.cache
		)
	return serialized, serialized_system
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/anthropic/chat.py | browser_use/llm/anthropic/chat.py | import json
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from anthropic import (
APIConnectionError,
APIStatusError,
AsyncAnthropic,
NotGiven,
RateLimitError,
omit,
)
from anthropic.types import CacheControlEphemeralParam, Message, ToolParam
from anthropic.types.model_param import ModelParam
from anthropic.types.text_block import TextBlock
from anthropic.types.tool_choice_tool_param import ToolChoiceToolParam
from httpx import Timeout
from pydantic import BaseModel
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAnthropic(BaseChatModel):
	"""
	A wrapper around Anthropic's chat model.
	"""

	# Model configuration
	model: str | ModelParam
	max_tokens: int = 8192
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None

	# Client initialization parameters
	api_key: str | None = None
	auth_token: str | None = None
	base_url: str | httpx.URL | None = None
	timeout: float | Timeout | None | NotGiven = NotGiven()
	max_retries: int = 10
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None
	http_client: httpx.AsyncClient | None = None

	# Static
	@property
	def provider(self) -> str:
		# Provider identifier used for routing/telemetry.
		return 'anthropic'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare AsyncAnthropic constructor kwargs from the dataclass fields.

		Drops values that were not provided: None, and the NotGiven sentinel
		used for `timeout`.
		"""
		# Define base client params
		base_params = {
			'api_key': self.api_key,
			'auth_token': self.auth_token,
			'base_url': self.base_url,
			'timeout': self.timeout,
			'max_retries': self.max_retries,
			'default_headers': self.default_headers,
			'default_query': self.default_query,
			'http_client': self.http_client,
		}
		# BUGFIX: the previous check `v is not NotGiven()` compared identity
		# against a freshly constructed NotGiven instance, which is never the
		# stored sentinel, so an unset `timeout` leaked through to the client.
		# isinstance() filters the sentinel correctly.
		client_params = {}
		for k, v in base_params.items():
			if v is not None and not isinstance(v, NotGiven):
				client_params[k] = v
		return client_params

	def _get_client_params_for_invoke(self):
		"""Prepare per-request sampling parameters for messages.create()."""
		client_params = {}
		if self.temperature is not None:
			client_params['temperature'] = self.temperature
		if self.max_tokens is not None:
			client_params['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			client_params['top_p'] = self.top_p
		if self.seed is not None:
			# NOTE(review): the Anthropic Messages API does not document a
			# `seed` parameter -- confirm it is accepted before setting this.
			client_params['seed'] = self.seed
		return client_params

	def get_client(self) -> AsyncAnthropic:
		"""
		Returns an AsyncAnthropic client.

		Returns:
			AsyncAnthropic: An instance of the AsyncAnthropic client.
		"""
		client_params = self._get_client_params()
		return AsyncAnthropic(**client_params)

	@property
	def name(self) -> str:
		# Model name string used in logs and error reporting.
		return str(self.model)

	def _get_usage(self, response: Message) -> ChatInvokeUsage | None:
		"""Map Anthropic usage metadata onto ChatInvokeUsage."""
		usage = ChatInvokeUsage(
			prompt_tokens=response.usage.input_tokens
			+ (
				response.usage.cache_read_input_tokens or 0
			),  # Anthropic reports cache reads separately from input tokens; add them to get the true prompt size
			completion_tokens=response.usage.output_tokens,
			total_tokens=response.usage.input_tokens + response.usage.output_tokens,
			prompt_cached_tokens=response.usage.cache_read_input_tokens,
			prompt_cache_creation_tokens=response.usage.cache_creation_input_tokens,
			prompt_image_tokens=None,
		)
		return usage

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke the model with the given messages.

		Args:
			messages: Conversation so far; any SystemMessage is routed to the
				API's separate `system` parameter by the serializer.
			output_format: Optional Pydantic model class; when given,
				structured output is obtained by forcing a tool call whose
				input schema is the model's JSON schema.

		Returns:
			Either a string response or an instance of output_format.

		Raises:
			ModelRateLimitError: when the API rate-limits the request.
			ModelProviderError: on connection/API errors or malformed replies.
		"""
		anthropic_messages, system_prompt = AnthropicMessageSerializer.serialize_messages(messages)
		try:
			if output_format is None:
				# Normal completion without structured output
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					system=system_prompt or omit,
					**self._get_client_params_for_invoke(),
				)
				# Ensure we have a valid Message object before accessing attributes
				if not isinstance(response, Message):
					raise ModelProviderError(
						message=f'Unexpected response type from Anthropic API: {type(response).__name__}. Response: {str(response)[:200]}',
						status_code=502,
						model=self.name,
					)
				usage = self._get_usage(response)
				# Extract text from the first content block
				first_content = response.content[0]
				if isinstance(first_content, TextBlock):
					response_text = first_content.text
				else:
					# If it's not a text block, convert to string
					response_text = str(first_content)
				return ChatInvokeCompletion(
					completion=response_text,
					usage=usage,
					stop_reason=response.stop_reason,
				)
			else:
				# Use tool calling for structured output: create a tool whose
				# input schema is the requested output format.
				tool_name = output_format.__name__
				schema = SchemaOptimizer.create_optimized_json_schema(output_format)
				# Remove title from schema if present (Anthropic doesn't like it in parameters)
				if 'title' in schema:
					del schema['title']
				tool = ToolParam(
					name=tool_name,
					description=f'Extract information in the format of {tool_name}',
					input_schema=schema,
					cache_control=CacheControlEphemeralParam(type='ephemeral'),
				)
				# Force the model to use this tool
				tool_choice = ToolChoiceToolParam(type='tool', name=tool_name)
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					tools=[tool],
					system=system_prompt or omit,
					tool_choice=tool_choice,
					**self._get_client_params_for_invoke(),
				)
				# Ensure we have a valid Message object before accessing attributes
				if not isinstance(response, Message):
					raise ModelProviderError(
						message=f'Unexpected response type from Anthropic API: {type(response).__name__}. Response: {str(response)[:200]}',
						status_code=502,
						model=self.name,
					)
				usage = self._get_usage(response)
				# Extract the tool use block
				for content_block in response.content:
					if hasattr(content_block, 'type') and content_block.type == 'tool_use':
						# Parse the tool input as the structured output
						try:
							return ChatInvokeCompletion(
								completion=output_format.model_validate(content_block.input),
								usage=usage,
								stop_reason=response.stop_reason,
							)
						except Exception as e:
							# If validation fails, try to parse it as JSON first
							if isinstance(content_block.input, str):
								data = json.loads(content_block.input)
								return ChatInvokeCompletion(
									completion=output_format.model_validate(data),
									usage=usage,
									stop_reason=response.stop_reason,
								)
							raise e
				# If no tool use block found, raise an error
				raise ValueError('Expected tool use in response but none found')
		except APIConnectionError as e:
			raise ModelProviderError(message=e.message, model=self.name) from e
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/cerebras/serializer.py | browser_use/llm/cerebras/serializer.py | from __future__ import annotations
import json
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
MessageDict = dict[str, Any]
class CerebrasMessageSerializer:
	"""Serializer for converting browser-use messages to Cerebras messages."""

	# -------- content handling --------------------------------------------
	@staticmethod
	def _serialize_text_part(part: ContentPartTextParam) -> str:
		"""Return the plain text of a text content part."""
		return part.text

	@staticmethod
	def _serialize_image_part(part: ContentPartImageParam) -> dict[str, Any]:
		"""Serialize an image part to the OpenAI-style image_url dict.

		Data URLs and remote URLs share the same wire format, so no branching
		is needed (the previous if/else returned identical dicts).
		"""
		return {'type': 'image_url', 'image_url': {'url': part.image_url.url}}

	@staticmethod
	def _serialize_content(content: Any) -> str | list[dict[str, Any]]:
		"""Serialize message content to a string or a list of content-part dicts.

		None becomes the empty string; strings pass through unchanged.
		"""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		serialized: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				serialized.append({'type': 'text', 'text': CerebrasMessageSerializer._serialize_text_part(part)})
			elif part.type == 'image_url':
				serialized.append(CerebrasMessageSerializer._serialize_image_part(part))
			elif part.type == 'refusal':
				# No refusal part type on this API; degrade to labelled text.
				serialized.append({'type': 'text', 'text': f'[Refusal] {part.refusal}'})
		return serialized

	# -------- tool-call handling ------------------------------------------
	@staticmethod
	def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[dict[str, Any]]:
		"""Convert tool calls to OpenAI-compatible function-call dicts.

		NOTE(review): `arguments` is emitted as a parsed object rather than
		the JSON string most OpenAI-compatible endpoints expect -- confirm
		against the Cerebras API before changing.
		"""
		cerebras_tool_calls: list[dict[str, Any]] = []
		for tc in tool_calls:
			try:
				arguments = json.loads(tc.function.arguments)
			except json.JSONDecodeError:
				# Keep non-JSON arguments verbatim under a fixed key.
				arguments = {'arguments': tc.function.arguments}
			cerebras_tool_calls.append(
				{
					'id': tc.id,
					'type': 'function',
					'function': {
						'name': tc.function.name,
						'arguments': arguments,
					},
				}
			)
		return cerebras_tool_calls

	# -------- single-message serialization --------------------------------
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> MessageDict: ...
	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> MessageDict: ...
	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> MessageDict: ...
	@staticmethod
	def serialize(message: BaseMessage) -> MessageDict:
		"""Serialize one browser-use message to a Cerebras message dict.

		Raises:
			ValueError: if the message type is unknown.
		"""
		if isinstance(message, UserMessage):
			return {
				'role': 'user',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, SystemMessage):
			return {
				'role': 'system',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, AssistantMessage):
			msg: MessageDict = {
				'role': 'assistant',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
			if message.tool_calls:
				msg['tool_calls'] = CerebrasMessageSerializer._serialize_tool_calls(message.tool_calls)
			return msg
		raise ValueError(f'Unknown message type: {type(message)}')

	# -------- list serialization ------------------------------------------
	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[MessageDict]:
		"""Serialize a list of messages, preserving order."""
		return [CerebrasMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/cerebras/chat.py | browser_use/llm/cerebras/chat.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import (
APIConnectionError,
APIError,
APIStatusError,
APITimeoutError,
AsyncOpenAI,
RateLimitError,
)
from openai.types.chat import ChatCompletion
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.cerebras.serializer import CerebrasMessageSerializer
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatCerebras(BaseChatModel):
	"""Cerebras inference wrapper (OpenAI-compatible).

	Talks to the Cerebras endpoint through the OpenAI SDK, since the API is
	wire-compatible with OpenAI chat completions.
	"""
	# Model to request; must be a name served by Cerebras.
	model: str = 'llama3.1-8b'
	# Generation parameters
	max_tokens: int | None = 4096
	temperature: float | None = 0.2
	top_p: float | None = None
	seed: int | None = None
	# Connection parameters
	api_key: str | None = None
	base_url: str | httpx.URL | None = 'https://api.cerebras.ai/v1'
	timeout: float | httpx.Timeout | None = None
	# Extra kwargs forwarded verbatim to AsyncOpenAI(...).
	client_params: dict[str, Any] | None = None
	@property
	def provider(self) -> str:
		# Provider identifier used for routing/telemetry.
		return 'cerebras'
	def _client(self) -> AsyncOpenAI:
		# A fresh client per call; the OpenAI SDK manages its own pooling.
		return AsyncOpenAI(
			api_key=self.api_key,
			base_url=self.base_url,
			timeout=self.timeout,
			**(self.client_params or {}),
		)
	@property
	def name(self) -> str:
		# Human-readable model name used in logs and error messages.
		return self.model
	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
		"""Map OpenAI-style usage onto ChatInvokeUsage (None when absent)."""
		if response.usage is not None:
			usage = ChatInvokeUsage(
				prompt_tokens=response.usage.prompt_tokens,
				prompt_cached_tokens=None,  # cache metrics not reported by this endpoint
				prompt_cache_creation_tokens=None,
				prompt_image_tokens=None,
				completion_tokens=response.usage.completion_tokens,
				total_tokens=response.usage.total_tokens,
			)
		else:
			usage = None
		return usage
	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[str]: ...
	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T],
		**kwargs: Any,
	) -> ChatInvokeCompletion[T]: ...
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T] | None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Cerebras ainvoke supports:
		1. Regular text/multi-turn conversation
		2. JSON output: the model is prompted to emit JSON matching
		   ``output_format``'s schema and the reply is parsed/validated.

		Raises:
			ModelRateLimitError: on rate limiting from the endpoint.
			ModelProviderError: on any other API or parsing failure.
		"""
		client = self._client()
		cerebras_messages = CerebrasMessageSerializer.serialize_messages(messages)
		# Sampling parameters shared by both request paths.
		common: dict[str, Any] = {}
		if self.temperature is not None:
			common['temperature'] = self.temperature
		if self.max_tokens is not None:
			common['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			common['top_p'] = self.top_p
		if self.seed is not None:
			common['seed'] = self.seed
		# (1) Regular multi-turn conversation/text output
		if output_format is None:
			try:
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=cerebras_messages,  # type: ignore
					**common,
				)
				usage = self._get_usage(resp)
				return ChatInvokeCompletion(
					completion=resp.choices[0].message.content or '',
					usage=usage,
				)
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e
		# (2) JSON output path (prompt-engineered instead of response_format)
		if output_format is not None and hasattr(output_format, 'model_json_schema'):
			try:
				# For Cerebras, we'll use a simpler approach without response_format
				# Instead, we'll ask the model to return JSON and parse it
				import json
				# Get the schema to guide the model
				schema = output_format.model_json_schema()
				schema_str = json.dumps(schema, indent=2)
				# Create a prompt that asks for the specific JSON structure
				json_prompt = f"""
Please respond with a JSON object that follows this exact schema:
{schema_str}
Your response must be valid JSON only, no other text.
"""
				# Add or modify the last user message to include the JSON prompt
				if cerebras_messages and cerebras_messages[-1]['role'] == 'user':
					if isinstance(cerebras_messages[-1]['content'], str):
						cerebras_messages[-1]['content'] += json_prompt
					elif isinstance(cerebras_messages[-1]['content'], list):
						cerebras_messages[-1]['content'].append({'type': 'text', 'text': json_prompt})
				else:
					# Add as a new user message
					cerebras_messages.append({'role': 'user', 'content': json_prompt})
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=cerebras_messages,  # type: ignore
					**common,
				)
				content = resp.choices[0].message.content
				if not content:
					raise ModelProviderError('Empty JSON content in Cerebras response', model=self.name)
				usage = self._get_usage(resp)
				# Try to extract JSON from the response
				import re
				# NOTE(review): this greedy match grabs from the first '{' to
				# the last '}' in the reply; nested objects are fine, but two
				# separate objects would be merged into one span -- confirm
				# acceptable.
				json_match = re.search(r'\{.*\}', content, re.DOTALL)
				if json_match:
					json_str = json_match.group(0)
				else:
					json_str = content
				parsed = output_format.model_validate_json(json_str)
				return ChatInvokeCompletion(
					completion=parsed,
					usage=usage,
				)
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e
		# NOTE(review): only reachable when output_format is set but lacks
		# model_json_schema (i.e. is not a pydantic model).
		raise ModelProviderError('No valid ainvoke execution path for Cerebras LLM', model=self.name)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/google/serializer.py | browser_use/llm/google/serializer.py | import base64
from google.genai.types import Content, ContentListUnion, Part
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
UserMessage,
)
class GoogleMessageSerializer:
	"""Serializer for converting messages to Google Gemini format."""
	@staticmethod
	def serialize_messages(
		messages: list[BaseMessage], include_system_in_user: bool = False
	) -> tuple[ContentListUnion, str | None]:
		"""
		Convert a list of BaseMessages to Google format, extracting system message.

		Google handles system instructions separately from the conversation, so we need to:
		1. Extract any system messages and return them separately as a string (or include in first user message if flag is set)
		2. Convert the remaining messages to Content objects

		Args:
			messages: List of messages to convert
			include_system_in_user: If True, system/developer messages are prepended to the first user message

		Returns:
			A tuple of (formatted_messages, system_message) where:
			- formatted_messages: List of Content objects for the conversation
			- system_message: System instruction string or None
		"""
		# Work on deep copies so callers' message objects are never mutated.
		messages = [m.model_copy(deep=True) for m in messages]
		formatted_messages: ContentListUnion = []
		system_message: str | None = None
		system_parts: list[str] = []
		for i, message in enumerate(messages):
			role = message.role if hasattr(message, 'role') else None
			# Handle system/developer messages
			if isinstance(message, SystemMessage) or role in ['system', 'developer']:
				# Extract system message content as string
				if isinstance(message.content, str):
					if include_system_in_user:
						system_parts.append(message.content)
					else:
						# NOTE(review): when several system messages appear,
						# the last one wins here -- earlier ones are dropped.
						system_message = message.content
				elif message.content is not None:
					# Handle Iterable of content parts
					parts = []
					for part in message.content:
						if part.type == 'text':
							parts.append(part.text)
					combined_text = '\n'.join(parts)
					if include_system_in_user:
						system_parts.append(combined_text)
					else:
						system_message = combined_text
				continue
			# Determine the role for non-system messages
			if isinstance(message, UserMessage):
				role = 'user'
			elif isinstance(message, AssistantMessage):
				role = 'model'
			else:
				# Default to user for any unknown message types
				role = 'user'
			# Initialize message parts
			message_parts: list[Part] = []
			# If this is the first user message and we have system parts, prepend them.
			# NOTE(review): if the conversation starts with an assistant message
			# (or contains no user message at all), accumulated system_parts are
			# silently dropped -- confirm this is intended.
			if include_system_in_user and system_parts and role == 'user' and not formatted_messages:
				system_text = '\n\n'.join(system_parts)
				if isinstance(message.content, str):
					message_parts.append(Part.from_text(text=f'{system_text}\n\n{message.content}'))
				else:
					# Add system text as the first part.
					# NOTE(review): in this branch the user message's own content
					# parts are never appended -- only the system text survives.
					# Looks like a bug; verify against callers before fixing.
					message_parts.append(Part.from_text(text=system_text))
				system_parts = []  # Clear after using
			else:
				# Extract content and create parts normally
				if isinstance(message.content, str):
					# Regular text content
					message_parts = [Part.from_text(text=message.content)]
				elif message.content is not None:
					# Handle Iterable of content parts
					for part in message.content:
						if part.type == 'text':
							message_parts.append(Part.from_text(text=part.text))
						elif part.type == 'refusal':
							message_parts.append(Part.from_text(text=f'[Refusal] {part.refusal}'))
						elif part.type == 'image_url':
							# Handle images
							url = part.image_url.url
							# Format: data:image/jpeg;base64,<data>
							header, data = url.split(',', 1)
							# Decode base64 to bytes
							image_bytes = base64.b64decode(data)
							# Use the media_type from ImageURL, which correctly identifies the image format
							mime_type = part.image_url.media_type
							# Add image part
							image_part = Part.from_bytes(data=image_bytes, mime_type=mime_type)
							message_parts.append(image_part)
			# Create the Content object; messages with no parts are skipped.
			if message_parts:
				final_message = Content(role=role, parts=message_parts)
				# for some reason, the type checker is not able to infer the type of formatted_messages
				formatted_messages.append(final_message)  # type: ignore
		return formatted_messages, system_message
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/google/chat.py | browser_use/llm/google/chat.py | import asyncio
import json
import logging
import random
import time
from dataclasses import dataclass, field
from typing import Any, Literal, TypeVar, overload
from google import genai
from google.auth.credentials import Credentials
from google.genai import types
from google.genai.types import MediaModality
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.google.serializer import GoogleMessageSerializer
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
# Model names verified to work with this wrapper; ChatGoogle also accepts
# arbitrary strings for newer/unlisted models.
VerifiedGeminiModels = Literal[
	'gemini-2.0-flash',
	'gemini-2.0-flash-exp',
	'gemini-2.0-flash-lite-preview-02-05',
	'Gemini-2.0-exp',
	'gemini-2.5-flash',
	'gemini-2.5-flash-lite',
	'gemini-flash-latest',
	'gemini-flash-lite-latest',
	'gemini-2.5-pro',
	'gemini-3-pro-preview',
	'gemini-3-flash-preview',
	'gemma-3-27b-it',
	'gemma-3-4b',
	'gemma-3-12b',
	'gemma-3n-e2b',
	'gemma-3n-e4b',
]
@dataclass
class ChatGoogle(BaseChatModel):
"""
A wrapper around Google's Gemini chat model using the genai client.
This class accepts all genai.Client parameters while adding model,
temperature, and config parameters for the LLM interface.
Args:
model: The Gemini model to use
temperature: Temperature for response generation
config: Additional configuration parameters to pass to generate_content
(e.g., tools, safety_settings, etc.).
api_key: Google API key
vertexai: Whether to use Vertex AI
credentials: Google credentials object
project: Google Cloud project ID
location: Google Cloud location
http_options: HTTP options for the client
include_system_in_user: If True, system messages are included in the first user message
supports_structured_output: If True, uses native JSON mode; if False, uses prompt-based fallback
max_retries: Number of retries for retryable errors (default: 5)
retryable_status_codes: List of HTTP status codes to retry on (default: [429, 500, 502, 503, 504])
retry_base_delay: Base delay in seconds for exponential backoff (default: 1.0)
retry_max_delay: Maximum delay in seconds between retries (default: 60.0)
Example:
from google.genai import types
llm = ChatGoogle(
model='gemini-2.0-flash-exp',
config={
'tools': [types.Tool(code_execution=types.ToolCodeExecution())]
},
max_retries=5,
retryable_status_codes=[429, 500, 502, 503, 504],
retry_base_delay=1.0,
retry_max_delay=60.0,
)
"""
# Model configuration
model: VerifiedGeminiModels | str
temperature: float | None = 0.5
top_p: float | None = None
seed: int | None = None
thinking_budget: int | None = None # for gemini-2.5 flash and flash-lite models, default will be set to 0
max_output_tokens: int | None = 8096
config: types.GenerateContentConfigDict | None = None
include_system_in_user: bool = False
supports_structured_output: bool = True # New flag
max_retries: int = 5 # Number of retries for retryable errors
retryable_status_codes: list[int] = field(default_factory=lambda: [429, 500, 502, 503, 504]) # Status codes to retry on
retry_base_delay: float = 1.0 # Base delay in seconds for exponential backoff
retry_max_delay: float = 60.0 # Maximum delay in seconds between retries
# Client initialization parameters
api_key: str | None = None
vertexai: bool | None = None
credentials: Credentials | None = None
project: str | None = None
location: str | None = None
http_options: types.HttpOptions | types.HttpOptionsDict | None = None
# Internal client cache to prevent connection issues
_client: genai.Client | None = None
# Static
@property
def provider(self) -> str:
	# Provider identifier used for routing/telemetry.
	return 'google'
@property
def logger(self) -> logging.Logger:
	"""Get logger for this chat instance"""
	# One logger per model, namespaced under browser_use.llm.google.
	return logging.getLogger(f'browser_use.llm.google.{self.model}')
def _get_client_params(self) -> dict[str, Any]:
	"""Collect genai.Client constructor kwargs, skipping unset (None) values."""
	provided = {
		'api_key': self.api_key,
		'vertexai': self.vertexai,
		'credentials': self.credentials,
		'project': self.project,
		'location': self.location,
		'http_options': self.http_options,
	}
	return {name: value for name, value in provided.items() if value is not None}
def get_client(self) -> genai.Client:
	"""
	Returns a genai.Client instance.

	The client is built lazily on first use and cached on self._client to
	avoid re-creating connections.
	"""
	if self._client is None:
		self._client = genai.Client(**self._get_client_params())
	return self._client
@property
def name(self) -> str:
	# Model identifier string (used in logs and usage tracking).
	return str(self.model)
def _get_stop_reason(self, response: types.GenerateContentResponse) -> str | None:
	"""Extract the finish reason of the first candidate, if present."""
	candidates = getattr(response, 'candidates', None)
	if not candidates:
		return None
	first = candidates[0]
	if not hasattr(first, 'finish_reason'):
		return None
	return str(first.finish_reason)
def _get_usage(self, response: types.GenerateContentResponse) -> ChatInvokeUsage | None:
	"""Translate Gemini usage metadata into ChatInvokeUsage (None when absent)."""
	usage: ChatInvokeUsage | None = None
	if response.usage_metadata is not None:
		image_tokens = 0
		if response.usage_metadata.prompt_tokens_details is not None:
			# Sum only the image-modality share of the prompt tokens.
			image_tokens = sum(
				detail.token_count or 0
				for detail in response.usage_metadata.prompt_tokens_details
				if detail.modality == MediaModality.IMAGE
			)
		usage = ChatInvokeUsage(
			prompt_tokens=response.usage_metadata.prompt_token_count or 0,
			# Thinking tokens are folded into the completion count here.
			completion_tokens=(response.usage_metadata.candidates_token_count or 0)
			+ (response.usage_metadata.thoughts_token_count or 0),
			total_tokens=response.usage_metadata.total_token_count or 0,
			prompt_cached_tokens=response.usage_metadata.cached_content_token_count,
			prompt_cache_creation_tokens=None,  # not reported by this API
			prompt_image_tokens=image_tokens,
		)
	return usage
# Typed overloads for ainvoke: plain-text completion when no output_format
# is given, a parsed pydantic instance otherwise.
@overload
async def ainvoke(
	self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
"""
Invoke the model with the given messages.
Args:
messages: List of chat messages
output_format: Optional Pydantic model class for structured output
Returns:
Either a string response or an instance of output_format
"""
# Serialize messages to Google format with the include_system_in_user flag
contents, system_instruction = GoogleMessageSerializer.serialize_messages(
messages, include_system_in_user=self.include_system_in_user
)
# Build config dictionary starting with user-provided config
config: types.GenerateContentConfigDict = {}
if self.config:
config = self.config.copy()
# Apply model-specific configuration (these can override config)
if self.temperature is not None:
config['temperature'] = self.temperature
# Add system instruction if present
if system_instruction:
config['system_instruction'] = system_instruction
if self.top_p is not None:
config['top_p'] = self.top_p
if self.seed is not None:
config['seed'] = self.seed
# set default for flash, flash-lite, gemini-flash-lite-latest, and gemini-flash-latest models
if self.thinking_budget is None and ('gemini-2.5-flash' in self.model or 'gemini-flash' in self.model):
self.thinking_budget = 0
if self.thinking_budget is not None:
thinking_config_dict: types.ThinkingConfigDict = {'thinking_budget': self.thinking_budget}
config['thinking_config'] = thinking_config_dict
if self.max_output_tokens is not None:
config['max_output_tokens'] = self.max_output_tokens
async def _make_api_call():
start_time = time.time()
self.logger.debug(f'๐ Starting API call to {self.model}')
try:
if output_format is None:
# Return string response
self.logger.debug('๐ Requesting text response')
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=contents, # type: ignore
config=config,
)
elapsed = time.time() - start_time
self.logger.debug(f'โ
Got text response in {elapsed:.2f}s')
# Handle case where response.text might be None
text = response.text or ''
if not text:
self.logger.warning('โ ๏ธ Empty text response received')
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=text,
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# Handle structured output
if self.supports_structured_output:
# Use native JSON mode
self.logger.debug(f'๐ง Requesting structured output for {output_format.__name__}')
config['response_mime_type'] = 'application/json'
# Convert Pydantic model to Gemini-compatible schema
optimized_schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
gemini_schema = self._fix_gemini_schema(optimized_schema)
config['response_schema'] = gemini_schema
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=contents,
config=config,
)
elapsed = time.time() - start_time
self.logger.debug(f'โ
Got structured response in {elapsed:.2f}s')
usage = self._get_usage(response)
# Handle case where response.parsed might be None
if response.parsed is None:
self.logger.debug('๐ Parsing JSON from text response')
# When using response_schema, Gemini returns JSON as text
if response.text:
try:
# Handle JSON wrapped in markdown code blocks (common Gemini behavior)
text = response.text.strip()
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
self.logger.debug('๐ง Stripped ```json``` wrapper from response')
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
self.logger.debug('๐ง Stripped ``` wrapper from response')
# Parse the JSON text and validate with the Pydantic model
parsed_data = json.loads(text)
return ChatInvokeCompletion(
completion=output_format.model_validate(parsed_data),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
except (json.JSONDecodeError, ValueError) as e:
self.logger.error(f'โ Failed to parse JSON response: {str(e)}')
self.logger.debug(f'Raw response text: {response.text[:200]}...')
raise ModelProviderError(
message=f'Failed to parse or validate response {response}: {str(e)}',
status_code=500,
model=self.model,
) from e
else:
self.logger.error('โ No response text received')
raise ModelProviderError(
message=f'No response from model {response}',
status_code=500,
model=self.model,
)
# Ensure we return the correct type
if isinstance(response.parsed, output_format):
return ChatInvokeCompletion(
completion=response.parsed,
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# If it's not the expected type, try to validate it
return ChatInvokeCompletion(
completion=output_format.model_validate(response.parsed),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# Fallback: Request JSON in the prompt for models without native JSON mode
self.logger.debug(f'๐ Using fallback JSON mode for {output_format.__name__}')
# Create a copy of messages to modify
modified_messages = [m.model_copy(deep=True) for m in messages]
# Add JSON instruction to the last message
if modified_messages and isinstance(modified_messages[-1].content, str):
json_instruction = f'\n\nPlease respond with a valid JSON object that matches this schema: {SchemaOptimizer.create_optimized_json_schema(output_format)}'
modified_messages[-1].content += json_instruction
# Re-serialize with modified messages
fallback_contents, fallback_system = GoogleMessageSerializer.serialize_messages(
modified_messages, include_system_in_user=self.include_system_in_user
)
# Update config with fallback system instruction if present
fallback_config = config.copy()
if fallback_system:
fallback_config['system_instruction'] = fallback_system
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=fallback_contents, # type: ignore
config=fallback_config,
)
elapsed = time.time() - start_time
self.logger.debug(f'โ
Got fallback response in {elapsed:.2f}s')
usage = self._get_usage(response)
# Try to extract JSON from the text response
if response.text:
try:
# Try to find JSON in the response
text = response.text.strip()
# Common patterns: JSON wrapped in markdown code blocks
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
# Parse and validate
parsed_data = json.loads(text)
return ChatInvokeCompletion(
completion=output_format.model_validate(parsed_data),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
except (json.JSONDecodeError, ValueError) as e:
self.logger.error(f'โ Failed to parse fallback JSON: {str(e)}')
self.logger.debug(f'Raw response text: {response.text[:200]}...')
raise ModelProviderError(
message=f'Model does not support JSON mode and failed to parse JSON from text response: {str(e)}',
status_code=500,
model=self.model,
) from e
else:
self.logger.error('โ No response text in fallback mode')
raise ModelProviderError(
message='No response from model',
status_code=500,
model=self.model,
)
except Exception as e:
elapsed = time.time() - start_time
self.logger.error(f'๐ฅ API call failed after {elapsed:.2f}s: {type(e).__name__}: {e}')
# Re-raise the exception
raise
# Retry logic for certain errors with exponential backoff
assert self.max_retries >= 1, 'max_retries must be at least 1'
for attempt in range(self.max_retries):
try:
return await _make_api_call()
except ModelProviderError as e:
# Retry if status code is in retryable list and we have attempts left
if e.status_code in self.retryable_status_codes and attempt < self.max_retries - 1:
# Exponential backoff with jitter: base_delay * 2^attempt + random jitter
delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
jitter = random.uniform(0, delay * 0.1) # 10% jitter
total_delay = delay + jitter
self.logger.warning(
f'โ ๏ธ Got {e.status_code} error, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
)
await asyncio.sleep(total_delay)
continue
# Otherwise raise
raise
except Exception as e:
# For non-ModelProviderError, wrap and raise
error_message = str(e)
status_code: int | None = None
# Try to extract status code if available
if hasattr(e, 'response'):
response_obj = getattr(e, 'response', None)
if response_obj and hasattr(response_obj, 'status_code'):
status_code = getattr(response_obj, 'status_code', None)
# Enhanced timeout error handling
if 'timeout' in error_message.lower() or 'cancelled' in error_message.lower():
if isinstance(e, asyncio.CancelledError) or 'CancelledError' in str(type(e)):
error_message = 'Gemini API request was cancelled (likely timeout). Consider: 1) Reducing input size, 2) Using a different model, 3) Checking network connectivity.'
status_code = 504
else:
status_code = 408
elif any(indicator in error_message.lower() for indicator in ['forbidden', '403']):
status_code = 403
elif any(
indicator in error_message.lower()
for indicator in ['rate limit', 'resource exhausted', 'quota exceeded', 'too many requests', '429']
):
status_code = 429
elif any(
indicator in error_message.lower()
for indicator in ['service unavailable', 'internal server error', 'bad gateway', '503', '502', '500']
):
status_code = 503
raise ModelProviderError(
message=error_message,
status_code=status_code or 502,
model=self.name,
) from e
raise RuntimeError('Retry loop completed without return or exception')
def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
    """
    Convert a Pydantic model to a Gemini-compatible schema.

    This function removes unsupported properties like 'additionalProperties' and resolves
    $ref references that Gemini doesn't support.
    """
    # Handle $defs and $ref resolution
    if '$defs' in schema:
        defs = schema.pop('$defs')

        def resolve_refs(obj: Any) -> Any:
            # Recursively inline every {'$ref': '#/$defs/Name'} with its definition.
            if isinstance(obj, dict):
                if '$ref' in obj:
                    ref = obj.pop('$ref')
                    ref_name = ref.split('/')[-1]
                    if ref_name in defs:
                        # Replace the reference with the actual definition.
                        # NOTE(review): .copy() is shallow — nested dicts/lists are
                        # shared between multiple expansions of the same $def;
                        # confirm nothing mutates them in place afterwards.
                        resolved = defs[ref_name].copy()
                        # Merge any additional properties from the reference
                        # ('$ref' itself was already popped above).
                        for key, value in obj.items():
                            if key != '$ref':
                                resolved[key] = value
                        return resolve_refs(resolved)
                    # Unknown ref target: leave the (now $ref-less) dict as-is.
                    return obj
                else:
                    # Recursively process all dictionary values
                    return {k: resolve_refs(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [resolve_refs(item) for item in obj]
            return obj

        schema = resolve_refs(schema)

    # Remove unsupported properties
    def clean_schema(obj: Any, parent_key: str | None = None) -> Any:
        if isinstance(obj, dict):
            # Remove unsupported properties
            cleaned = {}
            for key, value in obj.items():
                # Only strip 'title' when it's a JSON Schema metadata field (not inside 'properties')
                # 'title' as a metadata field appears at schema level, not as a property name
                is_metadata_title = key == 'title' and parent_key != 'properties'
                if key not in ['additionalProperties', 'default'] and not is_metadata_title:
                    cleaned_value = clean_schema(value, parent_key=key)
                    # Handle empty object properties - Gemini doesn't allow empty OBJECT types
                    if (
                        key == 'properties'
                        and isinstance(cleaned_value, dict)
                        and len(cleaned_value) == 0
                        and isinstance(obj.get('type', ''), str)
                        and obj.get('type', '').upper() == 'OBJECT'
                    ):
                        # Convert empty object to have at least one property
                        cleaned['properties'] = {'_placeholder': {'type': 'string'}}
                    else:
                        cleaned[key] = cleaned_value
            # If this is an object type with empty properties, add a placeholder
            # (second pass: covers objects whose properties became empty after cleaning).
            if (
                isinstance(cleaned.get('type', ''), str)
                and cleaned.get('type', '').upper() == 'OBJECT'
                and 'properties' in cleaned
                and isinstance(cleaned['properties'], dict)
                and len(cleaned['properties']) == 0
            ):
                cleaned['properties'] = {'_placeholder': {'type': 'string'}}
            return cleaned
        elif isinstance(obj, list):
            return [clean_schema(item, parent_key=parent_key) for item in obj]
        return obj

    return clean_schema(schema)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/google/__init__.py | browser_use/llm/google/__init__.py | from browser_use.llm.google.chat import ChatGoogle
__all__ = ['ChatGoogle']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/deepseek/serializer.py | browser_use/llm/deepseek/serializer.py | from __future__ import annotations
import json
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
MessageDict = dict[str, Any]
class DeepSeekMessageSerializer:
"""Serializer for converting browser-use messages to DeepSeek messages."""
# -------- content ๅค็ --------------------------------------------------
@staticmethod
def _serialize_text_part(part: ContentPartTextParam) -> str:
return part.text
@staticmethod
def _serialize_image_part(part: ContentPartImageParam) -> dict[str, Any]:
url = part.image_url.url
if url.startswith('data:'):
return {'type': 'image_url', 'image_url': {'url': url}}
return {'type': 'image_url', 'image_url': {'url': url}}
@staticmethod
def _serialize_content(content: Any) -> str | list[dict[str, Any]]:
if content is None:
return ''
if isinstance(content, str):
return content
serialized: list[dict[str, Any]] = []
for part in content:
if part.type == 'text':
serialized.append({'type': 'text', 'text': DeepSeekMessageSerializer._serialize_text_part(part)})
elif part.type == 'image_url':
serialized.append(DeepSeekMessageSerializer._serialize_image_part(part))
elif part.type == 'refusal':
serialized.append({'type': 'text', 'text': f'[Refusal] {part.refusal}'})
return serialized
# -------- Tool-call ๅค็ -------------------------------------------------
@staticmethod
def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[dict[str, Any]]:
deepseek_tool_calls: list[dict[str, Any]] = []
for tc in tool_calls:
try:
arguments = json.loads(tc.function.arguments)
except json.JSONDecodeError:
arguments = {'arguments': tc.function.arguments}
deepseek_tool_calls.append(
{
'id': tc.id,
'type': 'function',
'function': {
'name': tc.function.name,
'arguments': arguments,
},
}
)
return deepseek_tool_calls
# -------- ๅๆกๆถๆฏๅบๅๅ -------------------------------------------------
@overload
@staticmethod
def serialize(message: UserMessage) -> MessageDict: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> MessageDict: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> MessageDict: ...
@staticmethod
def serialize(message: BaseMessage) -> MessageDict:
if isinstance(message, UserMessage):
return {
'role': 'user',
'content': DeepSeekMessageSerializer._serialize_content(message.content),
}
if isinstance(message, SystemMessage):
return {
'role': 'system',
'content': DeepSeekMessageSerializer._serialize_content(message.content),
}
if isinstance(message, AssistantMessage):
msg: MessageDict = {
'role': 'assistant',
'content': DeepSeekMessageSerializer._serialize_content(message.content),
}
if message.tool_calls:
msg['tool_calls'] = DeepSeekMessageSerializer._serialize_tool_calls(message.tool_calls)
return msg
raise ValueError(f'Unknown message type: {type(message)}')
# -------- ๅ่กจๅบๅๅ -----------------------------------------------------
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[MessageDict]:
return [DeepSeekMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/llm/deepseek/chat.py | browser_use/llm/deepseek/chat.py | from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import (
APIConnectionError,
APIError,
APIStatusError,
APITimeoutError,
AsyncOpenAI,
RateLimitError,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.deepseek.serializer import DeepSeekMessageSerializer
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatDeepSeek(BaseChatModel):
    """DeepSeek /chat/completions wrapper (OpenAI-compatible)."""

    model: str = 'deepseek-chat'

    # Generation parameters
    max_tokens: int | None = None
    temperature: float | None = None
    top_p: float | None = None
    seed: int | None = None

    # Connection parameters
    api_key: str | None = None
    base_url: str | httpx.URL | None = 'https://api.deepseek.com/v1'
    timeout: float | httpx.Timeout | None = None
    client_params: dict[str, Any] | None = None

    @property
    def provider(self) -> str:
        # Provider tag used for routing/telemetry.
        return 'deepseek'

    def _client(self) -> AsyncOpenAI:
        # A fresh AsyncOpenAI client is built per call; DeepSeek's endpoint is
        # wire-compatible with the OpenAI SDK.
        return AsyncOpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
            timeout=self.timeout,
            **(self.client_params or {}),
        )

    @property
    def name(self) -> str:
        # Model identifier used in error messages.
        return self.model

    @overload
    async def ainvoke(
        self,
        messages: list[BaseMessage],
        output_format: None = None,
        tools: list[dict[str, Any]] | None = None,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(
        self,
        messages: list[BaseMessage],
        output_format: type[T],
        tools: list[dict[str, Any]] | None = None,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self,
        messages: list[BaseMessage],
        output_format: type[T] | None = None,
        tools: list[dict[str, Any]] | None = None,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """
        DeepSeek ainvoke supports:
        1. Regular text/multi-turn conversation
        2. Function Calling
        3. JSON Output (response_format)
        4. Conversation prefix continuation (beta, prefix, stop)

        Args:
            messages: Conversation messages to serialize and send.
            output_format: Optional Pydantic model; when set, the model is
                forced to answer via a single function call matching its schema.
            tools: Optional OpenAI-style tool definitions.
            stop: Optional stop sequences (used with the beta prefix feature).

        Raises:
            ModelRateLimitError: When the provider rate-limits the request.
            ModelProviderError: For any other API or parsing failure.
        """
        client = self._client()
        ds_messages = DeepSeekMessageSerializer.serialize_messages(messages)
        # Common sampling parameters, included only when explicitly configured.
        common: dict[str, Any] = {}
        if self.temperature is not None:
            common['temperature'] = self.temperature
        if self.max_tokens is not None:
            common['max_tokens'] = self.max_tokens
        if self.top_p is not None:
            common['top_p'] = self.top_p
        if self.seed is not None:
            common['seed'] = self.seed
        # Beta conversation prefix continuation (see official documentation)
        if self.base_url and str(self.base_url).endswith('/beta'):
            # The last assistant message must have prefix
            if ds_messages and isinstance(ds_messages[-1], dict) and ds_messages[-1].get('role') == 'assistant':
                ds_messages[-1]['prefix'] = True
            if stop:
                common['stop'] = stop
        # (1) Regular multi-turn conversation / plain text output
        if output_format is None and not tools:
            try:
                resp = await client.chat.completions.create(  # type: ignore
                    model=self.model,
                    messages=ds_messages,  # type: ignore
                    **common,
                )
                # NOTE(review): usage is always None here even though the API
                # reports token usage — confirm whether it should be surfaced.
                return ChatInvokeCompletion(
                    completion=resp.choices[0].message.content or '',
                    usage=None,
                )
            except RateLimitError as e:
                raise ModelRateLimitError(str(e), model=self.name) from e
            except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
                raise ModelProviderError(str(e), model=self.name) from e
            except Exception as e:
                raise ModelProviderError(str(e), model=self.name) from e
        # (2) Function-calling path (explicit tools, or output_format forced
        # through a synthetic single-function tool with tool_choice).
        if tools or (output_format is not None and hasattr(output_format, 'model_json_schema')):
            try:
                call_tools = tools
                tool_choice = None
                if output_format is not None and hasattr(output_format, 'model_json_schema'):
                    tool_name = output_format.__name__
                    schema = SchemaOptimizer.create_optimized_json_schema(output_format)
                    schema.pop('title', None)
                    call_tools = [
                        {
                            'type': 'function',
                            'function': {
                                'name': tool_name,
                                'description': f'Return a JSON object of type {tool_name}',
                                'parameters': schema,
                            },
                        }
                    ]
                    # Force the model to call exactly this function.
                    tool_choice = {'type': 'function', 'function': {'name': tool_name}}
                resp = await client.chat.completions.create(  # type: ignore
                    model=self.model,
                    messages=ds_messages,  # type: ignore
                    tools=call_tools,  # type: ignore
                    tool_choice=tool_choice,  # type: ignore
                    **common,
                )
                msg = resp.choices[0].message
                if not msg.tool_calls:
                    raise ValueError('Expected tool_calls in response but got none')
                raw_args = msg.tool_calls[0].function.arguments
                if isinstance(raw_args, str):
                    parsed = json.loads(raw_args)
                else:
                    parsed = raw_args
                # --------- Fix: only use model_validate when output_format is not None ----------
                if output_format is not None:
                    return ChatInvokeCompletion(
                        completion=output_format.model_validate(parsed),
                        usage=None,
                    )
                else:
                    # If no output_format, return dict directly
                    return ChatInvokeCompletion(
                        completion=parsed,
                        usage=None,
                    )
            except RateLimitError as e:
                raise ModelRateLimitError(str(e), model=self.name) from e
            except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
                raise ModelProviderError(str(e), model=self.name) from e
            except Exception as e:
                raise ModelProviderError(str(e), model=self.name) from e
        # (3) JSON Output path (official response_format).
        # NOTE(review): this branch appears unreachable — any output_format with
        # model_json_schema is already consumed by path (2) above, which always
        # returns or raises. Confirm before relying on response_format here.
        if output_format is not None and hasattr(output_format, 'model_json_schema'):
            try:
                resp = await client.chat.completions.create(  # type: ignore
                    model=self.model,
                    messages=ds_messages,  # type: ignore
                    response_format={'type': 'json_object'},
                    **common,
                )
                content = resp.choices[0].message.content
                if not content:
                    raise ModelProviderError('Empty JSON content in DeepSeek response', model=self.name)
                parsed = output_format.model_validate_json(content)
                return ChatInvokeCompletion(
                    completion=parsed,
                    usage=None,
                )
            except RateLimitError as e:
                raise ModelRateLimitError(str(e), model=self.name) from e
            except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
                raise ModelProviderError(str(e), model=self.name) from e
            except Exception as e:
                raise ModelProviderError(str(e), model=self.name) from e
        raise ModelProviderError('No valid ainvoke execution path for DeepSeek LLM', model=self.name)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/views.py | browser_use/dom/views.py | import hashlib
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any
from cdp_use.cdp.accessibility.commands import GetFullAXTreeReturns
from cdp_use.cdp.accessibility.types import AXPropertyName
from cdp_use.cdp.dom.commands import GetDocumentReturns
from cdp_use.cdp.dom.types import ShadowRootType
from cdp_use.cdp.domsnapshot.commands import CaptureSnapshotReturns
from cdp_use.cdp.target.types import SessionID, TargetID, TargetInfo
from uuid_extensions import uuid7str
from browser_use.dom.utils import cap_text_length
from browser_use.observability import observe_debug
# Serializer types
# Attribute allow-list used when serializing DOM nodes for the LLM.
# Order is significant (attributes are emitted in this order) and each name
# must appear exactly once.
DEFAULT_INCLUDE_ATTRIBUTES = [
    'title',
    'type',
    'checked',  # HTML attribute; also covers the AX 'checked' state (deduplicated below)
    # 'class',
    'id',
    'name',
    'role',
    'value',
    'placeholder',
    'data-date-format',
    'alt',
    'aria-label',
    'aria-expanded',
    'data-state',
    'aria-checked',
    # ARIA value attributes for datetime/range inputs
    'aria-valuemin',
    'aria-valuemax',
    'aria-valuenow',
    'aria-placeholder',
    # Validation attributes - help agents avoid brute force attempts
    'pattern',
    'min',
    'max',
    'minlength',
    'maxlength',
    'step',
    'accept',  # File input types (e.g., accept="image/*" or accept=".pdf")
    'multiple',  # Whether multiple files/selections are allowed
    'inputmode',  # Virtual keyboard hint (numeric, tel, email, url, etc.)
    'autocomplete',  # Autocomplete behavior hint
    'data-mask',  # Input mask format (e.g., phone numbers, credit cards)
    'data-inputmask',  # Alternative input mask attribute
    'data-datepicker',  # jQuery datepicker indicator
    'format',  # Synthetic attribute for date/time input format (e.g., MM/dd/yyyy)
    'expected_format',  # Synthetic attribute for explicit expected format (e.g., AngularJS datepickers)
    'contenteditable',  # Rich text editor detection
    # Webkit shadow DOM identifiers
    'pseudo',
    # Accessibility properties from ax_node (ordered by importance for automation)
    # NOTE: 'checked' was listed here a second time; the duplicate is removed
    # since it is already included near the top of this list.
    'selected',
    'expanded',
    'pressed',
    'disabled',
    'invalid',  # Current validation state from AX node
    'valuemin',  # Min value from AX node (for datetime/range)
    'valuemax',  # Max value from AX node (for datetime/range)
    'valuenow',
    'keyshortcuts',
    'haspopup',
    'multiselectable',
    # Less commonly needed (uncomment if required):
    # 'readonly',
    'required',
    'valuetext',
    'level',
    'busy',
    'live',
    # Accessibility name (contains text content for StaticText elements)
    'ax_name',
]
# Attributes treated as stable/identifying when fingerprinting elements,
# grouped by purpose for readability (the groups union into one set).
STATIC_ATTRIBUTES = (
    # Core identity and semantics
    {'class', 'id', 'name', 'type', 'placeholder', 'aria-label', 'title', 'role'}
    # Test-automation hooks
    | {'data-testid', 'data-test', 'data-cy', 'data-selenium'}
    # Form association and state
    | {'for', 'required', 'disabled', 'readonly', 'checked', 'selected', 'multiple', 'accept'}
    # Link targets
    | {'href', 'target', 'rel'}
    # ARIA relationships and state
    | {
        'aria-describedby',
        'aria-labelledby',
        'aria-controls',
        'aria-owns',
        'aria-live',
        'aria-atomic',
        'aria-busy',
        'aria-disabled',
        'aria-hidden',
        'aria-pressed',
        'aria-checked',
        'aria-selected',
    }
    # Miscellaneous element metadata
    | {'tabindex', 'alt', 'src', 'lang', 'itemscope', 'itemtype', 'itemprop'}
    # Webkit shadow DOM attributes and ARIA value attributes
    | {'pseudo', 'aria-valuemin', 'aria-valuemax', 'aria-valuenow', 'aria-placeholder'}
)
# Class patterns that indicate dynamic/transient UI state - excluded from stable hash
DYNAMIC_CLASS_PATTERNS = frozenset(
{
'focus',
'hover',
'active',
'selected',
'disabled',
'animation',
'transition',
'loading',
'open',
'closed',
'expanded',
'collapsed',
'visible',
'hidden',
'pressed',
'checked',
'highlighted',
'current',
'entering',
'leaving',
}
)
class MatchLevel(Enum):
"""Element matching strictness levels for history replay."""
EXACT = 1 # Full hash with all attributes (current behavior)
STABLE = 2 # Hash with dynamic classes filtered out
XPATH = 3 # XPath string comparison
def filter_dynamic_classes(class_str: str | None) -> str:
"""
Remove dynamic state classes, keep semantic/identifying ones.
Returns sorted classes for deterministic hashing.
"""
if not class_str:
return ''
classes = class_str.split()
stable = [c for c in classes if not any(pattern in c.lower() for pattern in DYNAMIC_CLASS_PATTERNS)]
return ' '.join(sorted(stable))
@dataclass
class CurrentPageTargets:
    """CDP target info for the current page plus iframe targets."""

    page_session: TargetInfo  # target info for the top-level page
    iframe_sessions: list[TargetInfo]
    """
    Iframe sessions are ALL the iframes sessions of all the pages (not just the current page)
    """
@dataclass
class TargetAllTrees:
    """Bundle of the CDP tree captures collected for a single target."""

    snapshot: CaptureSnapshotReturns  # DOMSnapshot.captureSnapshot result
    dom_tree: GetDocumentReturns  # DOM.getDocument result
    ax_tree: GetFullAXTreeReturns  # Accessibility.getFullAXTree result
    device_pixel_ratio: float  # used to convert device pixels to CSS pixels
    cdp_timing: dict[str, float]  # per-call timing measurements, keyed by name
@dataclass(slots=True)
class PropagatingBounds:
    """Track bounds that propagate from parent elements to filter children."""

    tag: str  # The tag that started propagation ('a' or 'button')
    bounds: 'DOMRect'  # The bounding box
    node_id: int  # Node ID for debugging
    depth: int  # How deep in tree this started (for debugging)
@dataclass(slots=True)
class SimplifiedNode:
"""Simplified tree node for optimization."""
original_node: 'EnhancedDOMTreeNode'
children: list['SimplifiedNode']
should_display: bool = True
is_interactive: bool = False # True if element is in selector_map
is_new: bool = False
ignored_by_paint_order: bool = False # More info in dom/serializer/paint_order.py
excluded_by_parent: bool = False # New field for bbox filtering
is_shadow_host: bool = False # New field for shadow DOM hosts
is_compound_component: bool = False # True for virtual components of compound controls
def _clean_original_node_json(self, node_json: dict) -> dict:
"""Recursively remove children_nodes and shadow_roots from original_node JSON."""
# Remove the fields we don't want in SimplifiedNode serialization
if 'children_nodes' in node_json:
del node_json['children_nodes']
if 'shadow_roots' in node_json:
del node_json['shadow_roots']
# Clean nested content_document if it exists
if node_json.get('content_document'):
node_json['content_document'] = self._clean_original_node_json(node_json['content_document'])
return node_json
def __json__(self) -> dict:
original_node_json = self.original_node.__json__()
# Remove children_nodes and shadow_roots to avoid duplication with SimplifiedNode.children
cleaned_original_node_json = self._clean_original_node_json(original_node_json)
return {
'should_display': self.should_display,
'is_interactive': self.is_interactive,
'ignored_by_paint_order': self.ignored_by_paint_order,
'excluded_by_parent': self.excluded_by_parent,
'original_node': cleaned_original_node_json,
'children': [c.__json__() for c in self.children],
}
class NodeType(int, Enum):
    """DOM node types based on the DOM specification."""

    # Values mirror the standard Node.nodeType constants
    # (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType).
    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5  # legacy, removed from the modern DOM spec
    ENTITY_NODE = 6  # legacy, removed from the modern DOM spec
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12  # legacy, removed from the modern DOM spec
@dataclass(slots=True)
class DOMRect:
x: float
y: float
width: float
height: float
def to_dict(self) -> dict[str, Any]:
return {
'x': self.x,
'y': self.y,
'width': self.width,
'height': self.height,
}
def __json__(self) -> dict:
return self.to_dict()
@dataclass(slots=True)
class EnhancedAXProperty:
    """we don't need `sources` and `related_nodes` for now (not sure how to use them)
    TODO: there is probably some way to determine whether it has a value or related nodes or not, but for now it's kinda fine idk
    """

    name: AXPropertyName  # CDP accessibility property name (e.g. 'checked', 'expanded')
    value: str | bool | None  # property value, normalized to str/bool where possible
    # related_nodes: list[EnhancedAXRelatedNode] | None
@dataclass(slots=True)
class EnhancedAXNode:
    """Pruned accessibility-tree node (subset of CDP's AXNode fields)."""

    ax_node_id: str
    """Not to be confused the DOM node_id. Only useful for AX node tree"""
    ignored: bool  # True if the accessibility tree ignores this node
    # we don't need ignored_reasons as we anyway ignore the node otherwise
    role: str | None  # ARIA/computed role, if any
    name: str | None  # accessible name
    description: str | None  # accessible description
    properties: list[EnhancedAXProperty] | None
    child_ids: list[str] | None  # ax_node_ids of children within the AX tree
@dataclass(slots=True)
class EnhancedSnapshotNode:
    """Snapshot data extracted from DOMSnapshot for enhanced functionality."""

    is_clickable: bool | None  # snapshot-reported clickability flag
    cursor_style: str | None  # computed CSS cursor (e.g. 'pointer')
    bounds: DOMRect | None
    """
    Document coordinates (origin = top-left of the page, ignores current scroll).
    Equivalent JS API: layoutNode.boundingBox in the older API.
    Typical use: Quick hit-test that doesn't care about scroll position.
    """
    clientRects: DOMRect | None
    """
    Viewport coordinates (origin = top-left of the visible scrollport).
    Equivalent JS API: element.getClientRects() / getBoundingClientRect().
    Typical use: Pixel-perfect hit-testing on screen, taking current scroll into account.
    """
    scrollRects: DOMRect | None
    """
    Scrollable area of the element.
    """
    computed_styles: dict[str, str] | None
    """Computed styles from the layout tree"""
    paint_order: int | None
    """Paint order from the layout tree"""
    stacking_contexts: int | None
    """Stacking contexts from the layout tree"""
# @dataclass(slots=True)
# class SuperSelector:
# node_id: int
# backend_node_id: int
# frame_id: str | None
# target_id: TargetID
# node_type: NodeType
# node_name: str
# # is_visible: bool | None
# # is_scrollable: bool | None
# element_index: int | None
@dataclass(slots=True)
class EnhancedDOMTreeNode:
    """
    Enhanced DOM tree node that contains information from AX, DOM, and Snapshot trees. It's mostly based on the types on DOM node type with enhanced data from AX and Snapshot trees.

    @dev when serializing check if the value is a valid value first!

    Learn more about the fields:
    - (DOM node) https://chromedevtools.github.io/devtools-protocol/tot/DOM/#type-BackendNode
    - (AX node) https://chromedevtools.github.io/devtools-protocol/tot/Accessibility/#type-AXNode
    - (Snapshot node) https://chromedevtools.github.io/devtools-protocol/tot/DOMSnapshot/#type-DOMNode
    """

    # region - DOM Node data
    node_id: int
    backend_node_id: int
    node_type: NodeType
    """Node types, defined in `NodeType` enum."""
    node_name: str
    """Only applicable for `NodeType.ELEMENT_NODE`"""
    node_value: str
    """this is where the value from `NodeType.TEXT_NODE` is stored usually"""
    attributes: dict[str, str]
    """slightly changed from the original attributes to be more readable"""
    is_scrollable: bool | None
    """
    Whether the node is scrollable.
    """
    is_visible: bool | None
    """
    Whether the node is visible according to the upper most frame node.
    """
    absolute_position: DOMRect | None
    """
    Absolute position of the node in the document according to the top-left of the page.
    """

    # frames
    target_id: TargetID
    frame_id: str | None
    session_id: SessionID | None
    content_document: 'EnhancedDOMTreeNode | None'
    """
    Content document is the document inside a new iframe.
    """

    # Shadow DOM
    shadow_root_type: ShadowRootType | None
    shadow_roots: list['EnhancedDOMTreeNode'] | None
    """
    Shadow roots are the shadow DOMs of the element.
    """

    # Navigation (parent pointer + ordered children; both may be None at tree edges)
    parent_node: 'EnhancedDOMTreeNode | None'
    children_nodes: list['EnhancedDOMTreeNode'] | None
    # endregion - DOM Node data

    # region - AX Node data
    ax_node: EnhancedAXNode | None
    # endregion - AX Node data

    # region - Snapshot Node data
    snapshot_node: EnhancedSnapshotNode | None
    # endregion - Snapshot Node data

    # Compound control child components information
    _compound_children: list[dict[str, Any]] = field(default_factory=list)
    # Random per-instance identifier (uuid7 string), assigned at construction.
    uuid: str = field(default_factory=uuid7str)
@property
def parent(self) -> 'EnhancedDOMTreeNode | None':
    """Alias for ``parent_node``; None at the tree root."""
    return self.parent_node
@property
def children(self) -> list['EnhancedDOMTreeNode']:
    """Child node list; an empty list when ``children_nodes`` is unset."""
    if not self.children_nodes:
        return []
    return self.children_nodes
@property
def children_and_shadow_roots(self) -> list['EnhancedDOMTreeNode']:
    """All regular children followed by any shadow roots, as a fresh list."""
    # Build a new list so callers can never mutate children_nodes through the result.
    combined: list['EnhancedDOMTreeNode'] = []
    if self.children_nodes:
        combined.extend(self.children_nodes)
    if self.shadow_roots:
        combined.extend(self.shadow_roots)
    return combined
@property
def tag_name(self) -> str:
    """Lower-cased node name (e.g. 'DIV' -> 'div')."""
    return self.node_name.lower()
@property
def xpath(self) -> str:
    """Generate XPath for this DOM node, stopping at shadow boundaries or iframes.

    Walks up through ELEMENT_NODE / DOCUMENT_FRAGMENT_NODE ancestors, emitting one
    `tag[index]` segment per element. The `[index]` suffix is omitted when the
    element is the only sibling with its tag (see `_get_element_position`).
    """
    segments = []
    current_element = self
    while current_element and (
        current_element.node_type == NodeType.ELEMENT_NODE or current_element.node_type == NodeType.DOCUMENT_FRAGMENT_NODE
    ):
        # just pass through shadow roots
        if current_element.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
            current_element = current_element.parent_node
            continue

        # stop ONLY if we hit iframe
        if current_element.parent_node and current_element.parent_node.node_name.lower() == 'iframe':
            break

        position = self._get_element_position(current_element)
        tag_name = current_element.node_name.lower()
        xpath_index = f'[{position}]' if position > 0 else ''
        # prepend so the path reads root -> self
        segments.insert(0, f'{tag_name}{xpath_index}')
        current_element = current_element.parent_node
    return '/'.join(segments)
def _get_element_position(self, element: 'EnhancedDOMTreeNode') -> int:
    """Position of *element* among its same-tag element siblings.

    Returns 0 when no XPath index is needed (only sibling with this tag, no
    parent, or element not found among siblings); otherwise a 1-based index.
    """
    parent = element.parent_node
    if parent is None or not parent.children_nodes:
        return 0

    tag = element.node_name.lower()
    same_tag_siblings = [
        sibling
        for sibling in parent.children_nodes
        if sibling.node_type == NodeType.ELEMENT_NODE and sibling.node_name.lower() == tag
    ]

    if len(same_tag_siblings) <= 1:
        return 0  # No index needed if it's the only one

    try:
        # XPath indices are 1-based
        return same_tag_siblings.index(element) + 1
    except ValueError:
        return 0
def __json__(self) -> dict:
    """Serializes the node and its descendants to a dictionary, omitting parent references.

    Parent pointers are excluded so the output stays acyclic; content document,
    shadow roots and children are serialized recursively.
    """
    return {
        'node_id': self.node_id,
        'backend_node_id': self.backend_node_id,
        'node_type': self.node_type.name,
        'node_name': self.node_name,
        'node_value': self.node_value,
        'is_visible': self.is_visible,
        'attributes': self.attributes,
        'is_scrollable': self.is_scrollable,
        'session_id': self.session_id,
        'target_id': self.target_id,
        'frame_id': self.frame_id,
        'content_document': self.content_document.__json__() if self.content_document else None,
        'shadow_root_type': self.shadow_root_type,
        'ax_node': asdict(self.ax_node) if self.ax_node else None,
        'snapshot_node': asdict(self.snapshot_node) if self.snapshot_node else None,
        # these two in the end, so it's easier to read json
        'shadow_roots': [r.__json__() for r in self.shadow_roots] if self.shadow_roots else [],
        'children_nodes': [c.__json__() for c in self.children_nodes] if self.children_nodes else [],
    }
def get_all_children_text(self, max_depth: int = -1) -> str:
    """Concatenate the text of all descendant TEXT_NODEs, one per line.

    A ``max_depth`` of -1 means unlimited depth; otherwise descendants deeper
    than ``max_depth`` are ignored.
    """
    collected: list[str] = []

    def _walk(current: 'EnhancedDOMTreeNode', depth: int) -> None:
        if max_depth != -1 and depth > max_depth:
            return
        if current.node_type == NodeType.TEXT_NODE:
            collected.append(current.node_value)
        elif current.node_type == NodeType.ELEMENT_NODE:
            for child in current.children:
                _walk(child, depth + 1)

    _walk(self, 0)
    return '\n'.join(collected).strip()
def __repr__(self) -> str:
    """
    @DEV ! don't display this to the LLM, it's SUPER long
    """
    attrs = ', '.join(f'{key}={value}' for key, value in self.attributes.items())
    scrollable = getattr(self, 'is_scrollable', False)
    child_count = len(self.children_nodes or [])
    return (
        f'<{self.tag_name} {attrs} is_scrollable={scrollable} '
        f'num_children={child_count} >{self.node_value}</{self.tag_name}>'
    )
def llm_representation(self, max_text_length: int = 100) -> str:
    """
    Token friendly representation of the node, used in the LLM
    """
    capped = cap_text_length(self.get_all_children_text(), max_text_length) or ''
    return f'<{self.tag_name}>{capped}'
def get_meaningful_text_for_llm(self) -> str:
    """
    Get the meaningful text content that the LLM actually sees for this element.
    This matches exactly what goes into the DOMTreeSerializer output.
    """
    text = ''
    attrs = getattr(self, 'attributes', None)
    if attrs:
        # Priority order: value, aria-label, title, placeholder, alt, text content
        for candidate in ('value', 'aria-label', 'title', 'placeholder', 'alt'):
            if attrs.get(candidate):
                text = attrs[candidate]
                break
    # Fallback to text content if no meaningful attributes
    if not text:
        text = self.get_all_children_text()
    return text.strip()
@property
def is_actually_scrollable(self) -> bool:
    """
    Enhanced scroll detection that combines CDP detection with CSS analysis.

    This detects scrollable elements that Chrome's CDP might miss, which is common
    in iframes and dynamically sized containers.
    """
    # First check if CDP already detected it as scrollable
    if self.is_scrollable:
        return True

    # Enhanced detection for elements CDP missed
    if not self.snapshot_node:
        return False

    # Check scroll vs client rects - this is the most reliable indicator
    scroll_rects = self.snapshot_node.scrollRects
    client_rects = self.snapshot_node.clientRects

    if scroll_rects and client_rects:
        # Content is larger than visible area = scrollable
        has_vertical_scroll = scroll_rects.height > client_rects.height + 1  # +1 for rounding
        has_horizontal_scroll = scroll_rects.width > client_rects.width + 1

        if has_vertical_scroll or has_horizontal_scroll:
            # Also check CSS to make sure scrolling is allowed
            if self.snapshot_node.computed_styles:
                styles = self.snapshot_node.computed_styles
                # overflow-x/-y fall back to the shorthand `overflow` value
                overflow = styles.get('overflow', 'visible').lower()
                overflow_x = styles.get('overflow-x', overflow).lower()
                overflow_y = styles.get('overflow-y', overflow).lower()

                # Only allow scrolling if overflow is explicitly set to auto, scroll, or overlay
                # Do NOT consider 'visible' overflow as scrollable - this was causing the issue
                allows_scroll = (
                    overflow in ['auto', 'scroll', 'overlay']
                    or overflow_x in ['auto', 'scroll', 'overlay']
                    or overflow_y in ['auto', 'scroll', 'overlay']
                )
                return allows_scroll
            else:
                # No CSS info, but content overflows - be more conservative
                # Only consider it scrollable if it's a common scrollable container element
                scrollable_tags = {'div', 'main', 'section', 'article', 'aside', 'body', 'html'}
                return self.tag_name.lower() in scrollable_tags

    return False
@property
def should_show_scroll_info(self) -> bool:
    """
    Simple check: show scroll info only if this element is scrollable
    and doesn't have a scrollable parent (to avoid nested scroll spam).

    Special case for iframes: Always show scroll info since Chrome might not
    always detect iframe scrollability correctly (scrollHeight: 0 issue).
    """
    tag = self.tag_name.lower()

    # iframes always get scroll info, even when not detected as scrollable
    if tag == 'iframe':
        return True

    # Non-iframe elements must actually be scrollable
    if not (self.is_scrollable or self.is_actually_scrollable):
        return False

    # iframe content documents (body/html) always show scroll info
    if tag in ('body', 'html'):
        return True

    # Suppress when an ancestor already reports scrollability (avoid nested spam)
    parent = self.parent_node
    if parent is not None and (parent.is_scrollable or parent.is_actually_scrollable):
        return False
    return True
def _find_html_in_content_document(self) -> 'EnhancedDOMTreeNode | None':
    """Locate the <html> element inside this iframe's content document, if any."""
    doc = self.content_document
    if doc is None:
        return None

    # The content document node may itself be the <html> element...
    if doc.tag_name.lower() == 'html':
        return doc

    # ...otherwise search its direct children.
    for child in doc.children_nodes or []:
        if child.tag_name.lower() == 'html':
            return child
    return None
@property
def scroll_info(self) -> dict[str, Any] | None:
    """Calculate scroll information for this element if it's scrollable.

    Returns None when the element is not scrollable or snapshot rects are
    missing. The scrollRects x/y values are treated as the current scroll
    offsets; clientRects give the visible viewport of the element.
    """
    if not self.is_actually_scrollable or not self.snapshot_node:
        return None

    # Get scroll and client rects from snapshot data
    scroll_rects = self.snapshot_node.scrollRects
    client_rects = self.snapshot_node.clientRects
    bounds = self.snapshot_node.bounds

    if not scroll_rects or not client_rects:
        return None

    # Calculate scroll position and percentages
    scroll_top = scroll_rects.y
    scroll_left = scroll_rects.x

    # Total scrollable height and width
    scrollable_height = scroll_rects.height
    scrollable_width = scroll_rects.width

    # Visible (client) dimensions
    visible_height = client_rects.height
    visible_width = client_rects.width

    # Calculate how much content is above/below/left/right of current view
    content_above = max(0, scroll_top)
    content_below = max(0, scrollable_height - visible_height - scroll_top)
    content_left = max(0, scroll_left)
    content_right = max(0, scrollable_width - visible_width - scroll_left)

    # Calculate scroll percentages (0 when there is nothing to scroll)
    vertical_scroll_percentage = 0
    horizontal_scroll_percentage = 0

    if scrollable_height > visible_height:
        max_scroll_top = scrollable_height - visible_height
        vertical_scroll_percentage = (scroll_top / max_scroll_top) * 100 if max_scroll_top > 0 else 0

    if scrollable_width > visible_width:
        max_scroll_left = scrollable_width - visible_width
        horizontal_scroll_percentage = (scroll_left / max_scroll_left) * 100 if max_scroll_left > 0 else 0

    # Calculate pages equivalent (using visible height as page unit)
    pages_above = content_above / visible_height if visible_height > 0 else 0
    pages_below = content_below / visible_height if visible_height > 0 else 0
    total_pages = scrollable_height / visible_height if visible_height > 0 else 1

    return {
        'scroll_top': scroll_top,
        'scroll_left': scroll_left,
        'scrollable_height': scrollable_height,
        'scrollable_width': scrollable_width,
        'visible_height': visible_height,
        'visible_width': visible_width,
        'content_above': content_above,
        'content_below': content_below,
        'content_left': content_left,
        'content_right': content_right,
        'vertical_scroll_percentage': round(vertical_scroll_percentage, 1),
        'horizontal_scroll_percentage': round(horizontal_scroll_percentage, 1),
        'pages_above': round(pages_above, 1),
        'pages_below': round(pages_below, 1),
        'total_pages': round(total_pages, 1),
        'can_scroll_up': content_above > 0,
        'can_scroll_down': content_below > 0,
        'can_scroll_left': content_left > 0,
        'can_scroll_right': content_right > 0,
    }
def get_scroll_info_text(self) -> str:
    """Get human-readable scroll information text for this element.

    For iframes, scroll info is read from the <html> element inside the
    iframe's content document (Chrome may not report the iframe element
    itself as scrollable); iframes without detail fall back to 'scroll'.
    Non-scrollable elements yield an empty string.
    """
    # Special case for iframes: check content document for scroll info
    if self.tag_name.lower() == 'iframe':
        if self.content_document:
            html_element = self._find_html_in_content_document()
            if html_element and html_element.scroll_info:
                info = html_element.scroll_info
                # Provide minimal but useful scroll info
                pages_below = info.get('pages_below', 0)
                pages_above = info.get('pages_above', 0)
                v_pct = int(info.get('vertical_scroll_percentage', 0))
                if pages_below > 0 or pages_above > 0:
                    # Fixed mojibake: the up/down arrows were mis-encoded as 'โ'
                    return f'scroll: {pages_above:.1f}↑ {pages_below:.1f}↓ {v_pct}%'
        return 'scroll'

    scroll_info = self.scroll_info
    if not scroll_info:
        return ''

    parts = []
    # Vertical scroll info (concise format)
    if scroll_info['scrollable_height'] > scroll_info['visible_height']:
        parts.append(f'{scroll_info["pages_above"]:.1f} pages above, {scroll_info["pages_below"]:.1f} pages below')
    # Horizontal scroll info (concise format)
    if scroll_info['scrollable_width'] > scroll_info['visible_width']:
        parts.append(f'horizontal {scroll_info["horizontal_scroll_percentage"]:.0f}%')
    return ' '.join(parts)
@property
def element_hash(self) -> int:
    """Convenience alias for ``hash(self)`` (see ``__hash__``)."""
    return hash(self)
def compute_stable_hash(self) -> int:
    """
    Compute hash with dynamic classes filtered out.

    More stable across sessions than element_hash since it excludes
    transient CSS state classes like focus, hover, animation, etc.
    Combines the parent branch path, filtered static attributes and the
    accessibility name, then truncates a SHA-256 digest to 64 bits.
    """
    parent_branch_path = self._get_parent_branch_path()
    parent_branch_path_string = '/'.join(parent_branch_path)

    # Filter dynamic classes before building attributes string
    filtered_attrs: dict[str, str] = {}
    for k, v in self.attributes.items():
        if k not in STATIC_ATTRIBUTES:
            continue
        if k == 'class':
            v = filter_dynamic_classes(v)
            if not v:  # Skip empty class after filtering
                continue
        filtered_attrs[k] = v

    attributes_string = ''.join(f'{k}={v}' for k, v in sorted(filtered_attrs.items()))

    ax_name = ''
    if self.ax_node and self.ax_node.name:
        ax_name = f'|ax_name={self.ax_node.name}'

    combined_string = f'{parent_branch_path_string}|{attributes_string}{ax_name}'
    hash_hex = hashlib.sha256(combined_string.encode()).hexdigest()
    # first 16 hex chars -> 64-bit int
    return int(hash_hex[:16], 16)
def __str__(self) -> str:
    """Short identifier: ``[<tag>#<last-4-of-frame-id>:<backend_node_id>]``."""
    frame_suffix = self.frame_id[-4:] if self.frame_id else '?'
    return f'[<{self.tag_name}>#{frame_suffix}:{self.backend_node_id}]'
def __hash__(self) -> int:
    """
    Hash the element based on its parent branch path, attributes, and accessibility name.

    TODO: migrate this to use only backendNodeId + current SessionId
    """
    # Get parent branch path
    parent_branch_path = self._get_parent_branch_path()
    parent_branch_path_string = '/'.join(parent_branch_path)

    # Only STATIC_ATTRIBUTES participate, sorted for deterministic ordering
    attributes_string = ''.join(
        f'{k}={v}' for k, v in sorted((k, v) for k, v in self.attributes.items() if k in STATIC_ATTRIBUTES)
    )

    # Include accessibility name (ax_name) if available - this helps distinguish
    # elements that have identical structure and attributes but different visible text
    ax_name = ''
    if self.ax_node and self.ax_node.name:
        ax_name = f'|ax_name={self.ax_node.name}'

    # Combine all for final hash
    combined_string = f'{parent_branch_path_string}|{attributes_string}{ax_name}'
    element_hash = hashlib.sha256(combined_string.encode()).hexdigest()
    # Convert to int for __hash__ return type - use first 16 chars and convert from hex to int
    return int(element_hash[:16], 16)
def parent_branch_hash(self) -> int:
    """Hash derived from the parent branch path alone (tag names root -> self),
    truncated from SHA-256 to a 64-bit int."""
    path_string = '/'.join(self._get_parent_branch_path())
    digest = hashlib.sha256(path_string.encode()).hexdigest()
    return int(digest[:16], 16)
def _get_parent_branch_path(self) -> list[str]:
    """Tag names of all ELEMENT_NODE ancestors (including self), root first."""
    chain: list[str] = []
    node: 'EnhancedDOMTreeNode | None' = self
    while node is not None:
        if node.node_type == NodeType.ELEMENT_NODE:
            chain.append(node.tag_name)
        node = node.parent_node
    chain.reverse()
    return chain
# Lookup table: element index -> its enhanced DOM tree node.
DOMSelectorMap = dict[int, EnhancedDOMTreeNode]
@dataclass
class SerializedDOMState:
    # Serialized DOM snapshot plus the index -> node selector map.
    _root: SimplifiedNode | None
    """Not meant to be used directly, use `llm_representation` instead"""
    selector_map: DOMSelectorMap

    @observe_debug(ignore_input=True, ignore_output=True, name='llm_representation')
    def llm_representation(
        self,
        include_attributes: list[str] | None = None,
    ) -> str:
        """Kinda ugly, but leaving this as an internal method because include_attributes are a parameter on the agent, so we need to leave it as a 2 step process"""
        # Function-scope import — presumably to avoid a circular import; TODO confirm.
        from browser_use.dom.serializer.serializer import DOMTreeSerializer

        if not self._root:
            return 'Empty DOM tree (you might have to wait for the page to load)'
        include_attributes = include_attributes or DEFAULT_INCLUDE_ATTRIBUTES
        return DOMTreeSerializer.serialize_tree(self._root, include_attributes)

    @observe_debug(ignore_input=True, ignore_output=True, name='eval_representation')
    def eval_representation(
        self,
        include_attributes: list[str] | None = None,
    ) -> str:
        """
        Evaluation-focused DOM representation without interactive indexes.

        This serializer is designed for evaluation/judge contexts where:
        - No interactive indexes are needed (we're not clicking)
        - Full HTML structure should be preserved for context
        - More attribute information is helpful
        - Text content is important for understanding page structure
        """
        # Function-scope import — presumably to avoid a circular import; TODO confirm.
        from browser_use.dom.serializer.eval_serializer import DOMEvalSerializer

        if not self._root:
            return 'Empty DOM tree (you might have to wait for the page to load)'
        include_attributes = include_attributes or DEFAULT_INCLUDE_ATTRIBUTES
        return DOMEvalSerializer.serialize_tree(self._root, include_attributes)
@dataclass
class DOMInteractedElement:
    """
    Snapshot of a DOM element taken at the moment it was interacted with.

    Stores identifying data (hashes, xpath, bounds, accessibility name) so the
    same element can later be matched against a fresh DOM tree.

    TODO: this is a bit of a hack, we should probably have a better way to do this
    """

    node_id: int
    backend_node_id: int
    frame_id: str | None
    node_type: NodeType
    node_value: str
    node_name: str
    attributes: dict[str, str] | None
    bounds: DOMRect | None
    x_path: str
    element_hash: int
    # Stable hash with dynamic classes filtered - computed at save time for consistent matching
    stable_hash: int | None = None
    # Accessibility name (visible text) - used for fallback matching when hash/xpath fail
    ax_name: str | None = None

    def to_dict(self) -> dict[str, Any]:
        """JSON-friendly dict form (enum -> value, bounds -> dict)."""
        return {
            'node_id': self.node_id,
            'backend_node_id': self.backend_node_id,
            'frame_id': self.frame_id,
            'node_type': self.node_type.value,
            'node_value': self.node_value,
            'node_name': self.node_name,
            'attributes': self.attributes,
            'x_path': self.x_path,
            'element_hash': self.element_hash,
            'stable_hash': self.stable_hash,
            'bounds': self.bounds.to_dict() if self.bounds else None,
            'ax_name': self.ax_name,
        }

    @classmethod
    def load_from_enhanced_dom_tree(cls, enhanced_dom_tree: EnhancedDOMTreeNode) -> 'DOMInteractedElement':
        """Build an interacted-element record from a live enhanced DOM node."""
        # Extract accessibility name if available
        ax_name = None
        if enhanced_dom_tree.ax_node and enhanced_dom_tree.ax_node.name:
            ax_name = enhanced_dom_tree.ax_node.name

        return cls(
            node_id=enhanced_dom_tree.node_id,
            backend_node_id=enhanced_dom_tree.backend_node_id,
            frame_id=enhanced_dom_tree.frame_id,
            node_type=enhanced_dom_tree.node_type,
            node_value=enhanced_dom_tree.node_value,
            node_name=enhanced_dom_tree.node_name,
            attributes=enhanced_dom_tree.attributes,
            bounds=enhanced_dom_tree.snapshot_node.bounds if enhanced_dom_tree.snapshot_node else None,
            x_path=enhanced_dom_tree.xpath,
            element_hash=hash(enhanced_dom_tree),
            stable_hash=enhanced_dom_tree.compute_stable_hash(),  # Compute from source for single source of truth
            ax_name=ax_name,
        )
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/enhanced_snapshot.py | browser_use/dom/enhanced_snapshot.py | """
Enhanced snapshot processing for browser-use DOM tree extraction.
This module provides stateless functions for parsing Chrome DevTools Protocol (CDP) DOMSnapshot data
to extract visibility, clickability, cursor styles, and other layout information.
"""
from cdp_use.cdp.domsnapshot.commands import CaptureSnapshotReturns
from cdp_use.cdp.domsnapshot.types import (
LayoutTreeSnapshot,
NodeTreeSnapshot,
RareBooleanData,
)
from browser_use.dom.views import DOMRect, EnhancedSnapshotNode
# Only the ESSENTIAL computed styles for interactivity and visibility detection.
# NOTE: order matters — _parse_computed_styles maps layout style indices onto
# this list positionally.
REQUIRED_COMPUTED_STYLES = [
    # Only styles actually accessed in the codebase (prevents Chrome crashes on heavy sites)
    'display',  # Used in service.py visibility detection
    'visibility',  # Used in service.py visibility detection
    'opacity',  # Used in service.py visibility detection
    'overflow',  # Used in views.py scrollability detection
    'overflow-x',  # Used in views.py scrollability detection
    'overflow-y',  # Used in views.py scrollability detection
    'cursor',  # Used in enhanced_snapshot.py cursor extraction
    'pointer-events',  # Used for clickability logic
    'position',  # Used for visibility logic
    'background-color',  # Used for visibility logic
]
def _parse_rare_boolean_data(rare_data: RareBooleanData, index: int) -> bool | None:
"""Parse rare boolean data from snapshot - returns True if index is in the rare data."""
return index in rare_data['index']
def _parse_computed_styles(strings: list[str], style_indices: list[int]) -> dict[str, str]:
    """Parse computed styles from layout tree using string indices.

    ``style_indices`` is positional: entry i corresponds to the i-th name in
    REQUIRED_COMPUTED_STYLES. Out-of-range string indices are skipped.
    """
    resolved: dict[str, str] = {}
    for position, string_index in enumerate(style_indices):
        if position >= len(REQUIRED_COMPUTED_STYLES):
            break  # extra indices have no corresponding style name
        if 0 <= string_index < len(strings):
            resolved[REQUIRED_COMPUTED_STYLES[position]] = strings[string_index]
    return resolved
def build_snapshot_lookup(
    snapshot: CaptureSnapshotReturns,
    device_pixel_ratio: float = 1.0,
) -> dict[int, EnhancedSnapshotNode]:
    """Build a lookup table of backend node ID to enhanced snapshot data with everything calculated upfront.

    Args:
        snapshot: Raw DOMSnapshot.captureSnapshot result (string table + documents).
        device_pixel_ratio: Device px / CSS px; used to convert bounds to CSS pixels.
    """
    import logging

    logger = logging.getLogger('browser_use.dom.enhanced_snapshot')

    snapshot_lookup: dict[int, EnhancedSnapshotNode] = {}

    if not snapshot['documents']:
        return snapshot_lookup

    strings = snapshot['strings']
    logger.debug(f'๐ SNAPSHOT: Processing {len(snapshot["documents"])} documents with {len(strings)} strings')

    for doc_idx, document in enumerate(snapshot['documents']):
        nodes: NodeTreeSnapshot = document['nodes']
        layout: LayoutTreeSnapshot = document['layout']

        # Build backend node id to snapshot index lookup
        backend_node_to_snapshot_index = {}
        if 'backendNodeId' in nodes:
            for i, backend_node_id in enumerate(nodes['backendNodeId']):
                backend_node_to_snapshot_index[backend_node_id] = i

        # Log document info (documentURL is an index into the string table)
        doc_url = strings[document.get('documentURL', 0)] if document.get('documentURL', 0) < len(strings) else 'N/A'
        logger.debug(
            f'๐ SNAPSHOT doc[{doc_idx}]: url={doc_url[:80]}... has {len(backend_node_to_snapshot_index)} nodes, '
            f'layout has {len(layout.get("nodeIndex", []))} entries'
        )

        # PERFORMANCE: Pre-build layout index map to eliminate O(nยฒ) double lookups
        # Preserve original behavior: use FIRST occurrence for duplicates
        layout_index_map = {}
        if layout and 'nodeIndex' in layout:
            for layout_idx, node_index in enumerate(layout['nodeIndex']):
                if node_index not in layout_index_map:  # Only store first occurrence
                    layout_index_map[node_index] = layout_idx

        # Build snapshot lookup for each backend node id
        for backend_node_id, snapshot_index in backend_node_to_snapshot_index.items():
            is_clickable = None
            if 'isClickable' in nodes:
                is_clickable = _parse_rare_boolean_data(nodes['isClickable'], snapshot_index)

            # Find corresponding layout node
            cursor_style = None
            is_visible = None
            bounding_box = None
            computed_styles = {}

            # Look for layout tree node that corresponds to this snapshot node
            paint_order = None
            client_rects = None
            scroll_rects = None
            stacking_contexts = None

            if snapshot_index in layout_index_map:
                layout_idx = layout_index_map[snapshot_index]
                if layout_idx < len(layout.get('bounds', [])):
                    # Parse bounding box
                    bounds = layout['bounds'][layout_idx]
                    if len(bounds) >= 4:
                        # IMPORTANT: CDP coordinates are in device pixels, convert to CSS pixels
                        # by dividing by the device pixel ratio
                        raw_x, raw_y, raw_width, raw_height = bounds[0], bounds[1], bounds[2], bounds[3]

                        # Apply device pixel ratio scaling to convert device pixels to CSS pixels
                        bounding_box = DOMRect(
                            x=raw_x / device_pixel_ratio,
                            y=raw_y / device_pixel_ratio,
                            width=raw_width / device_pixel_ratio,
                            height=raw_height / device_pixel_ratio,
                        )

                # Parse computed styles for this layout node
                if layout_idx < len(layout.get('styles', [])):
                    style_indices = layout['styles'][layout_idx]
                    computed_styles = _parse_computed_styles(strings, style_indices)
                    cursor_style = computed_styles.get('cursor')

                # Extract paint order if available
                if layout_idx < len(layout.get('paintOrders', [])):
                    paint_order = layout.get('paintOrders', [])[layout_idx]

                # Extract client rects if available
                # NOTE(review): client/scroll rects are used as-is, without the
                # device-pixel-ratio scaling applied to bounds above — confirm intended.
                client_rects_data = layout.get('clientRects', [])
                if layout_idx < len(client_rects_data):
                    client_rect_data = client_rects_data[layout_idx]
                    if client_rect_data and len(client_rect_data) >= 4:
                        client_rects = DOMRect(
                            x=client_rect_data[0],
                            y=client_rect_data[1],
                            width=client_rect_data[2],
                            height=client_rect_data[3],
                        )

                # Extract scroll rects if available
                scroll_rects_data = layout.get('scrollRects', [])
                if layout_idx < len(scroll_rects_data):
                    scroll_rect_data = scroll_rects_data[layout_idx]
                    if scroll_rect_data and len(scroll_rect_data) >= 4:
                        scroll_rects = DOMRect(
                            x=scroll_rect_data[0],
                            y=scroll_rect_data[1],
                            width=scroll_rect_data[2],
                            height=scroll_rect_data[3],
                        )

                # Extract stacking contexts if available
                # NOTE(review): the guard takes len() of the stackingContexts
                # RareBooleanData dict (its key count) while the value is read
                # from its 'index' list — these lengths differ; confirm semantics.
                if layout_idx < len(layout.get('stackingContexts', [])):
                    stacking_contexts = layout.get('stackingContexts', {}).get('index', [])[layout_idx]

            snapshot_lookup[backend_node_id] = EnhancedSnapshotNode(
                is_clickable=is_clickable,
                cursor_style=cursor_style,
                bounds=bounding_box,
                clientRects=client_rects,
                scrollRects=scroll_rects,
                computed_styles=computed_styles if computed_styles else None,
                paint_order=paint_order,
                stacking_contexts=stacking_contexts,
            )

    # Count how many have bounds (are actually visible/laid out)
    with_bounds = sum(1 for n in snapshot_lookup.values() if n.bounds)
    logger.debug(f'๐ SNAPSHOT: Built lookup with {len(snapshot_lookup)} total entries, {with_bounds} have bounds')
    return snapshot_lookup
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/service.py | browser_use/dom/service.py | import asyncio
import logging
import time
from typing import TYPE_CHECKING
from cdp_use.cdp.accessibility.commands import GetFullAXTreeReturns
from cdp_use.cdp.accessibility.types import AXNode
from cdp_use.cdp.dom.types import Node
from cdp_use.cdp.target import TargetID
from browser_use.dom.enhanced_snapshot import (
REQUIRED_COMPUTED_STYLES,
build_snapshot_lookup,
)
from browser_use.dom.serializer.serializer import DOMTreeSerializer
from browser_use.dom.views import (
DOMRect,
EnhancedAXNode,
EnhancedAXProperty,
EnhancedDOMTreeNode,
NodeType,
SerializedDOMState,
TargetAllTrees,
)
from browser_use.observability import observe_debug
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession
# Note: iframe limits are now configurable via BrowserProfile.max_iframes and BrowserProfile.max_iframe_depth
class DomService:
"""
Service for getting the DOM tree and other DOM-related information.
Either browser or page must be provided.
TODO: currently we start a new websocket connection PER STEP, we should definitely keep this persistent
"""
logger: logging.Logger
def __init__(
    self,
    browser_session: 'BrowserSession',
    logger: logging.Logger | None = None,
    cross_origin_iframes: bool = False,
    paint_order_filtering: bool = True,
    max_iframes: int = 100,
    max_iframe_depth: int = 5,
):
    """Create a DOM service bound to an existing browser session.

    Args:
        browser_session: Session used for all CDP calls.
        logger: Optional logger; defaults to the session's logger.
        cross_origin_iframes: Presumably enables descending into cross-origin
            iframes — used elsewhere in this service; confirm.
        paint_order_filtering: Presumably enables paint-order based element
            filtering — used elsewhere in this service; confirm.
        max_iframes: Upper bound on total iframes processed.
        max_iframe_depth: Upper bound on iframe nesting depth.
    """
    self.browser_session = browser_session
    self.logger = logger or browser_session.logger
    self.cross_origin_iframes = cross_origin_iframes
    self.paint_order_filtering = paint_order_filtering
    self.max_iframes = max_iframes
    self.max_iframe_depth = max_iframe_depth
async def __aenter__(self):
    """Enter async context; no setup is required."""
    return self
async def __aexit__(self, exc_type, exc_value, traceback):
    """Exit async context."""
    pass  # no need to cleanup anything, browser_session auto handles cleaning up session cache
def _build_enhanced_ax_node(self, ax_node: AXNode) -> EnhancedAXNode:
    """Convert a raw CDP AXNode dict into an EnhancedAXNode dataclass."""
    properties: list[EnhancedAXProperty] | None = None
    if 'properties' in ax_node and ax_node['properties']:
        properties = []
        for property in ax_node['properties']:
            try:
                # test whether property name can go into the enum (sometimes Chrome returns some random properties)
                # NOTE(review): EnhancedAXProperty is a plain dataclass, which does not
                # validate on construction — it is unclear what raises ValueError here;
                # confirm whether enum validation still exists.
                properties.append(
                    EnhancedAXProperty(
                        name=property['name'],
                        value=property.get('value', {}).get('value', None),
                        # related_nodes=[], # TODO: add related nodes
                    )
                )
            except ValueError:
                pass

    enhanced_ax_node = EnhancedAXNode(
        ax_node_id=ax_node['nodeId'],
        ignored=ax_node['ignored'],
        role=ax_node.get('role', {}).get('value', None),
        name=ax_node.get('name', {}).get('value', None),
        description=ax_node.get('description', {}).get('value', None),
        properties=properties,
        child_ids=ax_node.get('childIds', []) if ax_node.get('childIds') else None,
    )
    return enhanced_ax_node
async def _get_viewport_ratio(self, target_id: TargetID) -> float:
    """Return the device pixel ratio for the target (device px / CSS px).

    Queries Page.getLayoutMetrics over CDP; falls back to 1.0 when the
    metrics cannot be retrieved.
    """
    cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=target_id, focus=False)

    try:
        # Get the layout metrics which includes the visual viewport
        metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)
        visual_viewport = metrics.get('visualViewport', {})
        # IMPORTANT: Use CSS viewport instead of device pixel viewport
        # This fixes the coordinate mismatch on high-DPI displays
        css_visual_viewport = metrics.get('cssVisualViewport', {})
        css_layout_viewport = metrics.get('cssLayoutViewport', {})

        # Use CSS pixels (what JavaScript sees) instead of device pixels
        width = css_visual_viewport.get('clientWidth', css_layout_viewport.get('clientWidth', 1920.0))

        # Calculate device pixel ratio as device width / CSS width
        device_width = visual_viewport.get('clientWidth', width)
        css_width = css_visual_viewport.get('clientWidth', width)
        device_pixel_ratio = device_width / css_width if css_width > 0 else 1.0

        return float(device_pixel_ratio)
    except Exception as e:
        self.logger.debug(f'Viewport size detection failed: {e}')
        # Fallback to default viewport size
        return 1.0
@classmethod
def is_element_visible_according_to_all_parents(
    cls, node: EnhancedDOMTreeNode, html_frames: list[EnhancedDOMTreeNode]
) -> bool:
    """Check if the element is visible according to all its parent HTML frames.

    The element is considered visible only if (a) it has snapshot bounds,
    (b) its own computed styles do not hide it, and (c) its bounds intersect
    the scroll-adjusted viewport of every enclosing document frame in
    ``html_frames`` (walked innermost-first via ``reversed``).
    """
    if not node.snapshot_node:
        return False

    # CSS-level visibility from the element's own computed styles.
    computed_styles = node.snapshot_node.computed_styles or {}
    display = computed_styles.get('display', '').lower()
    visibility = computed_styles.get('visibility', '').lower()
    opacity = computed_styles.get('opacity', '1')

    if display == 'none' or visibility == 'hidden':
        return False

    try:
        if float(opacity) <= 0:
            return False
    except (ValueError, TypeError):
        # Non-numeric opacity (e.g. 'inherit') — treat as visible.
        pass

    # Start with the element's local bounds (in its own frame's coordinate system)
    # NOTE(review): current_bounds aliases node.snapshot_node.bounds, so the
    # in-place += / -= adjustments in the loop below mutate the snapshot
    # node's bounds as a side effect — confirm callers rely on (or at least
    # tolerate) this mutation.
    current_bounds = node.snapshot_node.bounds

    if not current_bounds:
        return False  # If there are no bounds, the element is not visible

    """
    Reverse iterate through the html frames (that can be either iframe or document -> if it's a document frame compare if the current bounds interest with it (taking scroll into account) otherwise move the current bounds by the iframe offset)
    """
    for frame in reversed(html_frames):
        if (
            frame.node_type == NodeType.ELEMENT_NODE
            and (frame.node_name.upper() == 'IFRAME' or frame.node_name.upper() == 'FRAME')
            and frame.snapshot_node
            and frame.snapshot_node.bounds
        ):
            iframe_bounds = frame.snapshot_node.bounds
            # negate the values added in `_construct_enhanced_node`
            current_bounds.x += iframe_bounds.x
            current_bounds.y += iframe_bounds.y

        if (
            frame.node_type == NodeType.ELEMENT_NODE
            and frame.node_name == 'HTML'
            and frame.snapshot_node
            and frame.snapshot_node.scrollRects
            and frame.snapshot_node.clientRects
        ):
            # For iframe content, we need to check visibility within the iframe's viewport
            # The scrollRects represent the current scroll position
            # The clientRects represent the viewport size
            # Elements are visible if they fall within the viewport after accounting for scroll

            # The viewport of the frame (what's actually visible)
            viewport_left = 0  # Viewport always starts at 0 in frame coordinates
            viewport_top = 0
            viewport_right = frame.snapshot_node.clientRects.width
            viewport_bottom = frame.snapshot_node.clientRects.height

            # Adjust element bounds by the scroll offset to get position relative to viewport
            # When scrolled down, scrollRects.y is positive, so we subtract it from element's y
            adjusted_x = current_bounds.x - frame.snapshot_node.scrollRects.x
            adjusted_y = current_bounds.y - frame.snapshot_node.scrollRects.y

            # NOTE(review): the vertical test allows a 1000px slack above and
            # below the viewport while the horizontal test is exact —
            # presumably to keep near-viewport elements; confirm intent.
            frame_intersects = (
                adjusted_x < viewport_right
                and adjusted_x + current_bounds.width > viewport_left
                and adjusted_y < viewport_bottom + 1000
                and adjusted_y + current_bounds.height > viewport_top - 1000
            )

            if not frame_intersects:
                return False

            # Keep the original coordinate adjustment to maintain consistency
            # This adjustment is needed for proper coordinate transformation
            current_bounds.x -= frame.snapshot_node.scrollRects.x
            current_bounds.y -= frame.snapshot_node.scrollRects.y

    # If we reach here, element is visible in main viewport and all containing iframes
    return True
async def _get_ax_tree_for_all_frames(self, target_id: TargetID) -> GetFullAXTreeReturns:
    """Collect every frame under the target and merge their AX trees.

    Walks the frame tree recursively, fires one getFullAXTree request per
    frame concurrently, and concatenates all returned nodes into a single
    ``{'nodes': [...]}`` payload.
    """
    cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=target_id, focus=False)
    frame_tree = await cdp_session.cdp_client.send.Page.getFrameTree(session_id=cdp_session.session_id)

    def _walk_frame_ids(tree_node) -> list[str]:
        # Pre-order traversal: this frame first, then its children.
        ids = [tree_node['frame']['id']]
        for child in tree_node.get('childFrames') or []:
            ids.extend(_walk_frame_ids(child))
        return ids

    all_frame_ids = _walk_frame_ids(frame_tree['frameTree'])

    # Issue all per-frame AX tree requests concurrently.
    requests = [
        cdp_session.cdp_client.send.Accessibility.getFullAXTree(
            params={'frameId': frame_id}, session_id=cdp_session.session_id
        )
        for frame_id in all_frame_ids
    ]
    ax_trees = await asyncio.gather(*requests)

    # Flatten every frame's nodes into one merged list.
    merged_nodes: list[AXNode] = []
    for tree in ax_trees:
        merged_nodes.extend(tree['nodes'])

    return {'nodes': merged_nodes}
async def _get_all_trees(self, target_id: TargetID) -> TargetAllTrees:
    """Fetch DOM snapshot, DOM tree, AX tree and device pixel ratio in parallel.

    The four CDP requests run concurrently with a 10s timeout; any request
    that does not finish is retried once with a 2s timeout. Also records
    per-phase timings and caps the number of snapshot documents at
    ``self.max_iframes``.

    Raises:
        TimeoutError: If any of the four requests still fails after the retry.
    """
    cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=target_id, focus=False)

    # Wait for the page to be ready first
    try:
        # NOTE(review): ready_state is never read — the evaluate only probes
        # that the runtime responds; confirm whether the result is needed.
        ready_state = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': 'document.readyState'}, session_id=cdp_session.session_id
        )
    except Exception as e:
        pass  # Page might not be ready yet

    # DEBUG: Log before capturing snapshot
    self.logger.debug(f'๐ DEBUG: Capturing DOM snapshot for target {target_id}')

    # Get actual scroll positions for all iframes before capturing snapshot
    start_iframe_scroll = time.time()
    iframe_scroll_positions = {}
    try:
        scroll_result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={
                'expression': """
(() => {
const scrollData = {};
const iframes = document.querySelectorAll('iframe');
iframes.forEach((iframe, index) => {
try {
const doc = iframe.contentDocument || iframe.contentWindow.document;
if (doc) {
scrollData[index] = {
scrollTop: doc.documentElement.scrollTop || doc.body.scrollTop || 0,
scrollLeft: doc.documentElement.scrollLeft || doc.body.scrollLeft || 0
};
}
} catch (e) {
// Cross-origin iframe, can't access
}
});
return scrollData;
})()
""",
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        if scroll_result and 'result' in scroll_result and 'value' in scroll_result['result']:
            iframe_scroll_positions = scroll_result['result']['value']
            for idx, scroll_data in iframe_scroll_positions.items():
                self.logger.debug(
                    f'๐ DEBUG: Iframe {idx} actual scroll position - scrollTop={scroll_data.get("scrollTop", 0)}, scrollLeft={scroll_data.get("scrollLeft", 0)}'
                )
    except Exception as e:
        self.logger.debug(f'Failed to get iframe scroll positions: {e}')
    iframe_scroll_ms = (time.time() - start_iframe_scroll) * 1000

    # Define CDP request factories to avoid duplication
    # (the same coroutines are needed again for the retry pass below).
    def create_snapshot_request():
        return cdp_session.cdp_client.send.DOMSnapshot.captureSnapshot(
            params={
                'computedStyles': REQUIRED_COMPUTED_STYLES,
                'includePaintOrder': True,
                'includeDOMRects': True,
                'includeBlendedBackgroundColors': False,
                'includeTextColorOpacities': False,
            },
            session_id=cdp_session.session_id,
        )

    def create_dom_tree_request():
        return cdp_session.cdp_client.send.DOM.getDocument(
            params={'depth': -1, 'pierce': True}, session_id=cdp_session.session_id
        )

    start_cdp_calls = time.time()

    # Create initial tasks
    tasks = {
        'snapshot': create_task_with_error_handling(create_snapshot_request(), name='get_snapshot'),
        'dom_tree': create_task_with_error_handling(create_dom_tree_request(), name='get_dom_tree'),
        'ax_tree': create_task_with_error_handling(self._get_ax_tree_for_all_frames(target_id), name='get_ax_tree'),
        'device_pixel_ratio': create_task_with_error_handling(self._get_viewport_ratio(target_id), name='get_viewport_ratio'),
    }

    # Wait for all tasks with timeout
    done, pending = await asyncio.wait(tasks.values(), timeout=10.0)

    # Retry any failed or timed out tasks
    if pending:
        for task in pending:
            task.cancel()

        # Retry mapping for pending tasks — keyed by the ORIGINAL task object
        # so we can map a still-pending task back to its request factory.
        retry_map = {
            tasks['snapshot']: lambda: create_task_with_error_handling(create_snapshot_request(), name='get_snapshot_retry'),
            tasks['dom_tree']: lambda: create_task_with_error_handling(create_dom_tree_request(), name='get_dom_tree_retry'),
            tasks['ax_tree']: lambda: create_task_with_error_handling(
                self._get_ax_tree_for_all_frames(target_id), name='get_ax_tree_retry'
            ),
            tasks['device_pixel_ratio']: lambda: create_task_with_error_handling(
                self._get_viewport_ratio(target_id), name='get_viewport_ratio_retry'
            ),
        }

        # Create new tasks only for the ones that didn't complete
        for key, task in tasks.items():
            if task in pending and task in retry_map:
                tasks[key] = retry_map[task]()

        # Wait again with shorter timeout
        done2, pending2 = await asyncio.wait([t for t in tasks.values() if not t.done()], timeout=2.0)
        if pending2:
            for task in pending2:
                task.cancel()

    # Extract results, tracking which ones failed
    results = {}
    failed = []
    for key, task in tasks.items():
        if task.done() and not task.cancelled():
            try:
                results[key] = task.result()
            except Exception as e:
                self.logger.warning(f'CDP request {key} failed with exception: {e}')
                failed.append(key)
        else:
            self.logger.warning(f'CDP request {key} timed out')
            failed.append(key)

    # If any required tasks failed, raise an exception
    if failed:
        raise TimeoutError(f'CDP requests failed or timed out: {", ".join(failed)}')

    snapshot = results['snapshot']
    dom_tree = results['dom_tree']
    ax_tree = results['ax_tree']
    device_pixel_ratio = results['device_pixel_ratio']

    end_cdp_calls = time.time()
    cdp_calls_ms = (end_cdp_calls - start_cdp_calls) * 1000

    # Calculate total time for _get_all_trees and overhead
    start_snapshot_processing = time.time()

    # DEBUG: Log snapshot info and limit documents to prevent explosion
    if snapshot and 'documents' in snapshot:
        original_doc_count = len(snapshot['documents'])

        # Limit to max_iframes documents to prevent iframe explosion
        if original_doc_count > self.max_iframes:
            self.logger.warning(
                f'โ ๏ธ Limiting processing of {original_doc_count} iframes on page to only first {self.max_iframes} to prevent crashes!'
            )
            snapshot['documents'] = snapshot['documents'][: self.max_iframes]

        total_nodes = sum(len(doc.get('nodes', [])) for doc in snapshot['documents'])
        self.logger.debug(f'๐ DEBUG: Snapshot contains {len(snapshot["documents"])} frames with {total_nodes} total nodes')

        # Log iframe-specific info
        for doc_idx, doc in enumerate(snapshot['documents']):
            if doc_idx > 0:  # Not the main document
                self.logger.debug(
                    f'๐ DEBUG: Iframe #{doc_idx} {doc.get("frameId", "no-frame-id")} {doc.get("url", "no-url")} has {len(doc.get("nodes", []))} nodes'
                )

    snapshot_processing_ms = (time.time() - start_snapshot_processing) * 1000

    # Return with detailed timing breakdown
    return TargetAllTrees(
        snapshot=snapshot,
        dom_tree=dom_tree,
        ax_tree=ax_tree,
        device_pixel_ratio=device_pixel_ratio,
        cdp_timing={
            'iframe_scroll_detection_ms': iframe_scroll_ms,
            'cdp_parallel_calls_ms': cdp_calls_ms,
            'snapshot_processing_ms': snapshot_processing_ms,
        },
    )
@observe_debug(ignore_input=True, ignore_output=True, name='get_dom_tree')
async def get_dom_tree(
    self,
    target_id: TargetID,
    all_frames: dict | None = None,
    initial_html_frames: list[EnhancedDOMTreeNode] | None = None,
    initial_total_frame_offset: DOMRect | None = None,
    iframe_depth: int = 0,
) -> tuple[EnhancedDOMTreeNode, dict[str, float]]:
    """Get the DOM tree for a specific target.

    Args:
        target_id: Target ID of the page to get the DOM tree for.
        all_frames: Pre-fetched frame hierarchy to avoid redundant CDP calls (optional, lazy fetch if None)
        initial_html_frames: List of HTML frame nodes encountered so far
        initial_total_frame_offset: Accumulated coordinate offset
        iframe_depth: Current depth of iframe nesting to prevent infinite recursion

    Returns:
        Tuple of (enhanced_dom_tree_node, timing_info)
    """
    timing_info: dict[str, float] = {}
    timing_start_total = time.time()

    # Get all trees from CDP (snapshot, DOM, AX, viewport ratio)
    start_get_trees = time.time()
    trees = await self._get_all_trees(target_id)
    get_trees_ms = (time.time() - start_get_trees) * 1000
    timing_info.update(trees.cdp_timing)
    timing_info['get_all_trees_total_ms'] = get_trees_ms

    dom_tree = trees.dom_tree
    ax_tree = trees.ax_tree
    snapshot = trees.snapshot
    device_pixel_ratio = trees.device_pixel_ratio

    # Build AX tree lookup (backend DOM node id -> AX node)
    start_ax = time.time()
    ax_tree_lookup: dict[int, AXNode] = {
        ax_node['backendDOMNodeId']: ax_node for ax_node in ax_tree['nodes'] if 'backendDOMNodeId' in ax_node
    }
    timing_info['build_ax_lookup_ms'] = (time.time() - start_ax) * 1000

    enhanced_dom_tree_node_lookup: dict[int, EnhancedDOMTreeNode] = {}
    """ NodeId (NOT backend node id) -> enhanced dom tree node"""  # way to get the parent/content node

    # Parse snapshot data with everything calculated upfront
    start_snapshot = time.time()
    snapshot_lookup = build_snapshot_lookup(snapshot, device_pixel_ratio)
    timing_info['build_snapshot_lookup_ms'] = (time.time() - start_snapshot) * 1000

    async def _construct_enhanced_node(
        node: Node,
        html_frames: list[EnhancedDOMTreeNode] | None,
        total_frame_offset: DOMRect | None,
        all_frames: dict | None,
    ) -> EnhancedDOMTreeNode:
        """
        Recursively construct enhanced DOM tree nodes.

        Args:
            node: The DOM node to construct
            html_frames: List of HTML frame nodes encountered so far
            total_frame_offset: Accumulated coordinate translation from parent iframes (includes scroll corrections)
            all_frames: Pre-fetched frame hierarchy to avoid redundant CDP calls
        """
        # Initialize lists if not provided
        if html_frames is None:
            html_frames = []

        # to get rid of the pointer references — each recursion level works on
        # its own copy of the offset so child adjustments don't leak upward.
        if total_frame_offset is None:
            total_frame_offset = DOMRect(x=0.0, y=0.0, width=0.0, height=0.0)
        else:
            total_frame_offset = DOMRect(
                total_frame_offset.x, total_frame_offset.y, total_frame_offset.width, total_frame_offset.height
            )

        # memoize the mf (I don't know if some nodes are duplicated)
        if node['nodeId'] in enhanced_dom_tree_node_lookup:
            return enhanced_dom_tree_node_lookup[node['nodeId']]

        ax_node = ax_tree_lookup.get(node['backendNodeId'])
        if ax_node:
            enhanced_ax_node = self._build_enhanced_ax_node(ax_node)
        else:
            enhanced_ax_node = None

        # To make attributes more readable — CDP returns them as a flat
        # [name, value, name, value, ...] list.
        attributes: dict[str, str] | None = None
        if 'attributes' in node and node['attributes']:
            attributes = {}
            for i in range(0, len(node['attributes']), 2):
                attributes[node['attributes'][i]] = node['attributes'][i + 1]

        shadow_root_type = None
        if 'shadowRootType' in node and node['shadowRootType']:
            try:
                shadow_root_type = node['shadowRootType']
            except ValueError:
                # NOTE(review): plain dict access cannot raise ValueError —
                # this guard looks vestigial (perhaps from an enum cast); confirm.
                pass

        # Get snapshot data and calculate absolute position
        snapshot_data = snapshot_lookup.get(node['backendNodeId'], None)

        # DIAGNOSTIC: Log when interactive elements don't have snapshot data
        if not snapshot_data and node['nodeName'].upper() in ['INPUT', 'BUTTON', 'SELECT', 'TEXTAREA', 'A']:
            parent_has_shadow = False
            parent_info = ''
            if 'parentId' in node and node['parentId'] in enhanced_dom_tree_node_lookup:
                parent = enhanced_dom_tree_node_lookup[node['parentId']]
                if parent.shadow_root_type:
                    parent_has_shadow = True
                    parent_info = f'parent={parent.tag_name}(shadow={parent.shadow_root_type})'
            attr_str = ''
            if 'attributes' in node and node['attributes']:
                attrs_dict = {node['attributes'][i]: node['attributes'][i + 1] for i in range(0, len(node['attributes']), 2)}
                attr_str = f'name={attrs_dict.get("name", "N/A")} id={attrs_dict.get("id", "N/A")}'
            self.logger.debug(
                f'๐ NO SNAPSHOT DATA for <{node["nodeName"]}> backendNodeId={node["backendNodeId"]} '
                f'{attr_str} {parent_info} (snapshot_lookup has {len(snapshot_lookup)} entries)'
            )

        # Absolute position = local snapshot bounds shifted by the accumulated
        # frame offset (iframe origins minus scroll corrections).
        absolute_position = None
        if snapshot_data and snapshot_data.bounds:
            absolute_position = DOMRect(
                x=snapshot_data.bounds.x + total_frame_offset.x,
                y=snapshot_data.bounds.y + total_frame_offset.y,
                width=snapshot_data.bounds.width,
                height=snapshot_data.bounds.height,
            )

        try:
            session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
            session_id = session.session_id
        except ValueError:
            # Target may have detached during DOM construction
            session_id = None

        dom_tree_node = EnhancedDOMTreeNode(
            node_id=node['nodeId'],
            backend_node_id=node['backendNodeId'],
            node_type=NodeType(node['nodeType']),
            node_name=node['nodeName'],
            node_value=node['nodeValue'],
            attributes=attributes or {},
            is_scrollable=node.get('isScrollable', None),
            frame_id=node.get('frameId', None),
            session_id=session_id,
            target_id=target_id,
            content_document=None,
            shadow_root_type=shadow_root_type,
            shadow_roots=None,
            parent_node=None,
            children_nodes=None,
            ax_node=enhanced_ax_node,
            snapshot_node=snapshot_data,
            is_visible=None,
            absolute_position=absolute_position,
        )

        enhanced_dom_tree_node_lookup[node['nodeId']] = dom_tree_node

        if 'parentId' in node and node['parentId']:
            dom_tree_node.parent_node = enhanced_dom_tree_node_lookup[
                node['parentId']
            ]  # parents should always be in the lookup

        # Check if this is an HTML frame node and add it to the list
        updated_html_frames = html_frames.copy()
        if node['nodeType'] == NodeType.ELEMENT_NODE.value and node['nodeName'] == 'HTML' and node.get('frameId') is not None:
            updated_html_frames.append(dom_tree_node)

            # and adjust the total frame offset by scroll
            if snapshot_data and snapshot_data.scrollRects:
                total_frame_offset.x -= snapshot_data.scrollRects.x
                total_frame_offset.y -= snapshot_data.scrollRects.y

                # DEBUG: Log iframe scroll information
                self.logger.debug(
                    f'๐ DEBUG: HTML frame scroll - scrollY={snapshot_data.scrollRects.y}, scrollX={snapshot_data.scrollRects.x}, frameId={node.get("frameId")}, nodeId={node["nodeId"]}'
                )

        # Calculate new iframe offset for content documents, accounting for iframe scroll
        if (
            (node['nodeName'].upper() == 'IFRAME' or node['nodeName'].upper() == 'FRAME')
            and snapshot_data
            and snapshot_data.bounds
        ):
            if snapshot_data.bounds:
                updated_html_frames.append(dom_tree_node)
                total_frame_offset.x += snapshot_data.bounds.x
                total_frame_offset.y += snapshot_data.bounds.y

        if 'contentDocument' in node and node['contentDocument']:
            dom_tree_node.content_document = await _construct_enhanced_node(
                node['contentDocument'], updated_html_frames, total_frame_offset, all_frames
            )
            dom_tree_node.content_document.parent_node = dom_tree_node
            # forcefully set the parent node to the content document node (helps traverse the tree)

        if 'shadowRoots' in node and node['shadowRoots']:
            dom_tree_node.shadow_roots = []
            for shadow_root in node['shadowRoots']:
                shadow_root_node = await _construct_enhanced_node(
                    shadow_root, updated_html_frames, total_frame_offset, all_frames
                )
                # forcefully set the parent node to the shadow root node (helps traverse the tree)
                shadow_root_node.parent_node = dom_tree_node
                dom_tree_node.shadow_roots.append(shadow_root_node)

        if 'children' in node and node['children']:
            dom_tree_node.children_nodes = []

            # Build set of shadow root node IDs to filter them out from children
            shadow_root_node_ids = set()
            if 'shadowRoots' in node and node['shadowRoots']:
                for shadow_root in node['shadowRoots']:
                    shadow_root_node_ids.add(shadow_root['nodeId'])

            for child in node['children']:
                # Skip shadow roots - they should only be in shadow_roots list
                if child['nodeId'] in shadow_root_node_ids:
                    continue
                dom_tree_node.children_nodes.append(
                    await _construct_enhanced_node(child, updated_html_frames, total_frame_offset, all_frames)
                )

        # Set visibility using the collected HTML frames
        dom_tree_node.is_visible = self.is_element_visible_according_to_all_parents(dom_tree_node, updated_html_frames)

        # DEBUG: Log visibility info for form elements in iframes
        if dom_tree_node.tag_name and dom_tree_node.tag_name.upper() in ['INPUT', 'SELECT', 'TEXTAREA', 'LABEL']:
            attrs = dom_tree_node.attributes or {}
            elem_id = attrs.get('id', '')
            elem_name = attrs.get('name', '')
            if (
                'city' in elem_id.lower()
                or 'city' in elem_name.lower()
                or 'state' in elem_id.lower()
                or 'state' in elem_name.lower()
                or 'zip' in elem_id.lower()
                or 'zip' in elem_name.lower()
            ):
                self.logger.debug(
                    f"๐ DEBUG: Form element {dom_tree_node.tag_name} id='{elem_id}' name='{elem_name}' - visible={dom_tree_node.is_visible}, bounds={dom_tree_node.snapshot_node.bounds if dom_tree_node.snapshot_node else 'NO_SNAPSHOT'}"
                )

        # handle cross origin iframe (just recursively call the main function with the proper target if it exists in iframes)
        # only do this if the iframe is visible (otherwise it's not worth it)
        if (
            # TODO: hacky way to disable cross origin iframes for now
            self.cross_origin_iframes and node['nodeName'].upper() == 'IFRAME' and node.get('contentDocument', None) is None
        ):  # None meaning there is no content
            # Check iframe depth to prevent infinite recursion
            if iframe_depth >= self.max_iframe_depth:
                self.logger.debug(
                    f'Skipping iframe at depth {iframe_depth} to prevent infinite recursion (max depth: {self.max_iframe_depth})'
                )
            else:
                # Check if iframe is visible and large enough (>= 50px in both dimensions)
                should_process_iframe = False

                # First check if the iframe element itself is visible
                if dom_tree_node.is_visible:
                    # Check iframe dimensions
                    if dom_tree_node.snapshot_node and dom_tree_node.snapshot_node.bounds:
                        bounds = dom_tree_node.snapshot_node.bounds
                        width = bounds.width
                        height = bounds.height

                        # Only process if iframe is at least 50px in both dimensions
                        if width >= 50 and height >= 50:
                            should_process_iframe = True
                            self.logger.debug(f'Processing cross-origin iframe: visible=True, width={width}, height={height}')
                        else:
                            self.logger.debug(
                                f'Skipping small cross-origin iframe: width={width}, height={height} (needs >= 50px)'
                            )
                    else:
                        self.logger.debug('Skipping cross-origin iframe: no bounds available')
                else:
                    self.logger.debug('Skipping invisible cross-origin iframe')

                if should_process_iframe:
                    # Lazy fetch all_frames only when actually needed (for cross-origin iframes)
                    if all_frames is None:
                        all_frames, _ = await self.browser_session.get_all_frames()

                    # Use pre-fetched all_frames to find the iframe's target (no redundant CDP call)
                    frame_id = node.get('frameId', None)
                    if frame_id:
                        frame_info = all_frames.get(frame_id)
                        iframe_document_target = None
                        if frame_info and frame_info.get('frameTargetId'):
                            iframe_target_id = frame_info['frameTargetId']
                            iframe_target = self.browser_session.session_manager.get_target(iframe_target_id)
                            if iframe_target:
                                iframe_document_target = {
                                    'targetId': iframe_target.target_id,
                                    'url': iframe_target.url,
                                    'title': iframe_target.title,
                                    'type': iframe_target.target_type,
                                }
                            else:
                                iframe_document_target = None

                        # if target actually exists in one of the frames, just recursively build the dom tree for it
                        if iframe_document_target:
                            self.logger.debug(
                                f'Getting content document for iframe {node.get("frameId", None)} at depth {iframe_depth + 1}'
                            )
                            content_document, _ = await self.get_dom_tree(
                                target_id=iframe_document_target['targetId'],
                                all_frames=all_frames,
                                # TODO: experiment with this values -> not sure whether the whole cross origin iframe should be ALWAYS included as soon as some part of it is visible or not.
                                # Current config: if the cross origin iframe is AT ALL visible, then just include everything inside of it!
                                # initial_html_frames=updated_html_frames,
                                initial_total_frame_offset=total_frame_offset,
                                iframe_depth=iframe_depth + 1,
                            )
                            dom_tree_node.content_document = content_document
                            dom_tree_node.content_document.parent_node = dom_tree_node

        return dom_tree_node

    # Build enhanced DOM tree recursively
    # Note: all_frames stays None and will be lazily fetched inside _construct_enhanced_node
    # only if/when a cross-origin iframe is encountered
    start_construct = time.time()
    enhanced_dom_tree_node = await _construct_enhanced_node(
        dom_tree['root'], initial_html_frames, initial_total_frame_offset, all_frames
    )
    timing_info['construct_enhanced_tree_ms'] = (time.time() - start_construct) * 1000

    # Calculate total time for get_dom_tree
    total_get_dom_tree_ms = (time.time() - timing_start_total) * 1000
    timing_info['get_dom_tree_total_ms'] = total_get_dom_tree_ms

    # Calculate overhead in get_dom_tree (time not accounted for by sub-operations)
    tracked_sub_operations_ms = (
        timing_info.get('get_all_trees_total_ms', 0)
        + timing_info.get('build_ax_lookup_ms', 0)
        + timing_info.get('build_snapshot_lookup_ms', 0)
        + timing_info.get('construct_enhanced_tree_ms', 0)
    )
    get_dom_tree_overhead_ms = total_get_dom_tree_ms - tracked_sub_operations_ms
    if get_dom_tree_overhead_ms > 0.1:
        timing_info['get_dom_tree_overhead_ms'] = get_dom_tree_overhead_ms

    return enhanced_dom_tree_node, timing_info
@observe_debug(ignore_input=True, ignore_output=True, name='get_serialized_dom_tree')
async def get_serialized_dom_tree(
self, previous_cached_state: SerializedDOMState | None = None
) -> tuple[SerializedDOMState, EnhancedDOMTreeNode, dict[str, float]]:
"""Get the serialized DOM tree representation for LLM consumption.
Returns:
Tuple of (serialized_dom_state, enhanced_dom_tree_root, timing_info)
"""
timing_info: dict[str, float] = {}
start_total = time.time()
# Use current target (None means use current)
assert self.browser_session.agent_focus_target_id is not None
session_id = self.browser_session.id
# Build DOM tree (includes CDP calls for snapshot, DOM, AX tree)
# Note: all_frames is fetched lazily inside get_dom_tree only if cross-origin iframes need it
enhanced_dom_tree, dom_tree_timing = await self.get_dom_tree(
target_id=self.browser_session.agent_focus_target_id,
all_frames=None, # Lazy - will fetch if needed
)
# Add sub-timings from DOM tree construction
timing_info.update(dom_tree_timing)
# Serialize DOM tree for LLM
start_serialize = time.time()
serialized_dom_state, serializer_timing = DOMTreeSerializer(
enhanced_dom_tree, previous_cached_state, paint_order_filtering=self.paint_order_filtering, session_id=session_id
).serialize_accessible_elements()
total_serialization_ms = (time.time() - start_serialize) * 1000
# Add serializer sub-timings (convert to ms)
for key, value in serializer_timing.items():
timing_info[f'{key}_ms'] = value * 1000
# Calculate untracked time in serialization
tracked_serialization_ms = sum(value * 1000 for value in serializer_timing.values())
serialization_overhead_ms = total_serialization_ms - tracked_serialization_ms
if serialization_overhead_ms > 0.1: # Only log if significant
timing_info['serialization_overhead_ms'] = serialization_overhead_ms
# Calculate total time for get_serialized_dom_tree
total_get_serialized_dom_tree_ms = (time.time() - start_total) * 1000
timing_info['get_serialized_dom_tree_total_ms'] = total_get_serialized_dom_tree_ms
# Calculate overhead in get_serialized_dom_tree (time not accounted for)
tracked_major_operations_ms = timing_info.get('get_dom_tree_total_ms', 0) + total_serialization_ms
get_serialized_overhead_ms = total_get_serialized_dom_tree_ms - tracked_major_operations_ms
if get_serialized_overhead_ms > 0.1:
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/utils.py | browser_use/dom/utils.py | def cap_text_length(text: str, max_length: int) -> str:
    """Cap text length for display.

    Returns *text* unchanged when it fits within *max_length*; otherwise the
    first *max_length* characters with '...' appended (so the result can be
    up to three characters longer than *max_length*).
    """
    if len(text) <= max_length:
        return text
    return text[:max_length] + '...'
def generate_css_selector_for_element(enhanced_node) -> str | None:
"""Generate a CSS selector using node properties from version 0.5.0 approach."""
import re
if not enhanced_node or not hasattr(enhanced_node, 'tag_name') or not enhanced_node.tag_name:
return None
# Get base selector from tag name (simplified since we don't have xpath in EnhancedDOMTreeNode)
tag_name = enhanced_node.tag_name.lower().strip()
if not tag_name or not re.match(r'^[a-zA-Z][a-zA-Z0-9-]*$', tag_name):
return None
css_selector = tag_name
# Add ID if available (most specific)
if enhanced_node.attributes and 'id' in enhanced_node.attributes:
element_id = enhanced_node.attributes['id']
if element_id and element_id.strip():
element_id = element_id.strip()
# Validate ID contains only valid characters for # selector
if re.match(r'^[a-zA-Z][a-zA-Z0-9_-]*$', element_id):
return f'#{element_id}'
else:
# For IDs with special characters ($, ., :, etc.), use attribute selector
# Escape quotes in the ID value
escaped_id = element_id.replace('"', '\\"')
return f'{tag_name}[id="{escaped_id}"]'
# Handle class attributes (from version 0.5.0 approach)
if enhanced_node.attributes and 'class' in enhanced_node.attributes and enhanced_node.attributes['class']:
# Define a regex pattern for valid class names in CSS
valid_class_name_pattern = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_-]*$')
# Iterate through the class attribute values
classes = enhanced_node.attributes['class'].split()
for class_name in classes:
# Skip empty class names
if not class_name.strip():
continue
# Check if the class name is valid
if valid_class_name_pattern.match(class_name):
# Append the valid class name to the CSS selector
css_selector += f'.{class_name}'
# Expanded set of safe attributes that are stable and useful for selection (from v0.5.0)
SAFE_ATTRIBUTES = {
# Data attributes (if they're stable in your application)
'id',
# Standard HTML attributes
'name',
'type',
'placeholder',
# Accessibility attributes
'aria-label',
'aria-labelledby',
'aria-describedby',
'role',
# Common form attributes
'for',
'autocomplete',
'required',
'readonly',
# Media attributes
'alt',
'title',
'src',
# Custom stable attributes (add any application-specific ones)
'href',
'target',
}
# Always include dynamic attributes (include_dynamic_attributes=True equivalent)
include_dynamic_attributes = True
if include_dynamic_attributes:
dynamic_attributes = {
'data-id',
'data-qa',
'data-cy',
'data-testid',
}
SAFE_ATTRIBUTES.update(dynamic_attributes)
# Handle other attributes (from version 0.5.0 approach)
if enhanced_node.attributes:
for attribute, value in enhanced_node.attributes.items():
if attribute == 'class':
continue
# Skip invalid attribute names
if not attribute.strip():
continue
if attribute not in SAFE_ATTRIBUTES:
continue
# Escape special characters in attribute names
safe_attribute = attribute.replace(':', r'\:')
# Handle different value cases
if value == '':
css_selector += f'[{safe_attribute}]'
elif any(char in value for char in '"\'<>`\n\r\t'):
# Use contains for values with special characters
# For newline-containing text, only use the part before the newline
if '\n' in value:
value = value.split('\n')[0]
# Regex-substitute *any* whitespace with a single space, then strip.
collapsed_value = re.sub(r'\s+', ' ', value).strip()
# Escape embedded double-quotes.
safe_value = collapsed_value.replace('"', '\\"')
css_selector += f'[{safe_attribute}*="{safe_value}"]'
else:
css_selector += f'[{safe_attribute}="{value}"]'
# Final validation: ensure the selector is safe and doesn't contain problematic characters
# Note: quotes are allowed in attribute selectors like [name="value"]
if css_selector and not any(char in css_selector for char in ['\n', '\r', '\t']):
return css_selector
# If we get here, the selector was problematic, return just the tag name as fallback
return tag_name
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/markdown_extractor.py | browser_use/dom/markdown_extractor.py | """
Shared markdown extraction utilities for browser content processing.
This module provides a unified interface for extracting clean markdown from browser content,
used by both the tools service and page actor.
"""
import re
from typing import TYPE_CHECKING, Any
from browser_use.dom.serializer.html_serializer import HTMLSerializer
from browser_use.dom.service import DomService
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession
from browser_use.browser.watchdogs.dom_watchdog import DOMWatchdog
async def extract_clean_markdown(
	browser_session: 'BrowserSession | None' = None,
	dom_service: DomService | None = None,
	target_id: str | None = None,
	extract_links: bool = False,
) -> tuple[str, dict[str, Any]]:
	"""Extract clean markdown from browser content using enhanced DOM tree.

	This unified function can extract markdown using either a browser session (for tools service)
	or a DOM service with target ID (for page actor). Exactly one call style must be used:
	pass `browser_session` alone, or pass both `dom_service` and `target_id`.

	Args:
		browser_session: Browser session to extract content from (tools service path)
		dom_service: DOM service instance (page actor path)
		target_id: Target ID for the page (required when using dom_service)
		extract_links: Whether to preserve links in markdown

	Returns:
		tuple: (clean_markdown_content, content_statistics)

	Raises:
		ValueError: If neither browser_session nor (dom_service + target_id) are provided,
			or if the two call styles are mixed
	"""
	# Validate input parameters and select the extraction path
	if browser_session is not None:
		if dom_service is not None or target_id is not None:
			raise ValueError('Cannot specify both browser_session and dom_service/target_id')

		# Browser session path (tools service)
		enhanced_dom_tree = await _get_enhanced_dom_tree_from_browser_session(browser_session)
		current_url = await browser_session.get_current_page_url()
		method = 'enhanced_dom_tree'
	elif dom_service is not None and target_id is not None:
		# DOM service path (page actor)
		# Lazy fetch all_frames inside get_dom_tree if needed (for cross-origin iframes)
		enhanced_dom_tree, _ = await dom_service.get_dom_tree(target_id=target_id, all_frames=None)
		current_url = None  # Not available via DOM service
		method = 'dom_service'
	else:
		raise ValueError('Must provide either browser_session or both dom_service and target_id')

	# Serialize the enhanced DOM tree back to HTML for markdown conversion
	html_serializer = HTMLSerializer(extract_links=extract_links)
	page_html = html_serializer.serialize(enhanced_dom_tree)

	original_html_length = len(page_html)

	# Use markdownify for clean markdown conversion (imported lazily - only needed here)
	from markdownify import markdownify as md

	content = md(
		page_html,
		heading_style='ATX',  # Use # style headings
		strip=['script', 'style'],  # Remove these tags
		bullets='-',  # Use - for unordered lists
		code_language='',  # Don't add language to code blocks
		escape_asterisks=False,  # Don't escape asterisks (cleaner output)
		escape_underscores=False,  # Don't escape underscores (cleaner output)
		escape_misc=False,  # Don't escape other characters (cleaner output)
		autolinks=False,  # Don't convert URLs to <> format
		default_title=False,  # Don't add default title attributes
		keep_inline_images_in=[],  # Don't keep inline images in any tags (we already filter base64 in HTML)
	)

	initial_markdown_length = len(content)

	# Minimal cleanup - markdownify already does most of the work
	content = re.sub(r'%[0-9A-Fa-f]{2}', '', content)  # Remove any remaining URL encoding

	# Apply light preprocessing to clean up excessive whitespace and JSON blobs
	content, chars_filtered = _preprocess_markdown_content(content)

	final_filtered_length = len(content)

	# Content statistics reported alongside the markdown so callers can gauge reduction
	stats = {
		'method': method,
		'original_html_chars': original_html_length,
		'initial_markdown_chars': initial_markdown_length,
		'filtered_chars_removed': chars_filtered,
		'final_filtered_chars': final_filtered_length,
	}

	# Add URL to stats if available (only known on the browser-session path)
	if current_url:
		stats['url'] = current_url

	return content, stats
async def _get_enhanced_dom_tree_from_browser_session(browser_session: 'BrowserSession'):
	"""Fetch the enhanced DOM tree for a session, building it on demand.

	Uses the session's DOMWatchdog: returns its cached tree when one exists,
	otherwise triggers a highlight-free rebuild (capturing dynamic content,
	shadow roots, etc.) and returns the freshly built tree.
	"""
	watchdog: DOMWatchdog | None = browser_session._dom_watchdog
	assert watchdog is not None, 'DOMWatchdog not available'

	tree = watchdog.enhanced_dom_tree
	if tree is None:
		# No cached tree yet - build one now
		await watchdog._build_dom_tree_without_highlights()
		tree = watchdog.enhanced_dom_tree
		assert tree is not None, 'Enhanced DOM tree not available'
	return tree
# Legacy aliases removed - all code now uses the unified extract_clean_markdown function
def _preprocess_markdown_content(content: str, max_newlines: int = 3) -> tuple[str, int]:
"""
Light preprocessing of markdown output - minimal cleanup with JSON blob removal.
Args:
content: Markdown content to lightly filter
max_newlines: Maximum consecutive newlines to allow
Returns:
tuple: (filtered_content, chars_filtered)
"""
original_length = len(content)
# Remove JSON blobs (common in SPAs like LinkedIn, Facebook, etc.)
# These are often embedded as `{"key":"value",...}` and can be massive
# Match JSON objects/arrays that are at least 100 chars long
# This catches SPA state/config data without removing small inline JSON
content = re.sub(r'`\{["\w].*?\}`', '', content, flags=re.DOTALL) # Remove JSON in code blocks
content = re.sub(r'\{"\$type":[^}]{100,}\}', '', content) # Remove JSON with $type fields (common pattern)
content = re.sub(r'\{"[^"]{5,}":\{[^}]{100,}\}', '', content) # Remove nested JSON objects
# Compress consecutive newlines (4+ newlines become max_newlines)
content = re.sub(r'\n{4,}', '\n' * max_newlines, content)
# Remove lines that are only whitespace
lines = content.split('\n')
filtered_lines = []
for line in lines:
stripped = line.strip()
# Keep all non-empty lines
if stripped:
# Skip lines that look like JSON (start with { or [ and are very long)
if (stripped.startswith('{') or stripped.startswith('[')) and len(stripped) > 100:
continue
filtered_lines.append(line)
content = '\n'.join(filtered_lines)
content = content.strip()
chars_filtered = original_length - len(content)
return content, chars_filtered
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/paint_order.py | browser_use/dom/serializer/paint_order.py | from collections import defaultdict
from dataclasses import dataclass
from browser_use.dom.views import SimplifiedNode
"""
Helper class for maintaining a union of rectangles (used for order of elements calculation)
"""
@dataclass(frozen=True, slots=True)
class Rect:
"""Closed axis-aligned rectangle with (x1,y1) bottom-left, (x2,y2) top-right."""
x1: float
y1: float
x2: float
y2: float
def __post_init__(self):
if not (self.x1 <= self.x2 and self.y1 <= self.y2):
return False
# --- fast relations ----------------------------------------------------
def area(self) -> float:
return (self.x2 - self.x1) * (self.y2 - self.y1)
def intersects(self, other: 'Rect') -> bool:
return not (self.x2 <= other.x1 or other.x2 <= self.x1 or self.y2 <= other.y1 or other.y2 <= self.y1)
def contains(self, other: 'Rect') -> bool:
return self.x1 <= other.x1 and self.y1 <= other.y1 and self.x2 >= other.x2 and self.y2 >= other.y2
class RectUnionPure:
	"""
	Maintains a *disjoint* set of rectangles.

	No external dependencies - fine for a few thousand rectangles.
	"""

	__slots__ = ('_rects',)

	def __init__(self):
		self._rects: list[Rect] = []

	# -----------------------------------------------------------------
	def _split_diff(self, a: Rect, b: Rect) -> list[Rect]:
		r"""
		Return list of up to 4 rectangles = a \ b.

		Assumes a intersects b.
		"""
		parts = []
		# Bottom slice
		if a.y1 < b.y1:
			parts.append(Rect(a.x1, a.y1, a.x2, b.y1))
		# Top slice
		if b.y2 < a.y2:
			parts.append(Rect(a.x1, b.y2, a.x2, a.y2))
		# Middle (vertical) strip: y overlap is [max(a.y1,b.y1), min(a.y2,b.y2)]
		y_lo = max(a.y1, b.y1)
		y_hi = min(a.y2, b.y2)
		# Left slice
		if a.x1 < b.x1:
			parts.append(Rect(a.x1, y_lo, b.x1, y_hi))
		# Right slice
		if b.x2 < a.x2:
			parts.append(Rect(b.x2, y_lo, a.x2, y_hi))
		return parts

	# -----------------------------------------------------------------
	def contains(self, r: Rect) -> bool:
		"""
		True iff r is fully covered by the current union.
		"""
		if not self._rects:
			return False

		# Subtract each stored rectangle from the not-yet-covered pieces of r;
		# if nothing survives the whole pass, r is fully covered.
		stack = [r]
		for s in self._rects:
			new_stack = []
			for piece in stack:
				if s.contains(piece):
					# piece completely gone
					continue
				if piece.intersects(s):
					new_stack.extend(self._split_diff(piece, s))
				else:
					new_stack.append(piece)
			if not new_stack:  # everything eaten -> covered
				return True
			stack = new_stack
		return False  # something survived

	# -----------------------------------------------------------------
	def add(self, r: Rect) -> bool:
		"""
		Insert r unless it is already covered.

		Returns True if the union grew.
		"""
		if self.contains(r):
			return False

		# Subtract every existing rectangle from r. self._rects is never
		# mutated inside this loop, so a plain for-loop suffices - the
		# original while-loop's `changed` flag advanced the index identically
		# in both branches (dead logic, removed).
		pending = [r]
		for s in self._rects:
			new_pending = []
			for piece in pending:
				if piece.intersects(s):
					new_pending.extend(self._split_diff(piece, s))
				else:
					new_pending.append(piece)
			pending = new_pending

		# Any left-over pieces are new, non-overlapping areas
		self._rects.extend(pending)
		return True
class PaintOrderRemover:
	"""
	Calculates which elements should be removed based on the paint order parameter.
	"""

	def __init__(self, root: SimplifiedNode):
		self.root = root

	def calculate_paint_order(self) -> None:
		"""Flag nodes fully covered by higher-paint-order content as ignored."""
		# Gather every node that carries both a paint order and bounds,
		# walking the tree iteratively instead of via recursion.
		painted: list[SimplifiedNode] = []
		stack = [self.root]
		while stack:
			current = stack.pop()
			snap = current.original_node.snapshot_node
			if snap and snap.paint_order is not None and snap.bounds is not None:
				painted.append(current)
			stack.extend(current.children)

		# Bucket the candidates by paint order value.
		buckets: defaultdict[int, list[SimplifiedNode]] = defaultdict(list)
		for candidate in painted:
			snap = candidate.original_node.snapshot_node
			if snap and snap.paint_order is not None:
				buckets[snap.paint_order].append(candidate)

		union = RectUnionPure()
		# Sweep from the topmost paint order downwards; anything fully inside
		# the union of higher layers is marked as occluded.
		for _order, group in sorted(buckets.items(), key=lambda item: -item[0]):
			fresh_rects: list[Rect] = []
			for candidate in group:
				snap = candidate.original_node.snapshot_node
				if not snap or not snap.bounds:
					continue  # already filtered out above; purely defensive

				box = Rect(
					x1=snap.bounds.x,
					y1=snap.bounds.y,
					x2=snap.bounds.x + snap.bounds.width,
					y2=snap.bounds.y + snap.bounds.height,
				)
				if union.contains(box):
					candidate.ignored_by_paint_order = True

				# Don't let see-through elements occlude what's below them:
				# skip transparent backgrounds, and opacity below 0.8
				# (a heuristic, vibes-based threshold).
				styles = snap.computed_styles
				if styles and styles.get('background-color', 'rgba(0, 0, 0, 0)') == 'rgba(0, 0, 0, 0)':
					continue
				if styles and float(styles.get('opacity', '1')) < 0.8:
					continue
				fresh_rects.append(box)

			# Extend the union only after the whole group is evaluated so that
			# siblings with equal paint order never occlude one another.
			for box in fresh_rects:
				union.add(box)
		return None
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/serializer.py | browser_use/dom/serializer/serializer.py | # @file purpose: Serializes enhanced DOM trees to string format for LLM consumption
from typing import Any
from browser_use.dom.serializer.clickable_elements import ClickableElementDetector
from browser_use.dom.serializer.paint_order import PaintOrderRemover
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
DOMRect,
DOMSelectorMap,
EnhancedDOMTreeNode,
NodeType,
PropagatingBounds,
SerializedDOMState,
SimplifiedNode,
)
DISABLED_ELEMENTS = {'style', 'script', 'head', 'meta', 'link', 'title'}
# SVG child elements to skip (decorative only, no interaction value)
SVG_ELEMENTS = {
'path',
'rect',
'g',
'circle',
'ellipse',
'line',
'polyline',
'polygon',
'use',
'defs',
'clipPath',
'mask',
'pattern',
'image',
'text',
'tspan',
}
class DOMTreeSerializer:
	"""Serializes enhanced DOM trees to string format."""

	# Configuration - elements that propagate bounds to their children.
	# NOTE: an exact duplicate {'tag': 'input', 'role': 'combobox'} entry was
	# removed; membership semantics are unchanged.
	PROPAGATING_ELEMENTS = [
		{'tag': 'a', 'role': None},  # Any <a> tag
		{'tag': 'button', 'role': None},  # Any <button> tag
		{'tag': 'div', 'role': 'button'},  # <div role="button">
		{'tag': 'div', 'role': 'combobox'},  # <div role="combobox"> - dropdowns/selects
		{'tag': 'span', 'role': 'button'},  # <span role="button">
		{'tag': 'span', 'role': 'combobox'},  # <span role="combobox">
		{'tag': 'input', 'role': 'combobox'},  # <input role="combobox"> - autocomplete/suggestion inputs
		# {'tag': 'div', 'role': 'link'},  # <div role="link">
		# {'tag': 'span', 'role': 'link'},  # <span role="link">
	]

	DEFAULT_CONTAINMENT_THRESHOLD = 0.99  # 99% containment by default

	def __init__(
		self,
		root_node: EnhancedDOMTreeNode,
		previous_cached_state: SerializedDOMState | None = None,
		enable_bbox_filtering: bool = True,
		containment_threshold: float | None = None,
		paint_order_filtering: bool = True,
		session_id: str | None = None,
	):
		"""Initialize the serializer.

		Args:
			root_node: Root of the enhanced DOM tree to serialize.
			previous_cached_state: Prior serialized state; its selector map is kept
				to detect new nodes between serializations.
			enable_bbox_filtering: Whether to filter elements by bounding-box containment.
			containment_threshold: Containment ratio for bbox filtering; falls back to
				DEFAULT_CONTAINMENT_THRESHOLD when None.
			paint_order_filtering: Whether to drop elements occluded per paint order.
			session_id: Session ID used for the session-specific exclude attribute.
		"""
		self.root_node = root_node
		self._interactive_counter = 1
		self._selector_map: DOMSelectorMap = {}
		self._previous_cached_selector_map = previous_cached_state.selector_map if previous_cached_state else None
		# Add timing tracking
		self.timing_info: dict[str, float] = {}
		# Cache for clickable element detection to avoid redundant calls
		self._clickable_cache: dict[int, bool] = {}
		# Bounding box filtering configuration
		self.enable_bbox_filtering = enable_bbox_filtering
		self.containment_threshold = containment_threshold or self.DEFAULT_CONTAINMENT_THRESHOLD
		# Paint order filtering configuration
		self.paint_order_filtering = paint_order_filtering
		# Session ID for session-specific exclude attribute
		self.session_id = session_id
def _safe_parse_number(self, value_str: str, default: float) -> float:
"""Parse string to float, handling negatives and decimals."""
try:
return float(value_str)
except (ValueError, TypeError):
return default
def _safe_parse_optional_number(self, value_str: str | None) -> float | None:
"""Parse string to float, returning None for invalid values."""
if not value_str:
return None
try:
return float(value_str)
except (ValueError, TypeError):
return None
def serialize_accessible_elements(self) -> tuple[SerializedDOMState, dict[str, float]]:
	"""Serialize the DOM tree into LLM-ready state plus per-phase timing.

	Pipeline: simplify -> paint-order filter -> optimize -> bbox filter ->
	assign interactive indices. Returns the serialized state (root node +
	selector map) and a dict of phase timings in seconds.
	"""
	import time

	start_total = time.time()

	# Reset per-serialization state
	self._interactive_counter = 1
	self._selector_map = {}
	self._semantic_groups = []
	self._clickable_cache = {}  # Clear cache for new serialization

	# Step 1: Create simplified tree (includes clickable element detection)
	start_step1 = time.time()
	simplified_tree = self._create_simplified_tree(self.root_node)
	end_step1 = time.time()
	self.timing_info['create_simplified_tree'] = end_step1 - start_step1

	# Step 2: Remove elements occluded per paint order
	# NOTE(review): the start_step3/end_step3 locals are reused by the bbox
	# phase below; timings are still recorded correctly since each phase
	# stores its delta before the variables are overwritten.
	start_step3 = time.time()
	if self.paint_order_filtering and simplified_tree:
		PaintOrderRemover(simplified_tree).calculate_paint_order()
	end_step3 = time.time()
	self.timing_info['calculate_paint_order'] = end_step3 - start_step3

	# Step 3: Optimize tree (remove unnecessary parents)
	start_step2 = time.time()
	optimized_tree = self._optimize_tree(simplified_tree)
	end_step2 = time.time()
	self.timing_info['optimize_tree'] = end_step2 - start_step2

	# Step 4: Apply bounding box filtering (optional)
	if self.enable_bbox_filtering and optimized_tree:
		start_step3 = time.time()
		filtered_tree = self._apply_bounding_box_filtering(optimized_tree)
		end_step3 = time.time()
		self.timing_info['bbox_filtering'] = end_step3 - start_step3
	else:
		filtered_tree = optimized_tree

	# Step 5: Assign interactive indices to clickable elements
	start_step4 = time.time()
	self._assign_interactive_indices_and_mark_new_nodes(filtered_tree)
	end_step4 = time.time()
	self.timing_info['assign_interactive_indices'] = end_step4 - start_step4

	end_total = time.time()
	self.timing_info['serialize_accessible_elements_total'] = end_total - start_total

	return SerializedDOMState(_root=filtered_tree, selector_map=self._selector_map), self.timing_info
def _add_compound_components(self, simplified: SimplifiedNode, node: EnhancedDOMTreeNode) -> None:
	"""Enhance compound controls with information from their child components.

	Compound controls (range/number/color/file inputs, <select>, <details>,
	<audio>, <video>) are rendered by the browser as several internal
	sub-widgets. This appends virtual component descriptors to the node's
	`_compound_children` and sets `simplified.is_compound_component` so the
	serialized output can describe the control's full interaction surface.
	"""
	# Only process elements that might have compound components
	if node.tag_name not in ['input', 'select', 'details', 'audio', 'video']:
		return

	# For input elements, check for compound input types
	if node.tag_name == 'input':
		if not node.attributes or node.attributes.get('type') not in [
			'date',
			'time',
			'datetime-local',
			'month',
			'week',
			'range',
			'number',
			'color',
			'file',
		]:
			return
	# For other elements, check if they have AX child indicators
	elif not node.ax_node or not node.ax_node.child_ids:
		return

	# Add compound component information based on element type
	element_type = node.tag_name
	input_type = node.attributes.get('type', '') if node.attributes else ''

	if element_type == 'input':
		# NOTE: For date/time inputs, we DON'T add compound components because:
		# 1. They confuse the model (seeing "Day, Month, Year" suggests DD.MM.YYYY format)
		# 2. HTML5 date/time inputs ALWAYS require ISO format (YYYY-MM-DD, HH:MM, etc.)
		# 3. The placeholder attribute clearly shows the required format
		# 4. These inputs use direct value assignment, not sequential typing
		if input_type in ['date', 'time', 'datetime-local', 'month', 'week']:
			# Skip compound components for date/time inputs - format is shown in placeholder
			pass
		elif input_type == 'range':
			# Range slider with value indicator
			min_val = node.attributes.get('min', '0') if node.attributes else '0'
			max_val = node.attributes.get('max', '100') if node.attributes else '100'
			node._compound_children.append(
				{
					'role': 'slider',
					'name': 'Value',
					'valuemin': self._safe_parse_number(min_val, 0.0),
					'valuemax': self._safe_parse_number(max_val, 100.0),
					'valuenow': None,
				}
			)
			simplified.is_compound_component = True
		elif input_type == 'number':
			# Number input with increment/decrement buttons
			min_val = node.attributes.get('min') if node.attributes else None
			max_val = node.attributes.get('max') if node.attributes else None
			node._compound_children.extend(
				[
					{'role': 'button', 'name': 'Increment', 'valuemin': None, 'valuemax': None, 'valuenow': None},
					{'role': 'button', 'name': 'Decrement', 'valuemin': None, 'valuemax': None, 'valuenow': None},
					{
						'role': 'textbox',
						'name': 'Value',
						'valuemin': self._safe_parse_optional_number(min_val),
						'valuemax': self._safe_parse_optional_number(max_val),
						'valuenow': None,
					},
				]
			)
			simplified.is_compound_component = True
		elif input_type == 'color':
			# Color picker with components
			node._compound_children.extend(
				[
					{'role': 'textbox', 'name': 'Hex Value', 'valuemin': None, 'valuemax': None, 'valuenow': None},
					{'role': 'button', 'name': 'Color Picker', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				]
			)
			simplified.is_compound_component = True
		elif input_type == 'file':
			# File input with browse button
			multiple = 'multiple' in node.attributes if node.attributes else False

			# Extract current file selection state from AX tree
			current_value = 'None'  # Default to explicit "None" string for clarity
			if node.ax_node and node.ax_node.properties:
				for prop in node.ax_node.properties:
					# Try valuetext first (human-readable display like "file.pdf")
					if prop.name == 'valuetext' and prop.value:
						value_str = str(prop.value).strip()
						if value_str and value_str.lower() not in ['', 'no file chosen', 'no file selected']:
							current_value = value_str
							break
					# Also try 'value' property (may include full path)
					elif prop.name == 'value' and prop.value:
						value_str = str(prop.value).strip()
						if value_str:
							# For file inputs, value might be a full path - extract just filename
							if '\\' in value_str:
								current_value = value_str.split('\\')[-1]
							elif '/' in value_str:
								current_value = value_str.split('/')[-1]
							else:
								current_value = value_str
							break

			node._compound_children.extend(
				[
					{'role': 'button', 'name': 'Browse Files', 'valuemin': None, 'valuemax': None, 'valuenow': None},
					{
						'role': 'textbox',
						'name': f'{"Files" if multiple else "File"} Selected',
						'valuemin': None,
						'valuemax': None,
						'valuenow': current_value,  # Always shows state: filename or "None"
					},
				]
			)
			simplified.is_compound_component = True
	elif element_type == 'select':
		# Select dropdown with option list and detailed option information
		base_components = [
			{'role': 'button', 'name': 'Dropdown Toggle', 'valuemin': None, 'valuemax': None, 'valuenow': None}
		]

		# Extract option information from child nodes
		options_info = self._extract_select_options(node)
		if options_info:
			options_component = {
				'role': 'listbox',
				'name': 'Options',
				'valuemin': None,
				'valuemax': None,
				'valuenow': None,
				'options_count': options_info['count'],
				'first_options': options_info['first_options'],
			}
			if options_info['format_hint']:
				options_component['format_hint'] = options_info['format_hint']
			base_components.append(options_component)
		else:
			base_components.append(
				{'role': 'listbox', 'name': 'Options', 'valuemin': None, 'valuemax': None, 'valuenow': None}
			)

		node._compound_children.extend(base_components)
		simplified.is_compound_component = True
	elif element_type == 'details':
		# Details/summary disclosure widget
		node._compound_children.extend(
			[
				{'role': 'button', 'name': 'Toggle Disclosure', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				{'role': 'region', 'name': 'Content Area', 'valuemin': None, 'valuemax': None, 'valuenow': None},
			]
		)
		simplified.is_compound_component = True
	elif element_type == 'audio':
		# Audio player controls
		node._compound_children.extend(
			[
				{'role': 'button', 'name': 'Play/Pause', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				{'role': 'slider', 'name': 'Progress', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
				{'role': 'button', 'name': 'Mute', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				{'role': 'slider', 'name': 'Volume', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
			]
		)
		simplified.is_compound_component = True
	elif element_type == 'video':
		# Video player controls
		node._compound_children.extend(
			[
				{'role': 'button', 'name': 'Play/Pause', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				{'role': 'slider', 'name': 'Progress', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
				{'role': 'button', 'name': 'Mute', 'valuemin': None, 'valuemax': None, 'valuenow': None},
				{'role': 'slider', 'name': 'Volume', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
				{'role': 'button', 'name': 'Fullscreen', 'valuemin': None, 'valuemax': None, 'valuenow': None},
			]
		)
		simplified.is_compound_component = True
def _extract_select_options(self, select_node: EnhancedDOMTreeNode) -> dict[str, Any] | None:
	"""Extract option information from a select element.

	Recursively gathers <option> elements (descending through <optgroup>
	and other wrappers), then summarizes them for serialization.

	Returns:
		A dict with 'count' (total options), 'first_options' (up to 4 display
		strings plus an ellipsis marker when more exist), and 'format_hint'
		(a heuristic label for the option-value format, or None), or None if
		the select has no children or no extractable options.
	"""
	if not select_node.children:
		return None

	options = []
	option_values = []

	def extract_options_recursive(node: EnhancedDOMTreeNode) -> None:
		"""Recursively extract option elements, including from optgroups."""
		if node.tag_name.lower() == 'option':
			# Extract option text and value
			option_text = ''
			option_value = ''

			# Get value attribute if present
			if node.attributes and 'value' in node.attributes:
				option_value = str(node.attributes['value']).strip()

			# Get text content from direct child text nodes only to avoid duplication
			def get_direct_text_content(n: EnhancedDOMTreeNode) -> str:
				text = ''
				for child in n.children:
					if child.node_type == NodeType.TEXT_NODE and child.node_value:
						text += child.node_value.strip() + ' '
				return text.strip()

			option_text = get_direct_text_content(node)

			# Use text as value if no explicit value
			if not option_value and option_text:
				option_value = option_text

			if option_text or option_value:
				options.append({'text': option_text, 'value': option_value})
				option_values.append(option_value)
		elif node.tag_name.lower() == 'optgroup':
			# Process optgroup children
			for child in node.children:
				extract_options_recursive(child)
		else:
			# Process other children that might contain options
			for child in node.children:
				extract_options_recursive(child)

	# Extract all options from select children
	for child in select_node.children:
		extract_options_recursive(child)

	if not options:
		return None

	# Prepare first 4 options for display
	first_options = []
	for option in options[:4]:
		# Always use text if available, otherwise use value
		display_text = option['text'] if option['text'] else option['value']
		if display_text:
			# Limit individual option text to avoid overly long attributes
			text = display_text[:30] + ('...' if len(display_text) > 30 else '')
			first_options.append(text)

	# Add ellipsis indicator if there are more options than shown
	if len(options) > 4:
		first_options.append(f'... {len(options) - 4} more options...')

	# Try to infer format hint from option values (samples the first 5 values)
	format_hint = None
	if len(option_values) >= 2:
		# Check for common patterns
		if all(val.isdigit() for val in option_values[:5] if val):
			format_hint = 'numeric'
		elif all(len(val) == 2 and val.isupper() for val in option_values[:5] if val):
			format_hint = 'country/state codes'
		elif all('/' in val or '-' in val for val in option_values[:5] if val):
			format_hint = 'date/path format'
		elif any('@' in val for val in option_values[:5] if val):
			format_hint = 'email addresses'

	return {'count': len(options), 'first_options': first_options, 'format_hint': format_hint}
def _is_interactive_cached(self, node: EnhancedDOMTreeNode) -> bool:
	"""Cached clickable-element detection, keyed by node_id, to avoid redundant calls."""
	cached = self._clickable_cache.get(node.node_id)
	if cached is None:
		import time

		t0 = time.time()
		cached = ClickableElementDetector.is_interactive(node)
		elapsed = time.time() - t0
		# Accumulate total detection time across the whole serialization pass.
		self.timing_info['clickable_detection_time'] = self.timing_info.get('clickable_detection_time', 0) + elapsed
		self._clickable_cache[node.node_id] = cached
	return cached
def _create_simplified_tree(self, node: EnhancedDOMTreeNode, depth: int = 0) -> SimplifiedNode | None:
	"""Step 1: Create a simplified tree with enhanced element detection.

	Walks the enhanced DOM tree keeping only nodes that matter downstream:
	visible elements, scrollable containers, shadow hosts and their content,
	iframe content documents, and meaningful text nodes.

	Args:
		node: Current enhanced DOM tree node.
		depth: Recursion depth (traversal bookkeeping only).

	Returns:
		The simplified subtree rooted at `node`, or None if nothing is worth keeping.

	Note: removed the dead `attr_type` local from the exclude-attribute check -
	it was assigned but never read.
	"""
	if node.node_type == NodeType.DOCUMENT_NODE:
		# For all children including shadow roots; a document resolves to its
		# first child that yields a simplified subtree (normally <html>).
		for child in node.children_and_shadow_roots:
			simplified_child = self._create_simplified_tree(child, depth + 1)
			if simplified_child:
				return simplified_child
		return None

	if node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
		# ENHANCED shadow DOM processing - always include shadow content
		simplified = SimplifiedNode(original_node=node, children=[])
		for child in node.children_and_shadow_roots:
			simplified_child = self._create_simplified_tree(child, depth + 1)
			if simplified_child:
				simplified.children.append(simplified_child)
		# Always return shadow DOM fragments, even if children seem empty
		# Shadow DOM often contains the actual interactive content in SPAs
		return simplified if simplified.children else SimplifiedNode(original_node=node, children=[])

	elif node.node_type == NodeType.ELEMENT_NODE:
		# Skip non-content elements
		if node.node_name.lower() in DISABLED_ELEMENTS:
			return None

		# Skip SVG child elements entirely (path, rect, g, circle, etc.)
		if node.node_name.lower() in SVG_ELEMENTS:
			return None

		attributes = node.attributes or {}

		# Check for session-specific exclude attribute first, then fall back to legacy attribute
		exclude_attr = None
		if self.session_id:
			session_specific_attr = f'data-browser-use-exclude-{self.session_id}'
			exclude_attr = attributes.get(session_specific_attr)

		# Fall back to legacy attribute if session-specific not found
		if not exclude_attr:
			exclude_attr = attributes.get('data-browser-use-exclude')

		if isinstance(exclude_attr, str) and exclude_attr.lower() == 'true':
			return None

		if node.node_name == 'IFRAME' or node.node_name == 'FRAME':
			# Frames with a content document are represented by their document's
			# children; frames without one fall through to the normal handling.
			if node.content_document:
				simplified = SimplifiedNode(original_node=node, children=[])
				for child in node.content_document.children_nodes or []:
					simplified_child = self._create_simplified_tree(child, depth + 1)
					if simplified_child is not None:
						simplified.children.append(simplified_child)
				return simplified

		is_visible = node.is_visible
		is_scrollable = node.is_actually_scrollable
		has_shadow_content = bool(node.children_and_shadow_roots)

		# ENHANCED SHADOW DOM DETECTION: Include shadow hosts even if not visible
		is_shadow_host = any(child.node_type == NodeType.DOCUMENT_FRAGMENT_NODE for child in node.children_and_shadow_roots)

		# Override visibility for elements with validation attributes
		if not is_visible and node.attributes:
			has_validation_attrs = any(attr.startswith(('aria-', 'pseudo')) for attr in node.attributes.keys())
			if has_validation_attrs:
				is_visible = True  # Force visibility for validation elements

		# EXCEPTION: File inputs are often hidden with opacity:0 but are still functional
		# Bootstrap and other frameworks use this pattern with custom-styled file pickers
		is_file_input = (
			node.tag_name and node.tag_name.lower() == 'input' and node.attributes and node.attributes.get('type') == 'file'
		)
		if not is_visible and is_file_input:
			is_visible = True  # Force visibility for file inputs

		# Include if visible, scrollable, has children, or is shadow host
		if is_visible or is_scrollable or has_shadow_content or is_shadow_host:
			simplified = SimplifiedNode(original_node=node, children=[], is_shadow_host=is_shadow_host)

			# Process ALL children including shadow roots
			for child in node.children_and_shadow_roots:
				simplified_child = self._create_simplified_tree(child, depth + 1)
				if simplified_child:
					simplified.children.append(simplified_child)

			# COMPOUND CONTROL PROCESSING: Add virtual components for compound controls
			self._add_compound_components(simplified, node)

			# SHADOW DOM SPECIAL CASE: Always include shadow hosts even if not visible
			# Many SPA frameworks (React, Vue) render content in shadow DOM
			if is_shadow_host and simplified.children:
				return simplified

			# Return if meaningful or has meaningful children
			if is_visible or is_scrollable or simplified.children:
				return simplified

	elif node.node_type == NodeType.TEXT_NODE:
		# Include meaningful text nodes: visible, non-whitespace, longer than one char
		is_visible = node.snapshot_node and node.is_visible
		if is_visible and node.node_value and node.node_value.strip() and len(node.node_value.strip()) > 1:
			return SimplifiedNode(original_node=node, children=[])

	return None
def _optimize_tree(self, node: SimplifiedNode | None) -> SimplifiedNode | None:
	"""Step 2: Prune the simplified tree, keeping only meaningful nodes."""
	if node is None:
		return None

	# Recurse first so children are pruned before deciding about this node.
	node.children = [pruned for pruned in (self._optimize_tree(child) for child in node.children) if pruned]

	original = node.original_node
	visible = original.snapshot_node and original.is_visible

	# EXCEPTION: File inputs are often hidden with opacity:0 but are still functional
	keep_hidden_file_input = (
		original.tag_name
		and original.tag_name.lower() == 'input'
		and original.attributes
		and original.attributes.get('type') == 'file'
	)

	keep = (
		visible  # Keep all visible nodes
		or original.is_actually_scrollable
		or original.node_type == NodeType.TEXT_NODE
		or node.children
		or keep_hidden_file_input  # Keep file inputs even if not visible
	)
	return node if keep else None
def _collect_interactive_elements(self, node: SimplifiedNode, elements: list[SimplifiedNode]) -> None:
	"""Depth-first collection of nodes that are both interactive and visible into `elements`."""
	interactive = self._is_interactive_cached(node.original_node)
	visible = node.original_node.snapshot_node and node.original_node.is_visible

	# Both conditions must hold for the node itself to be collected
	if interactive and visible:
		elements.append(node)

	for descendant in node.children:
		self._collect_interactive_elements(descendant, elements)
def _has_interactive_descendants(self, node: SimplifiedNode) -> bool:
"""Check if a node has any interactive descendants (not including the node itself)."""
# Check children for interactivity
for child in node.children:
# Check if child itself is interactive
if self._is_interactive_cached(child.original_node):
return True
# Recursively check child's descendants
if self._has_interactive_descendants(child):
return True
return False
def _is_inside_shadow_dom(self, node: SimplifiedNode) -> bool:
"""Check if a node is inside a shadow DOM by walking up the parent chain.
Shadow DOM elements are descendants of a #document-fragment node (shadow root).
The shadow root node has node_type == DOCUMENT_FRAGMENT_NODE and shadow_root_type set.
"""
current = node.original_node.parent_node
while current is not None:
# Shadow roots are DOCUMENT_FRAGMENT nodes with shadow_root_type
if current.node_type == NodeType.DOCUMENT_FRAGMENT_NODE and current.shadow_root_type is not None:
return True
current = current.parent_node
return False
def _assign_interactive_indices_and_mark_new_nodes(self, node: SimplifiedNode | None) -> None:
"""Assign interactive indices to clickable elements that are also visible."""
if not node:
return
# Skip assigning index to excluded nodes, or ignored by paint order
if not node.excluded_by_parent and not node.ignored_by_paint_order:
# Regular interactive element assignment (including enhanced compound controls)
is_interactive_assign = self._is_interactive_cached(node.original_node)
is_visible = node.original_node.snapshot_node and node.original_node.is_visible
is_scrollable = node.original_node.is_actually_scrollable
# DIAGNOSTIC: Log when interactive elements don't have snapshot_node
if is_interactive_assign and not node.original_node.snapshot_node:
import logging
logger = logging.getLogger('browser_use.dom.serializer')
attrs = node.original_node.attributes or {}
attr_str = f'name={attrs.get("name", "")} id={attrs.get("id", "")} type={attrs.get("type", "")}'
in_shadow = self._is_inside_shadow_dom(node)
if (
in_shadow
and node.original_node.tag_name
and node.original_node.tag_name.lower() in ['input', 'button', 'select', 'textarea', 'a']
):
logger.debug(
f'๐ INCLUDING shadow DOM <{node.original_node.tag_name}> (no snapshot_node but in shadow DOM): '
f'backendNodeId={node.original_node.backend_node_id} {attr_str}'
)
else:
logger.debug(
f'๐ SKIPPING interactive <{node.original_node.tag_name}> (no snapshot_node, not in shadow DOM): '
f'backendNodeId={node.original_node.backend_node_id} {attr_str}'
)
# EXCEPTION: File inputs are often hidden with opacity:0 but are still functional
# Bootstrap and other frameworks use this pattern with custom-styled file pickers
is_file_input = (
node.original_node.tag_name
and node.original_node.tag_name.lower() == 'input'
and node.original_node.attributes
and node.original_node.attributes.get('type') == 'file'
)
# EXCEPTION: Shadow DOM form elements may not have snapshot layout data from CDP's
# DOMSnapshot.captureSnapshot, but they're still functional/interactive.
# This handles login forms, custom web components, etc. inside shadow DOM.
is_shadow_dom_element = (
is_interactive_assign
and not node.original_node.snapshot_node
and node.original_node.tag_name
and node.original_node.tag_name.lower() in ['input', 'button', 'select', 'textarea', 'a']
and self._is_inside_shadow_dom(node)
)
# Check if scrollable container should be made interactive
# For scrollable elements, ONLY make them interactive if they have no interactive descendants
should_make_interactive = False
if is_scrollable:
# For scrollable elements, check if they have interactive children
has_interactive_desc = self._has_interactive_descendants(node)
# Only make scrollable container interactive if it has NO interactive descendants
if not has_interactive_desc:
should_make_interactive = True
elif is_interactive_assign and (is_visible or is_file_input or is_shadow_dom_element):
# Non-scrollable interactive elements: make interactive if visible (or file input or shadow DOM form element)
should_make_interactive = True
# Add to selector map if element should be interactive
if should_make_interactive:
# Mark node as interactive
node.is_interactive = True
# Store backend_node_id in selector map (model outputs backend_node_id)
self._selector_map[node.original_node.backend_node_id] = node.original_node
self._interactive_counter += 1
# Mark compound components as new for visibility
if node.is_compound_component:
node.is_new = True
elif self._previous_cached_selector_map:
# Check if node is new for regular elements
previous_backend_node_ids = {node.backend_node_id for node in self._previous_cached_selector_map.values()}
if node.original_node.backend_node_id not in previous_backend_node_ids:
node.is_new = True
# Process children
for child in node.children:
self._assign_interactive_indices_and_mark_new_nodes(child)
def _apply_bounding_box_filtering(self, node: SimplifiedNode | None) -> SimplifiedNode | None:
"""Filter children contained within propagating parent bounds."""
if not node:
return None
# Start with no active bounds
self._filter_tree_recursive(node, active_bounds=None, depth=0)
# Log statistics
excluded_count = self._count_excluded_nodes(node)
if excluded_count > 0:
import logging
logging.debug(f'BBox filtering excluded {excluded_count} nodes')
return node
def _filter_tree_recursive(self, node: SimplifiedNode, active_bounds: PropagatingBounds | None = None, depth: int = 0):
"""
Recursively filter tree with bounding box propagation.
Bounds propagate to ALL descendants until overridden.
"""
# Check if this node should be excluded by active bounds
if active_bounds and self._should_exclude_child(node, active_bounds):
node.excluded_by_parent = True
# Important: Still check if this node starts NEW propagation
# Check if this node starts new propagation (even if excluded!)
new_bounds = None
tag = node.original_node.tag_name.lower()
role = node.original_node.attributes.get('role') if node.original_node.attributes else None
attributes = {
'tag': tag,
'role': role,
}
# Check if this element matches any propagating element pattern
if self._is_propagating_element(attributes):
# This node propagates bounds to ALL its descendants
if node.original_node.snapshot_node and node.original_node.snapshot_node.bounds:
new_bounds = PropagatingBounds(
tag=tag,
bounds=node.original_node.snapshot_node.bounds,
node_id=node.original_node.node_id,
depth=depth,
)
# Propagate to ALL children
# Use new_bounds if this node starts propagation, otherwise continue with active_bounds
propagate_bounds = new_bounds if new_bounds else active_bounds
for child in node.children:
self._filter_tree_recursive(child, propagate_bounds, depth + 1)
def _should_exclude_child(self, node: SimplifiedNode, active_bounds: PropagatingBounds) -> bool:
"""
Determine if child should be excluded based on propagating bounds.
"""
# Never exclude text nodes - we always want to preserve text content
if node.original_node.node_type == NodeType.TEXT_NODE:
return False
# Get child bounds
if not node.original_node.snapshot_node or not node.original_node.snapshot_node.bounds:
return False # No bounds = can't determine containment
child_bounds = node.original_node.snapshot_node.bounds
# Check containment with configured threshold
if not self._is_contained(child_bounds, active_bounds.bounds, self.containment_threshold):
return False # Not sufficiently contained
# EXCEPTION RULES - Keep these even if contained:
child_tag = node.original_node.tag_name.lower()
child_role = node.original_node.attributes.get('role') if node.original_node.attributes else None
child_attributes = {
'tag': child_tag,
'role': child_role,
}
# 1. Never exclude form elements (they need individual interaction)
if child_tag in ['input', 'select', 'textarea', 'label']:
return False
# 2. Keep if child is also a propagating element
# (might have stopPropagation, e.g., button in button)
if self._is_propagating_element(child_attributes):
return False
# 3. Keep if has explicit onclick handler
if node.original_node.attributes and 'onclick' in node.original_node.attributes:
return False
# 4. Keep if has aria-label suggesting it's independently interactive
if node.original_node.attributes:
aria_label = node.original_node.attributes.get('aria-label')
if aria_label and aria_label.strip():
# Has meaningful aria-label, likely interactive
return False
# 5. Keep if has role suggesting interactivity
if node.original_node.attributes:
role = node.original_node.attributes.get('role')
if role in ['button', 'link', 'checkbox', 'radio', 'tab', 'menuitem', 'option']:
return False
# Default: exclude this child
return True
def _is_contained(self, child: DOMRect, parent: DOMRect, threshold: float) -> bool:
"""
Check if child is contained within parent bounds.
Args:
threshold: Percentage (0.0-1.0) of child that must be within parent
"""
# Calculate intersection
x_overlap = max(0, min(child.x + child.width, parent.x + parent.width) - max(child.x, parent.x))
y_overlap = max(0, min(child.y + child.height, parent.y + parent.height) - max(child.y, parent.y))
intersection_area = x_overlap * y_overlap
child_area = child.width * child.height
if child_area == 0:
return False # Zero-area element
containment_ratio = intersection_area / child_area
return containment_ratio >= threshold
def _count_excluded_nodes(self, node: SimplifiedNode, count: int = 0) -> int:
"""Count how many nodes were excluded (for debugging)."""
if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
count += 1
for child in node.children:
count = self._count_excluded_nodes(child, count)
return count
def _is_propagating_element(self, attributes: dict[str, str | None]) -> bool:
"""
Check if an element should propagate bounds based on attributes.
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/code_use_serializer.py | browser_use/dom/serializer/code_use_serializer.py | # @file purpose: Ultra-compact serializer optimized for code-use agents
# Focuses on minimal token usage while preserving essential interactive context
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
EnhancedDOMTreeNode,
NodeType,
SimplifiedNode,
)
# Minimal but sufficient attribute list for code agents
CODE_USE_KEY_ATTRIBUTES = [
'id', # Essential for element selection
'name', # For form inputs
'type', # For input types
'placeholder', # For empty inputs
'aria-label', # For buttons without text
'value', # Current values
'alt', # For images
'class', # Keep top 2 classes for common selectors
]
# Interactive elements agent can use
INTERACTIVE_ELEMENTS = {
'a',
'button',
'input',
'textarea',
'select',
'form',
}
# Semantic structure elements - expanded to include more content containers
SEMANTIC_STRUCTURE = {
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'nav',
'main',
'header',
'footer',
'article',
'section',
'p', # Paragraphs often contain prices and product info
'span', # Spans often contain prices and labels
'div', # Divs with useful attributes (id/class) should be shown
'ul',
'ol',
'li',
'label',
'img',
}
class DOMCodeAgentSerializer:
"""Optimized DOM serializer for code-use agents - balances token efficiency with context."""
@staticmethod
def serialize_tree(node: SimplifiedNode | None, include_attributes: list[str], depth: int = 0) -> str:
"""
Serialize DOM tree with smart token optimization.
Strategy:
- Keep top 2 CSS classes for querySelector compatibility
- Show div/span/p elements with useful attributes or text
- Show all interactive + semantic elements
- Inline text up to 80 chars for better context
"""
if not node:
return ''
# Skip excluded/hidden nodes
if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
if not node.should_display:
return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
formatted_text = []
depth_str = ' ' * depth # Use 2 spaces instead of tabs for compactness
if node.original_node.node_type == NodeType.ELEMENT_NODE:
tag = node.original_node.tag_name.lower()
is_visible = node.original_node.snapshot_node and node.original_node.is_visible
# Skip invisible (except iframes)
if not is_visible and tag not in ['iframe', 'frame']:
return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
# Special handling for iframes
if tag in ['iframe', 'frame']:
return DOMCodeAgentSerializer._serialize_iframe(node, include_attributes, depth)
# Build minimal attributes
attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(node.original_node)
# Decide if element should be shown
is_interactive = tag in INTERACTIVE_ELEMENTS
is_semantic = tag in SEMANTIC_STRUCTURE
has_useful_attrs = bool(attributes_str)
has_text = DOMCodeAgentSerializer._has_direct_text(node)
# Skip non-semantic, non-interactive containers without attributes
if not is_interactive and not is_semantic and not has_useful_attrs and not has_text:
return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
# Collapse pointless wrappers
if tag in {'div', 'span'} and not has_useful_attrs and not has_text and len(node.children) == 1:
return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
# Build element
line = f'{depth_str}<{tag}'
if attributes_str:
line += f' {attributes_str}'
# Inline text
inline_text = DOMCodeAgentSerializer._get_inline_text(node)
if inline_text:
line += f'>{inline_text}'
else:
line += '>'
formatted_text.append(line)
# Children (only if no inline text)
if node.children and not inline_text:
children_text = DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth + 1)
if children_text:
formatted_text.append(children_text)
elif node.original_node.node_type == NodeType.TEXT_NODE:
# Handled inline with parent
pass
elif node.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
# Shadow DOM - minimal marker
if node.children:
formatted_text.append(f'{depth_str}#shadow')
children_text = DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth + 1)
if children_text:
formatted_text.append(children_text)
return '\n'.join(formatted_text)
@staticmethod
def _serialize_children(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
"""Serialize children."""
children_output = []
for child in node.children:
child_text = DOMCodeAgentSerializer.serialize_tree(child, include_attributes, depth)
if child_text:
children_output.append(child_text)
return '\n'.join(children_output)
@staticmethod
def _build_minimal_attributes(node: EnhancedDOMTreeNode) -> str:
"""Build minimal but useful attributes - keep top 2 classes for selectors."""
attrs = []
if node.attributes:
for attr in CODE_USE_KEY_ATTRIBUTES:
if attr in node.attributes:
value = str(node.attributes[attr]).strip()
if value:
# Special handling for class - keep only first 2 classes
if attr == 'class':
classes = value.split()[:2]
value = ' '.join(classes)
# Cap at 25 chars
value = cap_text_length(value, 25)
attrs.append(f'{attr}="{value}"')
return ' '.join(attrs)
@staticmethod
def _has_direct_text(node: SimplifiedNode) -> bool:
"""Check if node has direct text children."""
for child in node.children:
if child.original_node.node_type == NodeType.TEXT_NODE:
text = child.original_node.node_value.strip() if child.original_node.node_value else ''
if len(text) > 1:
return True
return False
@staticmethod
def _get_inline_text(node: SimplifiedNode) -> str:
"""Get inline text (max 80 chars for better context)."""
text_parts = []
for child in node.children:
if child.original_node.node_type == NodeType.TEXT_NODE:
text = child.original_node.node_value.strip() if child.original_node.node_value else ''
if text and len(text) > 1:
text_parts.append(text)
if not text_parts:
return ''
combined = ' '.join(text_parts)
return cap_text_length(combined, 40)
@staticmethod
def _serialize_iframe(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
"""Handle iframe minimally."""
formatted_text = []
depth_str = ' ' * depth
tag = node.original_node.tag_name.lower()
# Minimal iframe marker
attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(node.original_node)
line = f'{depth_str}<{tag}'
if attributes_str:
line += f' {attributes_str}'
line += '>'
formatted_text.append(line)
# Iframe content
if node.original_node.content_document:
formatted_text.append(f'{depth_str} #iframe-content')
# Find and serialize body content only
for child_node in node.original_node.content_document.children_nodes or []:
if child_node.tag_name.lower() == 'html':
for html_child in child_node.children:
if html_child.tag_name.lower() == 'body':
for body_child in html_child.children:
DOMCodeAgentSerializer._serialize_document_node(
body_child, formatted_text, include_attributes, depth + 2
)
break
return '\n'.join(formatted_text)
@staticmethod
def _serialize_document_node(
dom_node: EnhancedDOMTreeNode, output: list[str], include_attributes: list[str], depth: int
) -> None:
"""Serialize document node without SimplifiedNode wrapper."""
depth_str = ' ' * depth
if dom_node.node_type == NodeType.ELEMENT_NODE:
tag = dom_node.tag_name.lower()
# Skip invisible
is_visible = dom_node.snapshot_node and dom_node.is_visible
if not is_visible:
return
# Check if worth showing
is_interactive = tag in INTERACTIVE_ELEMENTS
is_semantic = tag in SEMANTIC_STRUCTURE
attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(dom_node)
if not is_interactive and not is_semantic and not attributes_str:
# Skip but process children
for child in dom_node.children:
DOMCodeAgentSerializer._serialize_document_node(child, output, include_attributes, depth)
return
# Build element
line = f'{depth_str}<{tag}'
if attributes_str:
line += f' {attributes_str}'
# Get text
text_parts = []
for child in dom_node.children:
if child.node_type == NodeType.TEXT_NODE and child.node_value:
text = child.node_value.strip()
if text and len(text) > 1:
text_parts.append(text)
if text_parts:
combined = ' '.join(text_parts)
line += f'>{cap_text_length(combined, 25)}'
else:
line += '>'
output.append(line)
# Process non-text children
for child in dom_node.children:
if child.node_type != NodeType.TEXT_NODE:
DOMCodeAgentSerializer._serialize_document_node(child, output, include_attributes, depth + 1)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/clickable_elements.py | browser_use/dom/serializer/clickable_elements.py | from browser_use.dom.views import EnhancedDOMTreeNode, NodeType
class ClickableElementDetector:
@staticmethod
def is_interactive(node: EnhancedDOMTreeNode) -> bool:
"""Check if this node is clickable/interactive using enhanced scoring."""
# Skip non-element nodes
if node.node_type != NodeType.ELEMENT_NODE:
return False
# # if ax ignored skip
# if node.ax_node and node.ax_node.ignored:
# return False
# remove html and body nodes
if node.tag_name in {'html', 'body'}:
return False
# IFRAME elements should be interactive if they're large enough to potentially need scrolling
# Small iframes (< 100px width or height) are unlikely to have scrollable content
if node.tag_name and node.tag_name.upper() == 'IFRAME' or node.tag_name.upper() == 'FRAME':
if node.snapshot_node and node.snapshot_node.bounds:
width = node.snapshot_node.bounds.width
height = node.snapshot_node.bounds.height
# Only include iframes larger than 100x100px
if width > 100 and height > 100:
return True
# RELAXED SIZE CHECK: Allow all elements including size 0 (they might be interactive overlays, etc.)
# Note: Size 0 elements can still be interactive (e.g., invisible clickable overlays)
# Visibility is determined separately by CSS styles, not just bounding box size
# SEARCH ELEMENT DETECTION: Check for search-related classes and attributes
if node.attributes:
search_indicators = {
'search',
'magnify',
'glass',
'lookup',
'find',
'query',
'search-icon',
'search-btn',
'search-button',
'searchbox',
}
# Check class names for search indicators
class_list = node.attributes.get('class', '').lower().split()
if any(indicator in ' '.join(class_list) for indicator in search_indicators):
return True
# Check id for search indicators
element_id = node.attributes.get('id', '').lower()
if any(indicator in element_id for indicator in search_indicators):
return True
# Check data attributes for search functionality
for attr_name, attr_value in node.attributes.items():
if attr_name.startswith('data-') and any(indicator in attr_value.lower() for indicator in search_indicators):
return True
# Enhanced accessibility property checks - direct clear indicators only
if node.ax_node and node.ax_node.properties:
for prop in node.ax_node.properties:
try:
# aria disabled
if prop.name == 'disabled' and prop.value:
return False
# aria hidden
if prop.name == 'hidden' and prop.value:
return False
# Direct interactiveness indicators
if prop.name in ['focusable', 'editable', 'settable'] and prop.value:
return True
# Interactive state properties (presence indicates interactive widget)
if prop.name in ['checked', 'expanded', 'pressed', 'selected']:
# These properties only exist on interactive elements
return True
# Form-related interactiveness
if prop.name in ['required', 'autocomplete'] and prop.value:
return True
# Elements with keyboard shortcuts are interactive
if prop.name == 'keyshortcuts' and prop.value:
return True
except (AttributeError, ValueError):
# Skip properties we can't process
continue
# ENHANCED TAG CHECK: Include truly interactive elements
# Note: 'label' removed - labels are handled by other attribute checks below - other wise labels with "for" attribute can destroy the real clickable element on apartments.com
interactive_tags = {
'button',
'input',
'select',
'textarea',
'a',
'details',
'summary',
'option',
'optgroup',
}
# Check with case-insensitive comparison
if node.tag_name and node.tag_name.lower() in interactive_tags:
return True
# SVG elements need special handling - only interactive if they have explicit handlers
# svg_tags = {'svg', 'path', 'circle', 'rect', 'polygon', 'ellipse', 'line', 'polyline', 'g'}
# if node.tag_name in svg_tags:
# # Only consider SVG elements interactive if they have:
# # 1. Explicit event handlers
# # 2. Interactive role attributes
# # 3. Cursor pointer style
# if node.attributes:
# # Check for event handlers
# if any(attr.startswith('on') for attr in node.attributes):
# return True
# # Check for interactive roles
# if node.attributes.get('role') in {'button', 'link', 'menuitem'}:
# return True
# # Check for cursor pointer (indicating clickability)
# if node.attributes.get('style') and 'cursor: pointer' in node.attributes.get('style', ''):
# return True
# # Otherwise, SVG elements are decorative
# return False
# Tertiary check: elements with interactive attributes
if node.attributes:
# Check for event handlers or interactive attributes
interactive_attributes = {'onclick', 'onmousedown', 'onmouseup', 'onkeydown', 'onkeyup', 'tabindex'}
if any(attr in node.attributes for attr in interactive_attributes):
return True
# Check for interactive ARIA roles
if 'role' in node.attributes:
interactive_roles = {
'button',
'link',
'menuitem',
'option',
'radio',
'checkbox',
'tab',
'textbox',
'combobox',
'slider',
'spinbutton',
'search',
'searchbox',
}
if node.attributes['role'] in interactive_roles:
return True
# Quaternary check: accessibility tree roles
if node.ax_node and node.ax_node.role:
interactive_ax_roles = {
'button',
'link',
'menuitem',
'option',
'radio',
'checkbox',
'tab',
'textbox',
'combobox',
'slider',
'spinbutton',
'listbox',
'search',
'searchbox',
}
if node.ax_node.role in interactive_ax_roles:
return True
# ICON AND SMALL ELEMENT CHECK: Elements that might be icons
if (
node.snapshot_node
and node.snapshot_node.bounds
and 10 <= node.snapshot_node.bounds.width <= 50 # Icon-sized elements
and 10 <= node.snapshot_node.bounds.height <= 50
):
# Check if this small element has interactive properties
if node.attributes:
# Small elements with these attributes are likely interactive icons
icon_attributes = {'class', 'role', 'onclick', 'data-action', 'aria-label'}
if any(attr in node.attributes for attr in icon_attributes):
return True
# Final fallback: cursor style indicates interactivity (for cases Chrome missed)
if node.snapshot_node and node.snapshot_node.cursor_style and node.snapshot_node.cursor_style == 'pointer':
return True
return False
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/html_serializer.py | browser_use/dom/serializer/html_serializer.py | # @file purpose: Serializes enhanced DOM trees to HTML format including shadow roots
from browser_use.dom.views import EnhancedDOMTreeNode, NodeType
class HTMLSerializer:
"""Serializes enhanced DOM trees back to HTML format.
This serializer reconstructs HTML from the enhanced DOM tree, including:
- Shadow DOM content (both open and closed)
- Iframe content documents
- All attributes and text nodes
- Proper HTML structure
Unlike getOuterHTML which only captures light DOM, this captures the full
enhanced tree including shadow roots that are crucial for modern SPAs.
"""
def __init__(self, extract_links: bool = False):
"""Initialize the HTML serializer.
Args:
extract_links: If True, preserves all links. If False, removes href attributes.
"""
self.extract_links = extract_links
def serialize(self, node: EnhancedDOMTreeNode, depth: int = 0) -> str:
"""Serialize an enhanced DOM tree node to HTML.
Args:
node: The enhanced DOM tree node to serialize
depth: Current depth for indentation (internal use)
Returns:
HTML string representation of the node and its descendants
"""
if node.node_type == NodeType.DOCUMENT_NODE:
# Process document root - serialize all children
parts = []
for child in node.children_and_shadow_roots:
child_html = self.serialize(child, depth)
if child_html:
parts.append(child_html)
return ''.join(parts)
elif node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
# Shadow DOM root - wrap in template with shadowrootmode attribute
parts = []
# Add shadow root opening
shadow_type = node.shadow_root_type or 'open'
parts.append(f'<template shadowroot="{shadow_type.lower()}">')
# Serialize shadow children
for child in node.children:
child_html = self.serialize(child, depth + 1)
if child_html:
parts.append(child_html)
# Close shadow root
parts.append('</template>')
return ''.join(parts)
elif node.node_type == NodeType.ELEMENT_NODE:
parts = []
tag_name = node.tag_name.lower()
# Skip non-content elements
if tag_name in {'style', 'script', 'head', 'meta', 'link', 'title'}:
return ''
# Skip code tags with display:none - these often contain JSON state for SPAs
if tag_name == 'code' and node.attributes:
style = node.attributes.get('style', '')
# Check if element is hidden (display:none) - likely JSON data
if 'display:none' in style.replace(' ', '') or 'display: none' in style:
return ''
# Also check for bpr-guid IDs (LinkedIn's JSON data pattern)
element_id = node.attributes.get('id', '')
if 'bpr-guid' in element_id or 'data' in element_id or 'state' in element_id:
return ''
# Skip base64 inline images - these are usually placeholders or tracking pixels
if tag_name == 'img' and node.attributes:
src = node.attributes.get('src', '')
if src.startswith('data:image/'):
return ''
# Opening tag
parts.append(f'<{tag_name}')
# Add attributes
if node.attributes:
attrs = self._serialize_attributes(node.attributes)
if attrs:
parts.append(' ' + attrs)
# Handle void elements (self-closing)
void_elements = {
'area',
'base',
'br',
'col',
'embed',
'hr',
'img',
'input',
'link',
'meta',
'param',
'source',
'track',
'wbr',
}
if tag_name in void_elements:
parts.append(' />')
return ''.join(parts)
parts.append('>')
# Handle iframe content document
if tag_name in {'iframe', 'frame'} and node.content_document:
# Serialize iframe content
for child in node.content_document.children_nodes or []:
child_html = self.serialize(child, depth + 1)
if child_html:
parts.append(child_html)
else:
# Serialize shadow roots FIRST (for declarative shadow DOM)
if node.shadow_roots:
for shadow_root in node.shadow_roots:
child_html = self.serialize(shadow_root, depth + 1)
if child_html:
parts.append(child_html)
# Then serialize light DOM children (for slot projection)
for child in node.children:
child_html = self.serialize(child, depth + 1)
if child_html:
parts.append(child_html)
# Closing tag
parts.append(f'</{tag_name}>')
return ''.join(parts)
elif node.node_type == NodeType.TEXT_NODE:
# Return text content with basic HTML escaping
if node.node_value:
return self._escape_html(node.node_value)
return ''
elif node.node_type == NodeType.COMMENT_NODE:
# Skip comments to reduce noise
return ''
else:
# Unknown node type - skip
return ''
def _serialize_attributes(self, attributes: dict[str, str]) -> str:
"""Serialize element attributes to HTML attribute string.
Args:
attributes: Dictionary of attribute names to values
Returns:
HTML attribute string (e.g., 'class="foo" id="bar"')
"""
parts = []
for key, value in attributes.items():
# Skip href if not extracting links
if not self.extract_links and key == 'href':
continue
# Skip data-* attributes as they often contain JSON payloads
# These are used by modern SPAs (React, Vue, Angular) for state management
if key.startswith('data-'):
continue
# Handle boolean attributes
if value == '' or value is None:
parts.append(key)
else:
# Escape attribute value
escaped_value = self._escape_attribute(value)
parts.append(f'{key}="{escaped_value}"')
return ' '.join(parts)
def _escape_html(self, text: str) -> str:
"""Escape HTML special characters in text content.
Args:
text: Raw text content
Returns:
HTML-escaped text
"""
return text.replace('&', '&').replace('<', '<').replace('>', '>')
def _escape_attribute(self, value: str) -> str:
"""Escape HTML special characters in attribute values.
Args:
value: Raw attribute value
Returns:
HTML-escaped attribute value
"""
return value.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", ''')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/serializer/eval_serializer.py | browser_use/dom/serializer/eval_serializer.py | # @file purpose: Concise evaluation serializer for DOM trees - optimized for LLM query writing
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
EnhancedDOMTreeNode,
NodeType,
SimplifiedNode,
)
# Critical attributes for query writing and form interaction
# NOTE: Removed 'id' and 'class' to force more robust structural selectors
EVAL_KEY_ATTRIBUTES = [
'id', # Removed - can have special chars, forces structural selectors
'class', # Removed - can have special chars like +, forces structural selectors
'name',
'type',
'placeholder',
'aria-label',
'role',
'value',
# 'href',
'data-testid',
'alt', # for images
'title', # useful for tooltips/link context
# State attributes (critical for form interaction)
'checked',
'selected',
'disabled',
'required',
'readonly',
# ARIA states
'aria-expanded',
'aria-pressed',
'aria-checked',
'aria-selected',
'aria-invalid',
# Validation attributes (help agents avoid brute force)
'pattern',
'min',
'max',
'minlength',
'maxlength',
'step',
'aria-valuemin',
'aria-valuemax',
'aria-valuenow',
]
# Semantic elements that should always be shown
SEMANTIC_ELEMENTS = {
'html', # Always show document root
'body', # Always show body
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'a',
'button',
'input',
'textarea',
'select',
'form',
'label',
'nav',
'header',
'footer',
'main',
'article',
'section',
'table',
'thead',
'tbody',
'tr',
'th',
'td',
'ul',
'ol',
'li',
'img',
'iframe',
'video',
'audio',
}
# Container elements that can be collapsed if they only wrap one child
COLLAPSIBLE_CONTAINERS = {'div', 'span', 'section', 'article'}
# SVG child elements to skip (decorative only, no interaction value)
SVG_ELEMENTS = {
'path',
'rect',
'g',
'circle',
'ellipse',
'line',
'polyline',
'polygon',
'use',
'defs',
'clipPath',
'mask',
'pattern',
'image',
'text',
'tspan',
}
class DOMEvalSerializer:
"""Ultra-concise DOM serializer for quick LLM query writing."""
@staticmethod
def serialize_tree(node: SimplifiedNode | None, include_attributes: list[str], depth: int = 0) -> str:
"""
Serialize complete DOM tree structure for LLM understanding.
Strategy:
- Show ALL elements to preserve DOM structure
- Non-interactive elements show just tag name
- Interactive elements show full attributes + [index]
- Self-closing tags only (no closing tags)
"""
if not node:
return ''
# Skip excluded nodes but process children
if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
# Skip nodes marked as should_display=False
if not node.should_display:
return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
formatted_text = []
depth_str = depth * '\t'
if node.original_node.node_type == NodeType.ELEMENT_NODE:
tag = node.original_node.tag_name.lower()
is_visible = node.original_node.snapshot_node and node.original_node.is_visible
# Container elements that should be shown even if invisible (might have visible children)
container_tags = {'html', 'body', 'div', 'main', 'section', 'article', 'aside', 'header', 'footer', 'nav'}
# Skip invisible elements UNLESS they're containers or iframes (which might have visible children)
if not is_visible and tag not in container_tags and tag not in ['iframe', 'frame']:
return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
# Special handling for iframes - show them with their content
if tag in ['iframe', 'frame']:
return DOMEvalSerializer._serialize_iframe(node, include_attributes, depth)
# Skip SVG elements entirely - they're just decorative graphics with no interaction value
# Show the <svg> tag itself to indicate graphics, but don't recurse into children
if tag == 'svg':
line = f'{depth_str}'
# Add [i_X] for interactive SVG elements only
if node.is_interactive:
line += f'[i_{node.original_node.backend_node_id}] '
line += '<svg'
attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
if attributes_str:
line += f' {attributes_str}'
line += ' /> <!-- SVG content collapsed -->'
return line
# Skip SVG child elements entirely (path, rect, g, circle, etc.)
if tag in SVG_ELEMENTS:
return ''
# Build compact attributes string
attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
# Decide if this element should be shown
is_semantic = tag in SEMANTIC_ELEMENTS
has_useful_attrs = bool(attributes_str)
has_text_content = DOMEvalSerializer._has_direct_text(node)
has_children = len(node.children) > 0
# Build compact element representation
line = f'{depth_str}'
# Add backend node ID notation - [i_X] for interactive elements only
if node.is_interactive:
line += f'[i_{node.original_node.backend_node_id}] '
# Non-interactive elements don't get an index notation
line += f'<{tag}'
if attributes_str:
line += f' {attributes_str}'
# Add scroll info if element is scrollable
if node.original_node.should_show_scroll_info:
scroll_text = node.original_node.get_scroll_info_text()
if scroll_text:
line += f' scroll="{scroll_text}"'
# Add inline text if present (keep it on same line for compactness)
inline_text = DOMEvalSerializer._get_inline_text(node)
# For containers (html, body, div, etc.), always show children even if there's inline text
# For other elements, inline text replaces children (more compact)
is_container = tag in container_tags
if inline_text and not is_container:
line += f'>{inline_text}'
else:
line += ' />'
formatted_text.append(line)
# Process children (always for containers, only if no inline_text for others)
if has_children and (is_container or not inline_text):
children_text = DOMEvalSerializer._serialize_children(node, include_attributes, depth + 1)
if children_text:
formatted_text.append(children_text)
elif node.original_node.node_type == NodeType.TEXT_NODE:
# Text nodes are handled inline with their parent
pass
elif node.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
# Shadow DOM - just show children directly with minimal marker
if node.children:
formatted_text.append(f'{depth_str}#shadow')
children_text = DOMEvalSerializer._serialize_children(node, include_attributes, depth + 1)
if children_text:
formatted_text.append(children_text)
return '\n'.join(formatted_text)
@staticmethod
def _serialize_children(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
"""Helper to serialize all children of a node."""
children_output = []
# Check if parent is a list container (ul, ol)
is_list_container = node.original_node.node_type == NodeType.ELEMENT_NODE and node.original_node.tag_name.lower() in [
'ul',
'ol',
]
# Track list items and consecutive links
li_count = 0
max_list_items = 50
consecutive_link_count = 0
max_consecutive_links = 50
total_links_skipped = 0
for child in node.children:
# Get tag name for this child
current_tag = None
if child.original_node.node_type == NodeType.ELEMENT_NODE:
current_tag = child.original_node.tag_name.lower()
# If we're in a list container and this child is an li element
if is_list_container and current_tag == 'li':
li_count += 1
# Skip li elements after the 5th one
if li_count > max_list_items:
continue
# Track consecutive anchor tags (links)
if current_tag == 'a':
consecutive_link_count += 1
# Skip links after the 5th consecutive one
if consecutive_link_count > max_consecutive_links:
total_links_skipped += 1
continue
else:
# Reset counter when we hit a non-link element
# But first add truncation message if we skipped links
if total_links_skipped > 0:
depth_str = depth * '\t'
children_output.append(f'{depth_str}... ({total_links_skipped} more links in this list)')
total_links_skipped = 0
consecutive_link_count = 0
child_text = DOMEvalSerializer.serialize_tree(child, include_attributes, depth)
if child_text:
children_output.append(child_text)
# Add truncation message if we skipped items at the end
if is_list_container and li_count > max_list_items:
depth_str = depth * '\t'
children_output.append(
f'{depth_str}... ({li_count - max_list_items} more items in this list (truncated) use evaluate to get more.'
)
# Add truncation message for links if we skipped any at the end
if total_links_skipped > 0:
depth_str = depth * '\t'
children_output.append(
f'{depth_str}... ({total_links_skipped} more links in this list) (truncated) use evaluate to get more.'
)
return '\n'.join(children_output)
@staticmethod
def _build_compact_attributes(node: EnhancedDOMTreeNode) -> str:
"""Build ultra-compact attributes string with only key attributes."""
attrs = []
# Prioritize attributes that help with query writing
if node.attributes:
for attr in EVAL_KEY_ATTRIBUTES:
if attr in node.attributes:
value = str(node.attributes[attr]).strip()
if not value:
continue
# Special handling for different attributes
if attr == 'class':
# For class, limit to first 2 classes to save space
classes = value.split()[:3]
value = ' '.join(classes)
elif attr == 'href':
# For href, cap at 20 chars to save space
value = cap_text_length(value, 80)
else:
# Cap at 25 chars for other attributes
value = cap_text_length(value, 80)
attrs.append(f'{attr}="{value}"')
# Note: We intentionally don't add role from ax_node here because:
# 1. If role is explicitly set in HTML, it's already captured above via EVAL_KEY_ATTRIBUTES
# 2. Inferred roles from AX tree (like link, listitem, LineBreak) are redundant with the tag name
# 3. This reduces noise - <a href="..." role="link"> is redundant, we already know <a> is a link
return ' '.join(attrs)
@staticmethod
def _has_direct_text(node: SimplifiedNode) -> bool:
"""Check if node has direct text children (not nested in other elements)."""
for child in node.children:
if child.original_node.node_type == NodeType.TEXT_NODE:
text = child.original_node.node_value.strip() if child.original_node.node_value else ''
if len(text) > 1:
return True
return False
@staticmethod
def _get_inline_text(node: SimplifiedNode) -> str:
"""Get text content to display inline (max 40 chars)."""
text_parts = []
for child in node.children:
if child.original_node.node_type == NodeType.TEXT_NODE:
text = child.original_node.node_value.strip() if child.original_node.node_value else ''
if text and len(text) > 1:
text_parts.append(text)
if not text_parts:
return ''
combined = ' '.join(text_parts)
return cap_text_length(combined, 80)
@staticmethod
def _serialize_iframe(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
"""Handle iframe serialization with content document."""
formatted_text = []
depth_str = depth * '\t'
tag = node.original_node.tag_name.lower()
# Build minimal iframe marker with key attributes
attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
line = f'{depth_str}<{tag}'
if attributes_str:
line += f' {attributes_str}'
# Add scroll info for iframe content
if node.original_node.should_show_scroll_info:
scroll_text = node.original_node.get_scroll_info_text()
if scroll_text:
line += f' scroll="{scroll_text}"'
line += ' />'
formatted_text.append(line)
# If iframe has content document, serialize its content
if node.original_node.content_document:
# Add marker for iframe content
formatted_text.append(f'{depth_str}\t#iframe-content')
# Process content document children
for child_node in node.original_node.content_document.children_nodes or []:
# Process html documents
if child_node.tag_name.lower() == 'html':
# Find and serialize body content only (skip head)
for html_child in child_node.children:
if html_child.tag_name.lower() == 'body':
for body_child in html_child.children:
# Recursively process body children (iframe content)
DOMEvalSerializer._serialize_document_node(
body_child, formatted_text, include_attributes, depth + 2, is_iframe_content=True
)
break # Stop after processing body
else:
# Not an html element - serialize directly
DOMEvalSerializer._serialize_document_node(
child_node, formatted_text, include_attributes, depth + 1, is_iframe_content=True
)
return '\n'.join(formatted_text)
@staticmethod
def _serialize_document_node(
dom_node: EnhancedDOMTreeNode,
output: list[str],
include_attributes: list[str],
depth: int,
is_iframe_content: bool = True,
) -> None:
"""Helper to serialize a document node without SimplifiedNode wrapper.
Args:
is_iframe_content: If True, be more permissive with visibility checks since
iframe content might not have snapshot data from parent page.
"""
depth_str = depth * '\t'
if dom_node.node_type == NodeType.ELEMENT_NODE:
tag = dom_node.tag_name.lower()
# For iframe content, be permissive - show all semantic elements even without snapshot data
# For regular content, skip invisible elements
if is_iframe_content:
# Only skip if we have snapshot data AND it's explicitly invisible
# If no snapshot data, assume visible (cross-origin iframe content)
is_visible = (not dom_node.snapshot_node) or dom_node.is_visible
else:
# Regular strict visibility check
is_visible = dom_node.snapshot_node and dom_node.is_visible
if not is_visible:
return
# Check if semantic or has useful attributes
is_semantic = tag in SEMANTIC_ELEMENTS
attributes_str = DOMEvalSerializer._build_compact_attributes(dom_node)
if not is_semantic and not attributes_str:
# Skip but process children
for child in dom_node.children:
DOMEvalSerializer._serialize_document_node(
child, output, include_attributes, depth, is_iframe_content=is_iframe_content
)
return
# Build element line
line = f'{depth_str}<{tag}'
if attributes_str:
line += f' {attributes_str}'
# Get direct text content
text_parts = []
for child in dom_node.children:
if child.node_type == NodeType.TEXT_NODE and child.node_value:
text = child.node_value.strip()
if text and len(text) > 1:
text_parts.append(text)
if text_parts:
combined = ' '.join(text_parts)
line += f'>{cap_text_length(combined, 100)}'
else:
line += ' />'
output.append(line)
# Process non-text children
for child in dom_node.children:
if child.node_type != NodeType.TEXT_NODE:
DOMEvalSerializer._serialize_document_node(
child, output, include_attributes, depth + 1, is_iframe_content=is_iframe_content
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/playground/extraction.py | browser_use/dom/playground/extraction.py | import asyncio
import json
import os
import time
import anyio
import pyperclip
import tiktoken
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.events import ClickElementEvent, TypeTextEvent
from browser_use.browser.profile import ViewportSize
from browser_use.dom.service import DomService
from browser_use.dom.views import DEFAULT_INCLUDE_ATTRIBUTES
from browser_use.filesystem.file_system import FileSystem
TIMEOUT = 60
async def test_focus_vs_all_elements():
browser_session = BrowserSession(
browser_profile=BrowserProfile(
# executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
window_size=ViewportSize(width=1100, height=1000),
disable_security=False,
wait_for_network_idle_page_load_time=1,
headless=False,
args=['--incognito'],
paint_order_filtering=True,
),
)
# 10 Sample websites with various interactive elements
sample_websites = [
'https://browser-use.github.io/stress-tests/challenges/iframe-inception-level2.html',
'https://www.google.com/travel/flights',
'https://v0-simple-ui-test-site.vercel.app',
'https://browser-use.github.io/stress-tests/challenges/iframe-inception-level1.html',
'https://browser-use.github.io/stress-tests/challenges/angular-form.html',
'https://www.google.com/travel/flights',
'https://www.amazon.com/s?k=laptop',
'https://github.com/trending',
'https://www.reddit.com',
'https://www.ycombinator.com/companies',
'https://www.kayak.com/flights',
'https://www.booking.com',
'https://www.airbnb.com',
'https://www.linkedin.com/jobs',
'https://stackoverflow.com/questions',
]
# 5 Difficult websites with complex elements (iframes, canvas, dropdowns, etc.)
difficult_websites = [
'https://www.w3schools.com/html/tryit.asp?filename=tryhtml_iframe', # Nested iframes
'https://semantic-ui.com/modules/dropdown.html', # Complex dropdowns
'https://www.dezlearn.com/nested-iframes-example/', # Cross-origin nested iframes
'https://codepen.io/towc/pen/mJzOWJ', # Canvas elements with interactions
'https://jqueryui.com/accordion/', # Complex accordion/dropdown widgets
'https://v0-simple-landing-page-seven-xi.vercel.app/', # Simple landing page with iframe
'https://www.unesco.org/en',
]
# Descriptions for difficult websites
difficult_descriptions = {
'https://www.w3schools.com/html/tryit.asp?filename=tryhtml_iframe': '๐ธ NESTED IFRAMES: Multiple iframe layers',
'https://semantic-ui.com/modules/dropdown.html': '๐ธ COMPLEX DROPDOWNS: Custom dropdown components',
'https://www.dezlearn.com/nested-iframes-example/': '๐ธ CROSS-ORIGIN IFRAMES: Different domain iframes',
'https://codepen.io/towc/pen/mJzOWJ': '๐ธ CANVAS ELEMENTS: Interactive canvas graphics',
'https://jqueryui.com/accordion/': '๐ธ ACCORDION WIDGETS: Collapsible content sections',
}
websites = sample_websites + difficult_websites
current_website_index = 0
def get_website_list_for_prompt() -> str:
"""Get a compact website list for the input prompt."""
lines = []
lines.append('๐ Websites:')
# Sample websites (1-10)
for i, site in enumerate(sample_websites, 1):
current_marker = ' โ' if (i - 1) == current_website_index else ''
domain = site.replace('https://', '').split('/')[0]
lines.append(f' {i:2d}.{domain[:15]:<15}{current_marker}')
# Difficult websites (11-15)
for i, site in enumerate(difficult_websites, len(sample_websites) + 1):
current_marker = ' โ' if (i - 1) == current_website_index else ''
domain = site.replace('https://', '').split('/')[0]
desc = difficult_descriptions.get(site, '')
challenge = desc.split(': ')[1][:15] if ': ' in desc else ''
lines.append(f' {i:2d}.{domain[:15]:<15} ({challenge}){current_marker}')
return '\n'.join(lines)
await browser_session.start()
# Show startup info
print('\n๐ BROWSER-USE DOM EXTRACTION TESTER')
print(f'๐ {len(websites)} websites total: {len(sample_websites)} standard + {len(difficult_websites)} complex')
print('๐ง Controls: Type 1-15 to jump | Enter to re-run | "n" next | "q" quit')
print('๐พ Outputs: tmp/user_message.txt & tmp/element_tree.json\n')
dom_service = DomService(browser_session)
while True:
# Cycle through websites
if current_website_index >= len(websites):
current_website_index = 0
print('Cycled back to first website!')
website = websites[current_website_index]
# sleep 2
await browser_session._cdp_navigate(website)
await asyncio.sleep(1)
last_clicked_index = None # Track the index for text input
while True:
try:
# all_elements_state = await dom_service.get_serialized_dom_tree()
website_type = 'DIFFICULT' if website in difficult_websites else 'SAMPLE'
print(f'\n{"=" * 60}')
print(f'[{current_website_index + 1}/{len(websites)}] [{website_type}] Testing: {website}')
if website in difficult_descriptions:
print(f'{difficult_descriptions[website]}')
print(f'{"=" * 60}')
# Get/refresh the state (includes removing old highlights)
print('\nGetting page state...')
start_time = time.time()
all_elements_state = await browser_session.get_browser_state_summary(True)
end_time = time.time()
get_state_time = end_time - start_time
print(f'get_state_summary took {get_state_time:.2f} seconds')
# Get detailed timing info from DOM service
print('\nGetting detailed DOM timing...')
serialized_state, _, timing_info = await dom_service.get_serialized_dom_tree()
# Combine all timing info
all_timing = {'get_state_summary_total': get_state_time, **timing_info}
selector_map = all_elements_state.dom_state.selector_map
total_elements = len(selector_map.keys())
print(f'Total number of elements: {total_elements}')
# print(all_elements_state.element_tree.clickable_elements_to_string())
prompt = AgentMessagePrompt(
browser_state_summary=all_elements_state,
file_system=FileSystem(base_dir='./tmp'),
include_attributes=DEFAULT_INCLUDE_ATTRIBUTES,
step_info=None,
)
# Write the user message to a file for analysis
user_message = prompt.get_user_message(use_vision=False).text
# clickable_elements_str = all_elements_state.element_tree.clickable_elements_to_string()
text_to_save = user_message
os.makedirs('./tmp', exist_ok=True)
async with await anyio.open_file('./tmp/user_message.txt', 'w', encoding='utf-8') as f:
await f.write(text_to_save)
# save pure clickable elements to a file
if all_elements_state.dom_state._root:
async with await anyio.open_file('./tmp/simplified_element_tree.json', 'w', encoding='utf-8') as f:
await f.write(json.dumps(all_elements_state.dom_state._root.__json__(), indent=2))
async with await anyio.open_file('./tmp/original_element_tree.json', 'w', encoding='utf-8') as f:
await f.write(json.dumps(all_elements_state.dom_state._root.original_node.__json__(), indent=2))
# copy the user message to the clipboard
# pyperclip.copy(text_to_save)
encoding = tiktoken.encoding_for_model('gpt-4.1-mini')
token_count = len(encoding.encode(text_to_save))
print(f'Token count: {token_count}')
print('User message written to ./tmp/user_message.txt')
print('Element tree written to ./tmp/simplified_element_tree.json')
print('Original element tree written to ./tmp/original_element_tree.json')
# Save timing information
timing_text = '๐ DOM EXTRACTION PERFORMANCE ANALYSIS\n'
timing_text += f'{"=" * 50}\n\n'
timing_text += f'๐ Website: {website}\n'
timing_text += f'๐ Total Elements: {total_elements}\n'
timing_text += f'๐ฏ Token Count: {token_count}\n\n'
timing_text += 'โฑ๏ธ TIMING BREAKDOWN:\n'
timing_text += f'{"โ" * 30}\n'
for key, value in all_timing.items():
timing_text += f'{key:<35}: {value * 1000:>8.2f} ms\n'
# Calculate percentages
total_time = all_timing.get('get_state_summary_total', 0)
if total_time > 0 and total_elements > 0:
timing_text += '\n๐ PERCENTAGE BREAKDOWN:\n'
timing_text += f'{"โ" * 30}\n'
for key, value in all_timing.items():
if key != 'get_state_summary_total':
percentage = (value / total_time) * 100
timing_text += f'{key:<35}: {percentage:>7.1f}%\n'
timing_text += '\n๐ฏ CLICKABLE DETECTION ANALYSIS:\n'
timing_text += f'{"โ" * 35}\n'
clickable_time = all_timing.get('clickable_detection_time', 0)
if clickable_time > 0 and total_elements > 0:
avg_per_element = (clickable_time / total_elements) * 1000000 # microseconds
timing_text += f'Total clickable detection time: {clickable_time * 1000:.2f} ms\n'
timing_text += f'Average per element: {avg_per_element:.2f} ฮผs\n'
timing_text += f'Clickable detection calls: ~{total_elements} (approx)\n'
async with await anyio.open_file('./tmp/timing_analysis.txt', 'w', encoding='utf-8') as f:
await f.write(timing_text)
print('Timing analysis written to ./tmp/timing_analysis.txt')
# also save all_elements_state.element_tree.clickable_elements_to_string() to a file
# with open('./tmp/clickable_elements.json', 'w', encoding='utf-8') as f:
# f.write(json.dumps(all_elements_state.element_tree.__json__(), indent=2))
# print('Clickable elements written to ./tmp/clickable_elements.json')
website_list = get_website_list_for_prompt()
answer = input(
"๐ฎ Enter: element index | 'index' click (clickable) | 'index,text' input | 'c,index' copy | Enter re-run | 'n' next | 'q' quit: "
)
if answer.lower() == 'q':
return # Exit completely
elif answer.lower() == 'n':
print('Moving to next website...')
current_website_index += 1
break # Break inner loop to go to next website
elif answer.strip() == '':
print('Re-running extraction on current page state...')
continue # Continue inner loop to re-extract DOM without reloading page
elif answer.strip().isdigit():
# Click element format: index
try:
clicked_index = int(answer)
if clicked_index in selector_map:
element_node = selector_map[clicked_index]
print(f'Clicking element {clicked_index}: {element_node.tag_name}')
event = browser_session.event_bus.dispatch(ClickElementEvent(node=element_node))
await event
print('Click successful.')
except ValueError:
print(f"Invalid input: '{answer}'. Enter an index, 'index,text', 'c,index', or 'q'.")
continue
try:
if answer.lower().startswith('c,'):
# Copy element JSON format: c,index
parts = answer.split(',', 1)
if len(parts) == 2:
try:
target_index = int(parts[1].strip())
if target_index in selector_map:
element_node = selector_map[target_index]
element_json = json.dumps(element_node.__json__(), indent=2, default=str)
pyperclip.copy(element_json)
print(f'Copied element {target_index} JSON to clipboard: {element_node.tag_name}')
else:
print(f'Invalid index: {target_index}')
except ValueError:
print(f'Invalid index format: {parts[1]}')
else:
print("Invalid input format. Use 'c,index'.")
elif ',' in answer:
# Input text format: index,text
parts = answer.split(',', 1)
if len(parts) == 2:
try:
target_index = int(parts[0].strip())
text_to_input = parts[1]
if target_index in selector_map:
element_node = selector_map[target_index]
print(
f"Inputting text '{text_to_input}' into element {target_index}: {element_node.tag_name}"
)
event = await browser_session.event_bus.dispatch(
TypeTextEvent(node=element_node, text=text_to_input)
)
print('Input successful.')
else:
print(f'Invalid index: {target_index}')
except ValueError:
print(f'Invalid index format: {parts[0]}')
else:
print("Invalid input format. Use 'index,text'.")
except Exception as action_e:
print(f'Action failed: {action_e}')
# No explicit highlight removal here, get_state handles it at the start of the loop
except Exception as e:
print(f'Error in loop: {e}')
# Optionally add a small delay before retrying
await asyncio.sleep(1)
if __name__ == '__main__':
asyncio.run(test_focus_vs_all_elements())
# asyncio.run(test_process_html_file()) # Commented out the other test
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/dom/playground/multi_act.py | browser_use/dom/playground/multi_act.py | from browser_use import Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.profile import ViewportSize
from browser_use.llm import ChatAzureOpenAI
# Initialize the Azure OpenAI client
llm = ChatAzureOpenAI(
model='gpt-4.1-mini',
)
TASK = """
Go to https://browser-use.github.io/stress-tests/challenges/react-native-web-form.html and complete the React Native Web form by filling in all required fields and submitting.
"""
async def main():
browser = BrowserSession(
browser_profile=BrowserProfile(
window_size=ViewportSize(width=1100, height=1000),
)
)
agent = Agent(task=TASK, llm=llm)
await agent.run()
if __name__ == '__main__':
import asyncio
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/views.py | browser_use/tokens/views.py | from datetime import datetime
from typing import Any, TypeVar
from pydantic import BaseModel, Field
from browser_use.llm.views import ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
class TokenUsageEntry(BaseModel):
"""Single token usage entry"""
model: str
timestamp: datetime
usage: ChatInvokeUsage
class TokenCostCalculated(BaseModel):
"""Token cost"""
new_prompt_tokens: int
new_prompt_cost: float
prompt_read_cached_tokens: int | None
prompt_read_cached_cost: float | None
prompt_cached_creation_tokens: int | None
prompt_cache_creation_cost: float | None
"""Anthropic only: The cost of creating the cache."""
completion_tokens: int
completion_cost: float
@property
def prompt_cost(self) -> float:
return self.new_prompt_cost + (self.prompt_read_cached_cost or 0) + (self.prompt_cache_creation_cost or 0)
@property
def total_cost(self) -> float:
return (
self.new_prompt_cost
+ (self.prompt_read_cached_cost or 0)
+ (self.prompt_cache_creation_cost or 0)
+ self.completion_cost
)
class ModelPricing(BaseModel):
"""Pricing information for a model"""
model: str
input_cost_per_token: float | None
output_cost_per_token: float | None
cache_read_input_token_cost: float | None
cache_creation_input_token_cost: float | None
max_tokens: int | None
max_input_tokens: int | None
max_output_tokens: int | None
class CachedPricingData(BaseModel):
"""Cached pricing data with timestamp"""
timestamp: datetime
data: dict[str, Any]
class ModelUsageStats(BaseModel):
"""Usage statistics for a single model"""
model: str
prompt_tokens: int = 0
completion_tokens: int = 0
total_tokens: int = 0
cost: float = 0.0
invocations: int = 0
average_tokens_per_invocation: float = 0.0
class ModelUsageTokens(BaseModel):
"""Usage tokens for a single model"""
model: str
prompt_tokens: int
prompt_cached_tokens: int
completion_tokens: int
total_tokens: int
class UsageSummary(BaseModel):
"""Summary of token usage and costs"""
total_prompt_tokens: int
total_prompt_cost: float
total_prompt_cached_tokens: int
total_prompt_cached_cost: float
total_completion_tokens: int
total_completion_cost: float
total_tokens: int
total_cost: float
entry_count: int
by_model: dict[str, ModelUsageStats] = Field(default_factory=dict)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/service.py | browser_use/tokens/service.py | """
Token cost service that tracks LLM token usage and costs.
Fetches pricing data from LiteLLM repository and caches it for 1 day.
Automatically tracks token usage when LLMs are registered and invoked.
"""
import logging
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any
import anyio
import httpx
from dotenv import load_dotenv
from browser_use.llm.base import BaseChatModel
from browser_use.llm.views import ChatInvokeUsage
from browser_use.tokens.custom_pricing import CUSTOM_MODEL_PRICING
from browser_use.tokens.mappings import MODEL_TO_LITELLM
from browser_use.tokens.views import (
CachedPricingData,
ModelPricing,
ModelUsageStats,
ModelUsageTokens,
TokenCostCalculated,
TokenUsageEntry,
UsageSummary,
)
from browser_use.utils import create_task_with_error_handling
load_dotenv()
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
cost_logger = logging.getLogger('cost')
def xdg_cache_home() -> Path:
default = Path.home() / '.cache'
if CONFIG.XDG_CACHE_HOME and (path := Path(CONFIG.XDG_CACHE_HOME)).is_absolute():
return path
return default
class TokenCost:
"""Service for tracking token usage and calculating costs"""
CACHE_DIR_NAME = 'browser_use/token_cost'
CACHE_DURATION = timedelta(days=1)
PRICING_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
def __init__(self, include_cost: bool = False):
self.include_cost = include_cost or os.getenv('BROWSER_USE_CALCULATE_COST', 'false').lower() == 'true'
self.usage_history: list[TokenUsageEntry] = []
self.registered_llms: dict[str, BaseChatModel] = {}
self._pricing_data: dict[str, Any] | None = None
self._initialized = False
self._cache_dir = xdg_cache_home() / self.CACHE_DIR_NAME
async def initialize(self) -> None:
"""Initialize the service by loading pricing data"""
if not self._initialized:
if self.include_cost:
await self._load_pricing_data()
self._initialized = True
async def _load_pricing_data(self) -> None:
"""Load pricing data from cache or fetch from GitHub"""
# Try to find a valid cache file
cache_file = await self._find_valid_cache()
if cache_file:
await self._load_from_cache(cache_file)
else:
await self._fetch_and_cache_pricing_data()
async def _find_valid_cache(self) -> Path | None:
"""Find the most recent valid cache file"""
try:
# Ensure cache directory exists
self._cache_dir.mkdir(parents=True, exist_ok=True)
# List all JSON files in the cache directory
cache_files = list(self._cache_dir.glob('*.json'))
if not cache_files:
return None
# Sort by modification time (most recent first)
cache_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
# Check each file until we find a valid one
for cache_file in cache_files:
if await self._is_cache_valid(cache_file):
return cache_file
else:
# Clean up old cache files
try:
os.remove(cache_file)
except Exception:
pass
return None
except Exception:
return None
async def _is_cache_valid(self, cache_file: Path) -> bool:
"""Check if a specific cache file is valid and not expired"""
try:
if not cache_file.exists():
return False
# Read the cached data
cached = CachedPricingData.model_validate_json(await anyio.Path(cache_file).read_text())
# Check if cache is still valid
return datetime.now() - cached.timestamp < self.CACHE_DURATION
except Exception:
return False
async def _load_from_cache(self, cache_file: Path) -> None:
"""Load pricing data from a specific cache file"""
try:
content = await anyio.Path(cache_file).read_text()
cached = CachedPricingData.model_validate_json(content)
self._pricing_data = cached.data
except Exception as e:
logger.debug(f'Error loading cached pricing data from {cache_file}: {e}')
# Fall back to fetching
await self._fetch_and_cache_pricing_data()
async def _fetch_and_cache_pricing_data(self) -> None:
"""Fetch pricing data from LiteLLM GitHub and cache it with timestamp"""
try:
async with httpx.AsyncClient() as client:
response = await client.get(self.PRICING_URL, timeout=30)
response.raise_for_status()
self._pricing_data = response.json()
# Create cache object with timestamp
cached = CachedPricingData(timestamp=datetime.now(), data=self._pricing_data or {})
# Ensure cache directory exists
self._cache_dir.mkdir(parents=True, exist_ok=True)
# Create cache file with timestamp in filename
timestamp_str = datetime.now().strftime('%Y%m%d_%H%M%S')
cache_file = self._cache_dir / f'pricing_{timestamp_str}.json'
await anyio.Path(cache_file).write_text(cached.model_dump_json(indent=2))
except Exception as e:
logger.debug(f'Error fetching pricing data: {e}')
# Fall back to empty pricing data
self._pricing_data = {}
async def get_model_pricing(self, model_name: str) -> ModelPricing | None:
"""Get pricing information for a specific model"""
# Ensure we're initialized
if not self._initialized:
await self.initialize()
# Check custom pricing first
if model_name in CUSTOM_MODEL_PRICING:
data = CUSTOM_MODEL_PRICING[model_name]
return ModelPricing(
model=model_name,
input_cost_per_token=data.get('input_cost_per_token'),
output_cost_per_token=data.get('output_cost_per_token'),
max_tokens=data.get('max_tokens'),
max_input_tokens=data.get('max_input_tokens'),
max_output_tokens=data.get('max_output_tokens'),
cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
)
# Map model name to LiteLLM model name if needed
litellm_model_name = MODEL_TO_LITELLM.get(model_name, model_name)
if not self._pricing_data or litellm_model_name not in self._pricing_data:
return None
data = self._pricing_data[litellm_model_name]
return ModelPricing(
model=model_name,
input_cost_per_token=data.get('input_cost_per_token'),
output_cost_per_token=data.get('output_cost_per_token'),
max_tokens=data.get('max_tokens'),
max_input_tokens=data.get('max_input_tokens'),
max_output_tokens=data.get('max_output_tokens'),
cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
)
async def calculate_cost(self, model: str, usage: ChatInvokeUsage) -> TokenCostCalculated | None:
if not self.include_cost:
return None
data = await self.get_model_pricing(model)
if data is None:
return None
uncached_prompt_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
return TokenCostCalculated(
new_prompt_tokens=usage.prompt_tokens,
new_prompt_cost=uncached_prompt_tokens * (data.input_cost_per_token or 0),
# Cached tokens
prompt_read_cached_tokens=usage.prompt_cached_tokens,
prompt_read_cached_cost=usage.prompt_cached_tokens * data.cache_read_input_token_cost
if usage.prompt_cached_tokens and data.cache_read_input_token_cost
else None,
# Cache creation tokens
prompt_cached_creation_tokens=usage.prompt_cache_creation_tokens,
prompt_cache_creation_cost=usage.prompt_cache_creation_tokens * data.cache_creation_input_token_cost
if data.cache_creation_input_token_cost and usage.prompt_cache_creation_tokens
else None,
# Completion tokens
completion_tokens=usage.completion_tokens,
completion_cost=usage.completion_tokens * float(data.output_cost_per_token or 0),
)
def add_usage(self, model: str, usage: ChatInvokeUsage) -> TokenUsageEntry:
"""Add token usage entry to history (without calculating cost)"""
entry = TokenUsageEntry(
model=model,
timestamp=datetime.now(),
usage=usage,
)
self.usage_history.append(entry)
return entry
# async def _log_non_usage_llm(self, llm: BaseChatModel) -> None:
# """Log non-usage to the logger"""
# C_CYAN = '\033[96m'
# C_RESET = '\033[0m'
# cost_logger.debug(f'๐ง llm : {C_CYAN}{llm.model}{C_RESET} (no usage found)')
async def _log_usage(self, model: str, usage: TokenUsageEntry) -> None:
"""Log usage to the logger"""
if not self._initialized:
await self.initialize()
# ANSI color codes
C_CYAN = '\033[96m'
C_YELLOW = '\033[93m'
C_GREEN = '\033[92m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
# Always get cost breakdown for token details (even if not showing costs)
cost = await self.calculate_cost(model, usage.usage)
# Build input tokens breakdown
input_part = self._build_input_tokens_display(usage.usage, cost)
# Build output tokens display
completion_tokens_fmt = self._format_tokens(usage.usage.completion_tokens)
if self.include_cost and cost and cost.completion_cost > 0:
output_part = f'๐ค {C_GREEN}{completion_tokens_fmt} (${cost.completion_cost:.4f}){C_RESET}'
else:
output_part = f'๐ค {C_GREEN}{completion_tokens_fmt}{C_RESET}'
cost_logger.debug(f'๐ง {C_CYAN}{model}{C_RESET} | {input_part} | {output_part}')
def _build_input_tokens_display(self, usage: ChatInvokeUsage, cost: TokenCostCalculated | None) -> str:
"""Build a clear display of input tokens breakdown with emojis and optional costs"""
C_YELLOW = '\033[93m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
parts = []
# Always show token breakdown if we have cache information, regardless of cost tracking
if usage.prompt_cached_tokens or usage.prompt_cache_creation_tokens:
# Calculate actual new tokens (non-cached)
new_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
if new_tokens > 0:
new_tokens_fmt = self._format_tokens(new_tokens)
if self.include_cost and cost and cost.new_prompt_cost > 0:
parts.append(f'๐ {C_YELLOW}{new_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
else:
parts.append(f'๐ {C_YELLOW}{new_tokens_fmt}{C_RESET}')
if usage.prompt_cached_tokens:
cached_tokens_fmt = self._format_tokens(usage.prompt_cached_tokens)
if self.include_cost and cost and cost.prompt_read_cached_cost:
parts.append(f'๐พ {C_BLUE}{cached_tokens_fmt} (${cost.prompt_read_cached_cost:.4f}){C_RESET}')
else:
parts.append(f'๐พ {C_BLUE}{cached_tokens_fmt}{C_RESET}')
if usage.prompt_cache_creation_tokens:
creation_tokens_fmt = self._format_tokens(usage.prompt_cache_creation_tokens)
if self.include_cost and cost and cost.prompt_cache_creation_cost:
parts.append(f'๐ง {C_BLUE}{creation_tokens_fmt} (${cost.prompt_cache_creation_cost:.4f}){C_RESET}')
else:
parts.append(f'๐ง {C_BLUE}{creation_tokens_fmt}{C_RESET}')
if not parts:
# Fallback to simple display when no cache information available
total_tokens_fmt = self._format_tokens(usage.prompt_tokens)
if self.include_cost and cost and cost.new_prompt_cost > 0:
parts.append(f'๐ฅ {C_YELLOW}{total_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
else:
parts.append(f'๐ฅ {C_YELLOW}{total_tokens_fmt}{C_RESET}')
return ' + '.join(parts)
def register_llm(self, llm: BaseChatModel) -> BaseChatModel:
"""
Register an LLM to automatically track its token usage
@dev Guarantees that the same instance is not registered multiple times
"""
# Use instance ID as key to avoid collisions between multiple instances
instance_id = str(id(llm))
# Check if this exact instance is already registered
if instance_id in self.registered_llms:
logger.debug(f'LLM instance {instance_id} ({llm.provider}_{llm.model}) is already registered')
return llm
self.registered_llms[instance_id] = llm
# Store the original method
original_ainvoke = llm.ainvoke
# Store reference to self for use in the closure
token_cost_service = self
# Create a wrapped version that tracks usage
async def tracked_ainvoke(messages, output_format=None, **kwargs):
# Call the original method, passing through any additional kwargs
result = await original_ainvoke(messages, output_format, **kwargs)
# Track usage if available (no await needed since add_usage is now sync)
# Use llm.model instead of llm.name for consistency with get_usage_tokens_for_model()
if result.usage:
usage = token_cost_service.add_usage(llm.model, result.usage)
logger.debug(f'Token cost service: {usage}')
create_task_with_error_handling(
token_cost_service._log_usage(llm.model, usage), name='log_token_usage', suppress_exceptions=True
)
# else:
# await token_cost_service._log_non_usage_llm(llm)
return result
# Replace the method with our tracked version
# Using setattr to avoid type checking issues with overloaded methods
setattr(llm, 'ainvoke', tracked_ainvoke)
return llm
def get_usage_tokens_for_model(self, model: str) -> ModelUsageTokens:
"""Get usage tokens for a specific model"""
filtered_usage = [u for u in self.usage_history if u.model == model]
return ModelUsageTokens(
model=model,
prompt_tokens=sum(u.usage.prompt_tokens for u in filtered_usage),
prompt_cached_tokens=sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage),
completion_tokens=sum(u.usage.completion_tokens for u in filtered_usage),
total_tokens=sum(u.usage.prompt_tokens + u.usage.completion_tokens for u in filtered_usage),
)
async def get_usage_summary(self, model: str | None = None, since: datetime | None = None) -> UsageSummary:
"""Get summary of token usage and costs (costs calculated on-the-fly)"""
filtered_usage = self.usage_history
if model:
filtered_usage = [u for u in filtered_usage if u.model == model]
if since:
filtered_usage = [u for u in filtered_usage if u.timestamp >= since]
if not filtered_usage:
return UsageSummary(
total_prompt_tokens=0,
total_prompt_cost=0.0,
total_prompt_cached_tokens=0,
total_prompt_cached_cost=0.0,
total_completion_tokens=0,
total_completion_cost=0.0,
total_tokens=0,
total_cost=0.0,
entry_count=0,
)
# Calculate totals
total_prompt = sum(u.usage.prompt_tokens for u in filtered_usage)
total_completion = sum(u.usage.completion_tokens for u in filtered_usage)
total_tokens = total_prompt + total_completion
total_prompt_cached = sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage)
models = list({u.model for u in filtered_usage})
# Calculate per-model stats with record-by-record cost calculation
model_stats: dict[str, ModelUsageStats] = {}
total_prompt_cost = 0.0
total_completion_cost = 0.0
total_prompt_cached_cost = 0.0
for entry in filtered_usage:
if entry.model not in model_stats:
model_stats[entry.model] = ModelUsageStats(model=entry.model)
stats = model_stats[entry.model]
stats.prompt_tokens += entry.usage.prompt_tokens
stats.completion_tokens += entry.usage.completion_tokens
stats.total_tokens += entry.usage.prompt_tokens + entry.usage.completion_tokens
stats.invocations += 1
if self.include_cost:
# Calculate cost record by record using the updated calculate_cost function
cost = await self.calculate_cost(entry.model, entry.usage)
if cost:
stats.cost += cost.total_cost
total_prompt_cost += cost.prompt_cost
total_completion_cost += cost.completion_cost
total_prompt_cached_cost += cost.prompt_read_cached_cost or 0
# Calculate averages
for stats in model_stats.values():
if stats.invocations > 0:
stats.average_tokens_per_invocation = stats.total_tokens / stats.invocations
return UsageSummary(
total_prompt_tokens=total_prompt,
total_prompt_cost=total_prompt_cost,
total_prompt_cached_tokens=total_prompt_cached,
total_prompt_cached_cost=total_prompt_cached_cost,
total_completion_tokens=total_completion,
total_completion_cost=total_completion_cost,
total_tokens=total_tokens,
total_cost=total_prompt_cost + total_completion_cost + total_prompt_cached_cost,
entry_count=len(filtered_usage),
by_model=model_stats,
)
def _format_tokens(self, tokens: int) -> str:
"""Format token count with k suffix for thousands"""
if tokens >= 1000000000:
return f'{tokens / 1000000000:.1f}B'
if tokens >= 1000000:
return f'{tokens / 1000000:.1f}M'
if tokens >= 1000:
return f'{tokens / 1000:.1f}k'
return str(tokens)
async def log_usage_summary(self) -> None:
"""Log a comprehensive usage summary per model with colors and nice formatting"""
if not self.usage_history:
return
summary = await self.get_usage_summary()
if summary.entry_count == 0:
return
# ANSI color codes
C_CYAN = '\033[96m'
C_YELLOW = '\033[93m'
C_GREEN = '\033[92m'
C_BLUE = '\033[94m'
C_MAGENTA = '\033[95m'
C_RESET = '\033[0m'
C_BOLD = '\033[1m'
# Log overall summary
total_tokens_fmt = self._format_tokens(summary.total_tokens)
prompt_tokens_fmt = self._format_tokens(summary.total_prompt_tokens)
completion_tokens_fmt = self._format_tokens(summary.total_completion_tokens)
# Format cost breakdowns for input and output (only if cost tracking is enabled)
if self.include_cost and summary.total_cost > 0:
total_cost_part = f' (${C_MAGENTA}{summary.total_cost:.4f}{C_RESET})'
prompt_cost_part = f' (${summary.total_prompt_cost:.4f})'
completion_cost_part = f' (${summary.total_completion_cost:.4f})'
else:
total_cost_part = ''
prompt_cost_part = ''
completion_cost_part = ''
if len(summary.by_model) > 1:
cost_logger.debug(
f'๐ฒ {C_BOLD}Total Usage Summary{C_RESET}: {C_BLUE}{total_tokens_fmt} tokens{C_RESET}{total_cost_part} | '
f'โฌ
๏ธ {C_YELLOW}{prompt_tokens_fmt}{prompt_cost_part}{C_RESET} | โก๏ธ {C_GREEN}{completion_tokens_fmt}{completion_cost_part}{C_RESET}'
)
for model, stats in summary.by_model.items():
# Format tokens
model_total_fmt = self._format_tokens(stats.total_tokens)
model_prompt_fmt = self._format_tokens(stats.prompt_tokens)
model_completion_fmt = self._format_tokens(stats.completion_tokens)
avg_tokens_fmt = self._format_tokens(int(stats.average_tokens_per_invocation))
# Format cost display (only if cost tracking is enabled)
if self.include_cost:
# Calculate per-model costs on-the-fly
total_model_cost = 0.0
model_prompt_cost = 0.0
model_completion_cost = 0.0
# Calculate costs for this model
for entry in self.usage_history:
if entry.model == model:
cost = await self.calculate_cost(entry.model, entry.usage)
if cost:
model_prompt_cost += cost.prompt_cost
model_completion_cost += cost.completion_cost
total_model_cost = model_prompt_cost + model_completion_cost
if total_model_cost > 0:
cost_part = f' (${C_MAGENTA}{total_model_cost:.4f}{C_RESET})'
prompt_part = f'{C_YELLOW}{model_prompt_fmt} (${model_prompt_cost:.4f}){C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt} (${model_completion_cost:.4f}){C_RESET}'
else:
cost_part = ''
prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
else:
cost_part = ''
prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
cost_logger.debug(
f' ๐ค {C_CYAN}{model}{C_RESET}: {C_BLUE}{model_total_fmt} tokens{C_RESET}{cost_part} | '
f'โฌ
๏ธ {prompt_part} | โก๏ธ {completion_part} | '
f'๐ {stats.invocations} calls | ๐ {avg_tokens_fmt}/call'
)
async def get_cost_by_model(self) -> dict[str, ModelUsageStats]:
"""Get cost breakdown by model"""
summary = await self.get_usage_summary()
return summary.by_model
def clear_history(self) -> None:
"""Clear usage history"""
self.usage_history = []
async def refresh_pricing_data(self) -> None:
"""Force refresh of pricing data from GitHub"""
if self.include_cost:
await self._fetch_and_cache_pricing_data()
async def clean_old_caches(self, keep_count: int = 3) -> None:
"""Clean up old cache files, keeping only the most recent ones"""
try:
# List all JSON files in the cache directory
cache_files = list(self._cache_dir.glob('*.json'))
if len(cache_files) <= keep_count:
return
# Sort by modification time (oldest first)
cache_files.sort(key=lambda f: f.stat().st_mtime)
# Remove all but the most recent files
for cache_file in cache_files[:-keep_count]:
try:
os.remove(cache_file)
except Exception:
pass
except Exception as e:
logger.debug(f'Error cleaning old cache files: {e}')
async def ensure_pricing_loaded(self) -> None:
"""Ensure pricing data is loaded in the background. Call this after creating the service."""
if not self._initialized and self.include_cost:
# This will run in the background and won't block
await self.initialize()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/custom_pricing.py | browser_use/tokens/custom_pricing.py | """
Custom model pricing for models not available in LiteLLM's pricing data.
Prices are per token (not per 1M tokens).
"""
from typing import Any
# Custom model pricing data
# Format matches LiteLLM's model_prices_and_context_window.json structure
CUSTOM_MODEL_PRICING: dict[str, dict[str, Any]] = {
'bu-1-0': {
'input_cost_per_token': 0.2 / 1_000_000, # $0.50 per 1M tokens
'output_cost_per_token': 2.00 / 1_000_000, # $3.00 per 1M tokens
'cache_read_input_token_cost': 0.02 / 1_000_000, # $0.10 per 1M tokens
'cache_creation_input_token_cost': None, # Not specified
'max_tokens': None, # Not specified
'max_input_tokens': None, # Not specified
'max_output_tokens': None, # Not specified
}
}
CUSTOM_MODEL_PRICING['bu-latest'] = CUSTOM_MODEL_PRICING['bu-1-0']
CUSTOM_MODEL_PRICING['smart'] = CUSTOM_MODEL_PRICING['bu-1-0']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/mappings.py | browser_use/tokens/mappings.py | # Mapping from model_name to LiteLLM model name
MODEL_TO_LITELLM: dict[str, str] = {
'gemini-flash-latest': 'gemini/gemini-flash-latest',
}
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/__init__.py | browser_use/tokens/__init__.py | python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false | |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/tokens/tests/test_cost.py | browser_use/tokens/tests/test_cost.py | """
Simple test for token cost tracking with real LLM calls.
Tests ChatOpenAI and ChatGoogle by iteratively generating countries.
"""
import asyncio
import logging
from browser_use.llm import ChatGoogle, ChatOpenAI
from browser_use.llm.messages import AssistantMessage, SystemMessage, UserMessage
from browser_use.tokens.service import TokenCost
# Optional OCI import
try:
from examples.models.oci_models import meta_llm
OCI_MODELS_AVAILABLE = True
except ImportError:
meta_llm = None
OCI_MODELS_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_oci_model_if_available():
"""Create OCI model for testing if credentials are available."""
if not OCI_MODELS_AVAILABLE:
return None
# Try to create OCI model with mock/test configuration
# These values should be replaced with real ones if testing with actual OCI
try:
# get any of the llm xai_llm or cohere_llm
return meta_llm
except Exception as e:
logger.info(f'OCI model not available for testing: {e}')
return None
async def test_iterative_country_generation():
"""Test token cost tracking with iterative country generation"""
# Initialize token cost service
tc = TokenCost(include_cost=True)
# System prompt that explains the iterative task
system_prompt = """You are a country name generator. When asked, you will provide exactly ONE country name and nothing else.
Each time you're asked to continue, provide the next country name that hasn't been mentioned yet.
Keep track of which countries you've already said and don't repeat them.
Only output the country name, no numbers, no punctuation, just the name."""
# Test with different models
models = []
models.append(ChatOpenAI(model='gpt-4.1')) # Commented out - requires OPENAI_API_KEY
models.append(ChatGoogle(model='gemini-2.0-flash-exp'))
# Add OCI model if available
oci_model = get_oci_model_if_available()
if oci_model:
models.append(oci_model)
print(f'โ
OCI model added to test: {oci_model.name}')
else:
print('โน๏ธ OCI model not available (install with pip install browser-use[oci] and configure credentials)')
print('\n๐ Iterative Country Generation Test')
print('=' * 80)
for llm in models:
print(f'\n๐ Testing {llm.model}')
print('-' * 60)
# Register the LLM for automatic tracking
tc.register_llm(llm)
# Initialize conversation
messages = [SystemMessage(content=system_prompt), UserMessage(content='Give me a country name')]
countries = []
# Generate 10 countries iteratively
for i in range(10):
# Call the LLM
result = await llm.ainvoke(messages)
country = result.completion.strip()
countries.append(country)
# Add the response to messages
messages.append(AssistantMessage(content=country))
# Add the next request (except for the last iteration)
if i < 9:
messages.append(UserMessage(content='Next country please'))
print(f' Country {i + 1}: {country}')
print(f'\n Generated countries: {", ".join(countries)}')
# Display cost summary
print('\n๐ฐ Cost Summary')
print('=' * 80)
summary = await tc.get_usage_summary()
print(f'Total calls: {summary.entry_count}')
print(f'Total tokens: {summary.total_tokens:,}')
print(f'Total cost: ${summary.total_cost:.6f}')
expected_cost = 0
expected_invocations = 0
print('\n๐ Cost breakdown by model:')
for model, stats in summary.by_model.items():
expected_cost += stats.cost
expected_invocations += stats.invocations
print(f'\n{model}:')
print(f' Calls: {stats.invocations}')
print(f' Prompt tokens: {stats.prompt_tokens:,}')
print(f' Completion tokens: {stats.completion_tokens:,}')
print(f' Total tokens: {stats.total_tokens:,}')
print(f' Cost: ${stats.cost:.6f}')
print(f' Average tokens per call: {stats.average_tokens_per_invocation:.1f}')
assert summary.entry_count == expected_invocations, f'Expected {expected_invocations} invocations, got {summary.entry_count}'
assert abs(summary.total_cost - expected_cost) < 1e-6, (
f'Expected total cost ${expected_cost:.6f}, got ${summary.total_cost:.6f}'
)
if __name__ == '__main__':
# Run the test
asyncio.run(test_iterative_country_generation())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sandbox/views.py | browser_use/sandbox/views.py | """Type-safe event models for sandbox execution SSE streaming"""
import json
from enum import Enum
from typing import Any
from pydantic import BaseModel
class SandboxError(Exception):
pass
class SSEEventType(str, Enum):
"""Event types for Server-Sent Events"""
BROWSER_CREATED = 'browser_created'
INSTANCE_CREATED = 'instance_created'
INSTANCE_READY = 'instance_ready'
LOG = 'log'
RESULT = 'result'
ERROR = 'error'
STREAM_COMPLETE = 'stream_complete'
class BrowserCreatedData(BaseModel):
"""Data for browser_created event"""
session_id: str
live_url: str
status: str
class LogData(BaseModel):
"""Data for log event"""
message: str
level: str = 'info' # stdout, stderr, info, warning, error
class ExecutionResponse(BaseModel):
"""Execution result from the executor"""
success: bool
result: Any = None
error: str | None = None
traceback: str | None = None
class ResultData(BaseModel):
"""Data for result event"""
execution_response: ExecutionResponse
class ErrorData(BaseModel):
"""Data for error event"""
error: str
traceback: str | None = None
status_code: int = 500
class SSEEvent(BaseModel):
"""Type-safe SSE Event
Usage:
# Parse from JSON
event = SSEEvent.from_json(event_json_string)
# Type-safe access with type guards
if event.is_browser_created():
assert isinstance(event.data, BrowserCreatedData)
print(event.data.live_url)
# Or check event type directly
if event.type == SSEEventType.LOG:
assert isinstance(event.data, LogData)
print(event.data.message)
"""
type: SSEEventType
data: BrowserCreatedData | LogData | ResultData | ErrorData | dict[str, Any]
timestamp: str | None = None
@classmethod
def from_json(cls, event_json: str) -> 'SSEEvent':
"""Parse SSE event from JSON string with proper type discrimination
Args:
event_json: JSON string from SSE stream
Returns:
Typed SSEEvent with appropriate data model
Raises:
json.JSONDecodeError: If JSON is malformed
ValueError: If event type is invalid
"""
raw_data = json.loads(event_json)
event_type = SSEEventType(raw_data.get('type'))
data_dict = raw_data.get('data', {})
# Parse data based on event type
if event_type == SSEEventType.BROWSER_CREATED:
data = BrowserCreatedData(**data_dict)
elif event_type == SSEEventType.LOG:
data = LogData(**data_dict)
elif event_type == SSEEventType.RESULT:
data = ResultData(**data_dict)
elif event_type == SSEEventType.ERROR:
data = ErrorData(**data_dict)
else:
data = data_dict
return cls(type=event_type, data=data, timestamp=raw_data.get('timestamp'))
def is_browser_created(self) -> bool:
"""Type guard for BrowserCreatedData"""
return self.type == SSEEventType.BROWSER_CREATED and isinstance(self.data, BrowserCreatedData)
def is_log(self) -> bool:
"""Type guard for LogData"""
return self.type == SSEEventType.LOG and isinstance(self.data, LogData)
def is_result(self) -> bool:
"""Type guard for ResultData"""
return self.type == SSEEventType.RESULT and isinstance(self.data, ResultData)
def is_error(self) -> bool:
"""Type guard for ErrorData"""
return self.type == SSEEventType.ERROR and isinstance(self.data, ErrorData)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sandbox/sandbox.py | browser_use/sandbox/sandbox.py | import ast
import asyncio
import base64
import dataclasses
import enum
import inspect
import json
import os
import sys
import textwrap
from collections.abc import Callable, Coroutine
from functools import wraps
from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, Union, cast, get_args, get_origin
import cloudpickle
import httpx
from browser_use.sandbox.views import (
BrowserCreatedData,
ErrorData,
LogData,
ResultData,
SandboxError,
SSEEvent,
SSEEventType,
)
if TYPE_CHECKING:
from browser_use.browser import BrowserSession
T = TypeVar('T')
P = ParamSpec('P')
def get_terminal_width() -> int:
"""Get terminal width, default to 80 if unable to detect"""
try:
return os.get_terminal_size().columns
except (AttributeError, OSError):
return 80
async def _call_callback(callback: Callable[..., Any], *args: Any) -> None:
"""Call a callback that can be either sync or async"""
result = callback(*args)
if asyncio.iscoroutine(result):
await result
def _get_function_source_without_decorator(func: Callable) -> str:
"""Get function source code with decorator removed"""
source = inspect.getsource(func)
source = textwrap.dedent(source)
# Parse and remove decorator
tree = ast.parse(source)
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
node.decorator_list = []
break
return ast.unparse(tree)
def _get_imports_used_in_function(func: Callable) -> str:
"""Extract only imports that are referenced in the function body or type annotations"""
# Get all names referenced in the function
code = func.__code__
referenced_names = set(code.co_names)
# Also get names from type annotations (recursively for complex types like Union, Literal, etc.)
def extract_type_names(annotation):
"""Recursively extract all type names from annotation"""
if annotation is None or annotation == inspect.Parameter.empty:
return
# Handle Pydantic generics (e.g., AgentHistoryList[MyModel]) - check this FIRST
# Pydantic generics have __pydantic_generic_metadata__ with 'origin' and 'args'
pydantic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
if pydantic_meta and pydantic_meta.get('origin'):
# Add the origin class name (e.g., 'AgentHistoryList')
origin_class = pydantic_meta['origin']
if hasattr(origin_class, '__name__'):
referenced_names.add(origin_class.__name__)
# Recursively extract from generic args (e.g., MyModel)
for arg in pydantic_meta.get('args', ()):
extract_type_names(arg)
return
# Handle simple types with __name__
if hasattr(annotation, '__name__'):
referenced_names.add(annotation.__name__)
# Handle string annotations
if isinstance(annotation, str):
referenced_names.add(annotation)
# Handle generic types like Union[X, Y], Literal['x'], etc.
origin = get_origin(annotation)
args = get_args(annotation)
if origin:
# Add the origin type name (e.g., 'Union', 'Literal')
if hasattr(origin, '__name__'):
referenced_names.add(origin.__name__)
# Recursively extract from generic args
if args:
for arg in args:
extract_type_names(arg)
sig = inspect.signature(func)
for param in sig.parameters.values():
if param.annotation != inspect.Parameter.empty:
extract_type_names(param.annotation)
# Get return annotation (also extract recursively)
if 'return' in func.__annotations__:
extract_type_names(func.__annotations__['return'])
# Get the module where function is defined
module = inspect.getmodule(func)
if not module or not hasattr(module, '__file__') or module.__file__ is None:
return ''
try:
with open(module.__file__) as f:
module_source = f.read()
tree = ast.parse(module_source)
needed_imports: list[str] = []
for node in tree.body:
if isinstance(node, ast.Import):
# import X, Y
for alias in node.names:
import_name = alias.asname if alias.asname else alias.name
if import_name in referenced_names:
needed_imports.append(ast.unparse(node))
break
elif isinstance(node, ast.ImportFrom):
# from X import Y, Z
imported_names = []
for alias in node.names:
import_name = alias.asname if alias.asname else alias.name
if import_name in referenced_names:
imported_names.append(alias)
if imported_names:
# Create filtered import statement
filtered_import = ast.ImportFrom(module=node.module, names=imported_names, level=node.level)
needed_imports.append(ast.unparse(filtered_import))
return '\n'.join(needed_imports)
except Exception:
return ''
def _extract_all_params(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
"""Extract all parameters including explicit params and closure variables
Args:
func: The function being decorated
args: Positional arguments passed to the function
kwargs: Keyword arguments passed to the function
Returns:
Dictionary of all parameters {name: value}
"""
sig = inspect.signature(func)
bound_args = sig.bind_partial(*args, **kwargs)
bound_args.apply_defaults()
all_params: dict[str, Any] = {}
# 1. Extract explicit parameters (skip 'browser' and 'self')
for param_name, param_value in bound_args.arguments.items():
if param_name == 'browser':
continue
if param_name == 'self' and hasattr(param_value, '__dict__'):
# Extract self attributes as individual variables
for attr_name, attr_value in param_value.__dict__.items():
all_params[attr_name] = attr_value
else:
all_params[param_name] = param_value
# 2. Extract closure variables
if func.__closure__:
closure_vars = func.__code__.co_freevars
closure_values = [cell.cell_contents for cell in func.__closure__]
for name, value in zip(closure_vars, closure_values):
# Skip if already captured from explicit params
if name in all_params:
continue
# Special handling for 'self' in closures
if name == 'self' and hasattr(value, '__dict__'):
for attr_name, attr_value in value.__dict__.items():
if attr_name not in all_params:
all_params[attr_name] = attr_value
else:
all_params[name] = value
# 3. Extract referenced globals (like logger, module-level vars, etc.)
# Let cloudpickle handle serialization instead of special-casing
for name in func.__code__.co_names:
if name in all_params:
continue
if name in func.__globals__:
all_params[name] = func.__globals__[name]
return all_params
def sandbox(
BROWSER_USE_API_KEY: str | None = None,
cloud_profile_id: str | None = None,
cloud_proxy_country_code: str | None = None,
cloud_timeout: int | None = None,
server_url: str | None = None,
log_level: str = 'INFO',
quiet: bool = False,
headers: dict[str, str] | None = None,
on_browser_created: Callable[[BrowserCreatedData], None]
| Callable[[BrowserCreatedData], Coroutine[Any, Any, None]]
| None = None,
on_instance_ready: Callable[[], None] | Callable[[], Coroutine[Any, Any, None]] | None = None,
on_log: Callable[[LogData], None] | Callable[[LogData], Coroutine[Any, Any, None]] | None = None,
on_result: Callable[[ResultData], None] | Callable[[ResultData], Coroutine[Any, Any, None]] | None = None,
on_error: Callable[[ErrorData], None] | Callable[[ErrorData], Coroutine[Any, Any, None]] | None = None,
**env_vars: str,
) -> Callable[[Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T]]]:
"""Decorator to execute browser automation code in a sandbox environment.
The decorated function MUST have 'browser: Browser' as its first parameter.
The browser parameter will be automatically injected - do NOT pass it when calling the decorated function.
All other parameters (explicit or from closure) will be captured and sent via cloudpickle.
Args:
BROWSER_USE_API_KEY: API key (defaults to BROWSER_USE_API_KEY env var)
cloud_profile_id: The ID of the profile to use for the browser session
cloud_proxy_country_code: Country code for proxy location (e.g., 'us', 'uk', 'fr')
cloud_timeout: The timeout for the browser session in minutes (max 240 = 4 hours)
server_url: Sandbox server URL (defaults to https://sandbox.api.browser-use.com/sandbox-stream)
log_level: Logging level (INFO, DEBUG, WARNING, ERROR)
quiet: Suppress console output
headers: Additional HTTP headers to send with the request
on_browser_created: Callback when browser is created
on_instance_ready: Callback when instance is ready
on_log: Callback for log events
on_result: Callback when execution completes
on_error: Callback for errors
**env_vars: Additional environment variables
Example:
@sandbox()
async def task(browser: Browser, url: str, max_steps: int) -> str:
agent = Agent(task=url, browser=browser)
await agent.run(max_steps=max_steps)
return "done"
# Call with:
result = await task(url="https://example.com", max_steps=10)
# With cloud parameters:
@sandbox(cloud_proxy_country_code='us', cloud_timeout=60)
async def task_with_proxy(browser: Browser) -> str:
...
"""
def decorator(
func: Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T]]:
# Validate function has browser parameter
sig = inspect.signature(func)
if 'browser' not in sig.parameters:
raise TypeError(f'{func.__name__}() must have a "browser" parameter')
browser_param = sig.parameters['browser']
if browser_param.annotation != inspect.Parameter.empty:
annotation_str = str(browser_param.annotation)
if 'Browser' not in annotation_str:
raise TypeError(f'{func.__name__}() browser parameter must be typed as Browser, got {annotation_str}')
@wraps(func)
async def wrapper(*args, **kwargs) -> T:
# 1. Get API key
api_key = BROWSER_USE_API_KEY or os.getenv('BROWSER_USE_API_KEY')
if not api_key:
raise SandboxError('BROWSER_USE_API_KEY is required')
# 2. Extract all parameters (explicit + closure)
all_params = _extract_all_params(func, args, kwargs)
# 3. Get function source without decorator and only needed imports
func_source = _get_function_source_without_decorator(func)
needed_imports = _get_imports_used_in_function(func)
# Always include Browser import since it's required for the function signature
if needed_imports:
needed_imports = 'from browser_use import Browser\n' + needed_imports
else:
needed_imports = 'from browser_use import Browser'
# 4. Pickle parameters using cloudpickle for robust serialization
pickled_params = base64.b64encode(cloudpickle.dumps(all_params)).decode()
# 5. Determine which params are in the function signature vs closure/globals
func_param_names = {p.name for p in sig.parameters.values() if p.name != 'browser'}
non_explicit_params = {k: v for k, v in all_params.items() if k not in func_param_names}
explicit_params = {k: v for k, v in all_params.items() if k in func_param_names}
# Inject closure variables and globals as module-level vars
var_injections = []
for var_name in non_explicit_params.keys():
var_injections.append(f"{var_name} = _params['{var_name}']")
var_injection_code = '\n'.join(var_injections) if var_injections else '# No closure variables or globals'
# Build function call
if explicit_params:
function_call = (
f'await {func.__name__}(browser=browser, **{{k: _params[k] for k in {list(explicit_params.keys())!r}}})'
)
else:
function_call = f'await {func.__name__}(browser=browser)'
# 6. Create wrapper code that unpickles params and calls function
execution_code = f"""import cloudpickle
import base64
# Imports used in function
{needed_imports}
# Unpickle all parameters (explicit, closure, and globals)
_pickled_params = base64.b64decode({repr(pickled_params)})
_params = cloudpickle.loads(_pickled_params)
# Inject closure variables and globals into module scope
{var_injection_code}
# Original function (decorator removed)
{func_source}
# Wrapper function that passes explicit params
async def run(browser):
return {function_call}
"""
# 9. Send to server
payload: dict[str, Any] = {'code': base64.b64encode(execution_code.encode()).decode()}
combined_env: dict[str, str] = env_vars.copy() if env_vars else {}
combined_env['LOG_LEVEL'] = log_level.upper()
payload['env'] = combined_env
# Add cloud parameters if provided
if cloud_profile_id is not None:
payload['cloud_profile_id'] = cloud_profile_id
if cloud_proxy_country_code is not None:
payload['cloud_proxy_country_code'] = cloud_proxy_country_code
if cloud_timeout is not None:
payload['cloud_timeout'] = cloud_timeout
url = server_url or 'https://sandbox.api.browser-use.com/sandbox-stream'
request_headers = {'X-API-Key': api_key}
if headers:
request_headers.update(headers)
# 10. Handle SSE streaming
_NO_RESULT = object()
execution_result = _NO_RESULT
live_url_shown = False
execution_started = False
received_final_event = False
async with httpx.AsyncClient(timeout=1800.0) as client:
async with client.stream('POST', url, json=payload, headers=request_headers) as response:
response.raise_for_status()
try:
async for line in response.aiter_lines():
if not line or not line.startswith('data: '):
continue
event_json = line[6:]
try:
event = SSEEvent.from_json(event_json)
if event.type == SSEEventType.BROWSER_CREATED:
assert isinstance(event.data, BrowserCreatedData)
if on_browser_created:
try:
await _call_callback(on_browser_created, event.data)
except Exception as e:
if not quiet:
print(f'โ ๏ธ Error in on_browser_created callback: {e}')
if not quiet and event.data.live_url and not live_url_shown:
width = get_terminal_width()
print('\n' + 'โ' * width)
print('๐๏ธ LIVE BROWSER VIEW (Click to watch)')
print(f'๐ {event.data.live_url}')
print('โ' * width)
live_url_shown = True
elif event.type == SSEEventType.LOG:
assert isinstance(event.data, LogData)
message = event.data.message
level = event.data.level
if on_log:
try:
await _call_callback(on_log, event.data)
except Exception as e:
if not quiet:
print(f'โ ๏ธ Error in on_log callback: {e}')
if level == 'stdout':
if not quiet:
if not execution_started:
width = get_terminal_width()
print('\n' + 'โ' * width)
print('โก Runtime Output')
print('โ' * width)
execution_started = True
print(f' {message}', end='')
elif level == 'stderr':
if not quiet:
if not execution_started:
width = get_terminal_width()
print('\n' + 'โ' * width)
print('โก Runtime Output')
print('โ' * width)
execution_started = True
print(f'โ ๏ธ {message}', end='', file=sys.stderr)
elif level == 'info':
if not quiet:
if 'credit' in message.lower():
import re
match = re.search(r'\$[\d,]+\.?\d*', message)
if match:
print(f'๐ฐ You have {match.group()} credits')
else:
print(f'โน๏ธ {message}')
else:
if not quiet:
print(f' {message}')
elif event.type == SSEEventType.INSTANCE_READY:
if on_instance_ready:
try:
await _call_callback(on_instance_ready)
except Exception as e:
if not quiet:
print(f'โ ๏ธ Error in on_instance_ready callback: {e}')
if not quiet:
print('โ
Browser ready, starting execution...\n')
elif event.type == SSEEventType.RESULT:
assert isinstance(event.data, ResultData)
exec_response = event.data.execution_response
received_final_event = True
if on_result:
try:
await _call_callback(on_result, event.data)
except Exception as e:
if not quiet:
print(f'โ ๏ธ Error in on_result callback: {e}')
if exec_response.success:
execution_result = exec_response.result
if not quiet and execution_started:
width = get_terminal_width()
print('\n' + 'โ' * width)
print()
else:
error_msg = exec_response.error or 'Unknown error'
raise SandboxError(f'Execution failed: {error_msg}')
elif event.type == SSEEventType.ERROR:
assert isinstance(event.data, ErrorData)
received_final_event = True
if on_error:
try:
await _call_callback(on_error, event.data)
except Exception as e:
if not quiet:
print(f'โ ๏ธ Error in on_error callback: {e}')
raise SandboxError(f'Execution failed: {event.data.error}')
except (json.JSONDecodeError, ValueError):
continue
except (httpx.RemoteProtocolError, httpx.ReadError, httpx.StreamClosed) as e:
# With deterministic handshake, these should never happen
# If they do, it's a real error
raise SandboxError(
f'Stream error: {e.__class__.__name__}: {e or "connection closed unexpectedly"}'
) from e
# 11. Parse result with type annotation
if execution_result is not _NO_RESULT:
return_annotation = func.__annotations__.get('return')
if return_annotation:
parsed_result = _parse_with_type_annotation(execution_result, return_annotation)
return parsed_result
return execution_result # type: ignore[return-value]
raise SandboxError('No result received from execution')
# Update wrapper signature to remove browser parameter
wrapper.__annotations__ = func.__annotations__.copy()
if 'browser' in wrapper.__annotations__:
del wrapper.__annotations__['browser']
params = [p for p in sig.parameters.values() if p.name != 'browser']
wrapper.__signature__ = sig.replace(parameters=params) # type: ignore[attr-defined]
return cast(Callable[P, Coroutine[Any, Any, T]], wrapper)
return decorator
def _parse_with_type_annotation(data: Any, annotation: Any) -> Any:
"""Parse data with type annotation without validation, recursively handling nested types
This function reconstructs Pydantic models, dataclasses, and enums from JSON dicts
without running validation logic. It recursively parses nested fields to ensure
complete type fidelity.
"""
try:
if data is None:
return None
origin = get_origin(annotation)
args = get_args(annotation)
# Handle Union types
if origin is Union or (hasattr(annotation, '__class__') and annotation.__class__.__name__ == 'UnionType'):
union_args = args or getattr(annotation, '__args__', [])
for arg in union_args:
if arg is type(None) and data is None:
return None
if arg is not type(None):
try:
return _parse_with_type_annotation(data, arg)
except Exception:
continue
return data
# Handle List types
if origin is list:
if not isinstance(data, list):
return data
if args:
return [_parse_with_type_annotation(item, args[0]) for item in data]
return data
# Handle Tuple types (JSON serializes tuples as lists)
if origin is tuple:
if not isinstance(data, (list, tuple)):
return data
if args:
# Parse each element according to its type annotation
parsed_items = []
for i, item in enumerate(data):
# Use the corresponding type arg, or the last one if fewer args than items
type_arg = args[i] if i < len(args) else args[-1] if args else Any
parsed_items.append(_parse_with_type_annotation(item, type_arg))
return tuple(parsed_items)
return tuple(data) if isinstance(data, list) else data
# Handle Dict types
if origin is dict:
if not isinstance(data, dict):
return data
if len(args) == 2:
return {_parse_with_type_annotation(k, args[0]): _parse_with_type_annotation(v, args[1]) for k, v in data.items()}
return data
# Handle Enum types
if inspect.isclass(annotation) and issubclass(annotation, enum.Enum):
if isinstance(data, str):
try:
return annotation[data] # By name
except KeyError:
return annotation(data) # By value
return annotation(data) # By value
# Handle Pydantic v2 - use model_construct to skip validation and recursively parse nested fields
# Get the actual class (unwrap generic if needed)
# For Pydantic generics, get_origin() returns None, so check __pydantic_generic_metadata__ first
pydantic_generic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
if pydantic_generic_meta and pydantic_generic_meta.get('origin'):
actual_class = pydantic_generic_meta['origin']
generic_args = pydantic_generic_meta.get('args', ())
else:
actual_class = get_origin(annotation) or annotation
generic_args = get_args(annotation)
if hasattr(actual_class, 'model_construct'):
if not isinstance(data, dict):
return data
# Recursively parse each field according to its type annotation
if hasattr(actual_class, 'model_fields'):
parsed_fields = {}
for field_name, field_info in actual_class.model_fields.items():
if field_name in data:
field_annotation = field_info.annotation
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
result = actual_class.model_construct(**parsed_fields)
# Special handling for AgentHistoryList: extract and set _output_model_schema from generic type parameter
if actual_class.__name__ == 'AgentHistoryList' and generic_args:
output_model_schema = generic_args[0]
# Only set if it's an actual model class, not a TypeVar
if inspect.isclass(output_model_schema) and hasattr(output_model_schema, 'model_validate_json'):
result._output_model_schema = output_model_schema
return result
# Fallback if model_fields not available
return actual_class.model_construct(**data)
# Handle Pydantic v1 - use construct to skip validation and recursively parse nested fields
if hasattr(annotation, 'construct'):
if not isinstance(data, dict):
return data
# Recursively parse each field if __fields__ is available
if hasattr(annotation, '__fields__'):
parsed_fields = {}
for field_name, field_obj in annotation.__fields__.items():
if field_name in data:
field_annotation = field_obj.outer_type_
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
return annotation.construct(**parsed_fields)
# Fallback if __fields__ not available
return annotation.construct(**data)
# Handle dataclasses
if dataclasses.is_dataclass(annotation) and isinstance(data, dict):
# Get field type annotations
field_types = {f.name: f.type for f in dataclasses.fields(annotation)}
# Recursively parse each field
parsed_fields = {}
for field_name, field_type in field_types.items():
if field_name in data:
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_type)
return cast(type[Any], annotation)(**parsed_fields)
# Handle regular classes
if inspect.isclass(annotation) and isinstance(data, dict):
try:
return annotation(**data)
except Exception:
pass
return data
except Exception:
return data
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sandbox/__init__.py | browser_use/sandbox/__init__.py | """Sandbox execution package for browser-use
This package provides type-safe sandbox code execution with SSE streaming.
Example:
from browser_use.sandbox import sandbox, SSEEvent, SSEEventType
@sandbox(log_level="INFO")
async def my_task(browser: Browser) -> str:
page = await browser.get_current_page()
await page.goto("https://example.com")
return await page.title()
result = await my_task()
"""
from browser_use.sandbox.sandbox import SandboxError, sandbox
from browser_use.sandbox.views import (
BrowserCreatedData,
ErrorData,
ExecutionResponse,
LogData,
ResultData,
SSEEvent,
SSEEventType,
)
__all__ = [
# Main decorator
'sandbox',
'SandboxError',
# Event types
'SSEEvent',
'SSEEventType',
# Event data models
'BrowserCreatedData',
'LogData',
'ResultData',
'ErrorData',
'ExecutionResponse',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdog_base.py | browser_use/browser/watchdog_base.py | """Base watchdog class for browser monitoring components."""
import inspect
import time
from collections.abc import Iterable
from typing import Any, ClassVar
from bubus import BaseEvent, EventBus
from pydantic import BaseModel, ConfigDict, Field
from browser_use.browser.session import BrowserSession
class BaseWatchdog(BaseModel):
	"""Base class for all browser watchdogs.

	Watchdogs monitor browser state and emit events based on changes.
	They automatically register event handlers based on method names.
	Handler methods should be named: on_EventTypeName(self, event: EventTypeName)
	"""

	model_config = ConfigDict(
		arbitrary_types_allowed=True,  # allow non-serializable objects like EventBus/BrowserSession in fields
		extra='forbid',  # dont allow implicit class/instance state, everything must be a properly typed Field or PrivateAttr
		validate_assignment=False,  # avoid re-triggering __init__ / validators on values on every assignment
		revalidate_instances='never',  # avoid re-triggering __init__ / validators and erasing private attrs
	)

	# Class variables to statically define the list of events relevant to each watchdog
	# (not enforced, just to make it easier to understand the code and debug watchdogs at runtime)
	LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = []  # Events this watchdog listens to
	EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []  # Events this watchdog emits

	# Core dependencies
	event_bus: EventBus = Field()
	browser_session: BrowserSession = Field()

	# Shared state that other watchdogs might need to access should not be defined on BrowserSession, not here!
	# Shared helper methods needed by other watchdogs should be defined on BrowserSession, not here!
	# Alternatively, expose some events on the watchdog to allow access to state/helpers via event_bus system.
	# Private state internal to the watchdog can be defined like this on BaseWatchdog subclasses:
	# _screenshot_cache: dict[str, bytes] = PrivateAttr(default_factory=dict)
	# _browser_crash_watcher_task: asyncio.Task | None = PrivateAttr(default=None)
	# _cdp_download_tasks: WeakSet[asyncio.Task] = PrivateAttr(default_factory=WeakSet)
	# ...

	@property
	def logger(self):
		"""Get the logger from the browser session."""
		return self.browser_session.logger

	@staticmethod
	def attach_handler_to_session(browser_session: 'BrowserSession', event_class: type[BaseEvent[Any]], handler) -> None:
		"""Attach a single event handler to a browser session.

		The handler is wrapped in a uniquely named async function that adds debug
		logging, normalizes returned exceptions to raises, and attempts to repair
		the CDP session when the handler fails.

		Args:
			browser_session: The browser session to attach to
			event_class: The event class to listen for
			handler: The handler method (must start with 'on_' and end with event type)

		Raises:
			RuntimeError: If the same handler name is already registered for this event.
		"""
		event_bus = browser_session.event_bus

		# Validate handler naming convention
		assert hasattr(handler, '__name__'), 'Handler must have a __name__ attribute'
		assert handler.__name__.startswith('on_'), f'Handler {handler.__name__} must start with "on_"'
		assert handler.__name__.endswith(event_class.__name__), (
			f'Handler {handler.__name__} must end with event type {event_class.__name__}'
		)

		# Get the watchdog instance if this is a bound method
		watchdog_instance = getattr(handler, '__self__', None)
		watchdog_class_name = watchdog_instance.__class__.__name__ if watchdog_instance else 'Unknown'

		# Create a wrapper function with unique name to avoid duplicate handler warnings
		# Capture handler by value to avoid closure issues
		def make_unique_handler(actual_handler):
			"""Build the logging/repair wrapper around ``actual_handler``."""

			async def unique_handler(event):
				# just for debug logging, not used for anything else
				parent_event = event_bus.event_history.get(event.event_parent_id) if event.event_parent_id else None
				grandparent_event = (
					event_bus.event_history.get(parent_event.event_parent_id)
					if parent_event and parent_event.event_parent_id
					else None
				)
				parent = (
					f'โฒ triggered by on_{parent_event.event_type}#{parent_event.event_id[-4:]}'
					if parent_event
					else '๐ by Agent'
				)
				grandparent = (
					(
						f'โฒ under {grandparent_event.event_type}#{grandparent_event.event_id[-4:]}'
						if grandparent_event
						else '๐ by Agent'
					)
					if parent_event
					else ''
				)
				event_str = f'#{event.event_id[-4:]}'
				time_start = time.time()
				# .ljust(54) pads the prefix so log lines align into columns
				watchdog_and_handler_str = f'[{watchdog_class_name}.{actual_handler.__name__}({event_str})]'.ljust(54)
				browser_session.logger.debug(f'๐ {watchdog_and_handler_str} โณ Starting... {parent} {grandparent}')
				try:
					# **EXECUTE THE EVENT HANDLER FUNCTION**
					result = await actual_handler(event)
					# handlers may return an exception object instead of raising; normalize to a raise
					if isinstance(result, Exception):
						raise result

					# just for debug logging, not used for anything else
					time_end = time.time()
					time_elapsed = time_end - time_start
					result_summary = '' if result is None else f' โก๏ธ <{type(result).__name__}>'
					parents_summary = f' {parent}'.replace('โฒ triggered by ', 'โคด returned to ').replace(
						'๐ by Agent', '๐ returned to Agent'
					)
					browser_session.logger.debug(
						f'๐ {watchdog_and_handler_str} Succeeded ({time_elapsed:.2f}s){result_summary}{parents_summary}'
					)
					return result
				except Exception as e:
					time_end = time.time()
					time_elapsed = time_end - time_start
					# keep a reference: `e` may be shadowed by the repair attempt below
					original_error = e
					browser_session.logger.error(
						f'๐ {watchdog_and_handler_str} โ Failed ({time_elapsed:.2f}s): {type(e).__name__}: {e}'
					)
					# attempt to repair potentially crashed CDP session
					try:
						if browser_session.agent_focus_target_id:
							# With event-driven sessions, Chrome will send detach/attach events
							# SessionManager handles pool cleanup automatically
							target_id_to_restore = browser_session.agent_focus_target_id
							browser_session.logger.debug(
								f'๐ {watchdog_and_handler_str} โ ๏ธ Session error detected, waiting for CDP events to sync (target: {target_id_to_restore})'
							)
							# Wait for new attach event to restore the session
							# This will raise ValueError if target doesn't re-attach
							await browser_session.get_or_create_cdp_session(target_id=target_id_to_restore, focus=True)
						else:
							# Try to get any available session
							await browser_session.get_or_create_cdp_session(target_id=None, focus=True)
					except Exception as sub_error:
						# string-based type check because the websocket exception classes may not be importable here
						if 'ConnectionClosedError' in str(type(sub_error)) or 'ConnectionError' in str(type(sub_error)):
							browser_session.logger.error(
								f'๐ {watchdog_and_handler_str} โ Browser closed or CDP Connection disconnected by remote. {type(sub_error).__name__}: {sub_error}\n'
							)
							raise
						else:
							browser_session.logger.error(
								f'๐ {watchdog_and_handler_str} โ CDP connected but failed to re-create CDP session after error "{type(original_error).__name__}: {original_error}" in {actual_handler.__name__}({event.event_type}#{event.event_id[-4:]}): due to {type(sub_error).__name__}: {sub_error}\n'
							)
					# Always re-raise the original error with its traceback preserved
					raise

			return unique_handler

		unique_handler = make_unique_handler(handler)
		# name the wrapper '<WatchdogClass>.<handler>' so duplicates are detectable below
		unique_handler.__name__ = f'{watchdog_class_name}.{handler.__name__}'

		# Check if this handler is already registered - throw error if duplicate
		existing_handlers = event_bus.handlers.get(event_class.__name__, [])
		handler_names = [getattr(h, '__name__', str(h)) for h in existing_handlers]
		if unique_handler.__name__ in handler_names:
			raise RuntimeError(
				f'[{watchdog_class_name}] Duplicate handler registration attempted! '
				f'Handler {unique_handler.__name__} is already registered for {event_class.__name__}. '
				f'This likely means attach_to_session() was called multiple times.'
			)

		event_bus.on(event_class, unique_handler)

	def attach_to_session(self) -> None:
		"""Attach watchdog to its browser session and start monitoring.

		This method handles event listener registration. The watchdog is already
		bound to a browser session via self.browser_session from initialization.
		"""
		# Register event handlers automatically based on method names
		assert self.browser_session is not None, 'Root CDP client not initialized - browser may not be connected yet'

		from browser_use.browser import events

		# Build a name -> class map of every concrete event type in browser_use.browser.events
		event_classes = {}
		for name in dir(events):
			obj = getattr(events, name)
			if inspect.isclass(obj) and issubclass(obj, BaseEvent) and obj is not BaseEvent:
				event_classes[name] = obj

		# Find all handler methods (on_EventName)
		registered_events = set()
		for method_name in dir(self):
			if method_name.startswith('on_') and callable(getattr(self, method_name)):
				# Extract event name from method name (on_EventName -> EventName)
				event_name = method_name[3:]  # Remove 'on_' prefix
				if event_name in event_classes:
					event_class = event_classes[event_name]
					# ASSERTION: If LISTENS_TO is defined, enforce it
					if self.LISTENS_TO:
						assert event_class in self.LISTENS_TO, (
							f'[{self.__class__.__name__}] Handler {method_name} listens to {event_name} '
							f'but {event_name} is not declared in LISTENS_TO: {[e.__name__ for e in self.LISTENS_TO]}'
						)
					handler = getattr(self, method_name)
					# Use the static helper to attach the handler
					self.attach_handler_to_session(self.browser_session, event_class, handler)
					registered_events.add(event_class)

		# ASSERTION: If LISTENS_TO is defined, ensure all declared events have handlers
		if self.LISTENS_TO:
			missing_handlers = set(self.LISTENS_TO) - registered_events
			if missing_handlers:
				missing_names = [e.__name__ for e in missing_handlers]
				self.logger.warning(
					f'[{self.__class__.__name__}] LISTENS_TO declares {missing_names} '
					f'but no handlers found (missing on_{"_, on_".join(missing_names)} methods)'
				)

	def __del__(self) -> None:
		"""Clean up any running tasks during garbage collection."""
		# A BIT OF MAGIC: Cancel any private attributes that look like asyncio tasks
		try:
			for attr_name in dir(self):
				# e.g. _browser_crash_watcher_task = asyncio.Task
				if attr_name.startswith('_') and attr_name.endswith('_task'):
					try:
						task = getattr(self, attr_name)
						if hasattr(task, 'cancel') and callable(task.cancel) and not task.done():
							task.cancel()
						# self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
					except Exception:
						pass  # Ignore errors during cleanup
				# e.g. _cdp_download_tasks = WeakSet[asyncio.Task] or list[asyncio.Task]
				if attr_name.startswith('_') and attr_name.endswith('_tasks') and isinstance(getattr(self, attr_name), Iterable):
					for task in getattr(self, attr_name):
						try:
							if hasattr(task, 'cancel') and callable(task.cancel) and not task.done():
								task.cancel()
							# self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
						except Exception:
							pass  # Ignore errors during cleanup
		except Exception as e:
			from browser_use.utils import logger

			logger.error(f'โ ๏ธ Error during BrowserSession {self.__class__.__name__} garbage collection __del__(): {type(e)}: {e}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/views.py | browser_use/browser/views.py | from dataclasses import dataclass, field
from typing import Any
from bubus import BaseEvent
from cdp_use.cdp.target import TargetID
from pydantic import AliasChoices, BaseModel, ConfigDict, Field, field_serializer
from browser_use.dom.views import DOMInteractedElement, SerializedDOMState
# Known placeholder image data for about:blank pages - a 4x4 white PNG
PLACEHOLDER_4PX_SCREENSHOT = (
'iVBORw0KGgoAAAANSUhEUgAAAAQAAAAECAIAAAAmkwkpAAAAFElEQVR4nGP8//8/AwwwMSAB3BwAlm4DBfIlvvkAAAAASUVORK5CYII='
)
# Pydantic
class TabInfo(BaseModel):
	"""Represents information about a browser tab"""

	model_config = ConfigDict(
		extra='forbid',
		validate_by_name=True,
		validate_by_alias=True,
		populate_by_name=True,
	)

	# Original fields
	url: str
	title: str
	# Serialized as 'tab_id' for external consumers; accepts either alias on input.
	target_id: TargetID = Field(serialization_alias='tab_id', validation_alias=AliasChoices('tab_id', 'target_id'))
	parent_target_id: TargetID | None = Field(
		default=None, serialization_alias='parent_tab_id', validation_alias=AliasChoices('parent_tab_id', 'parent_target_id')
	)  # parent page that contains this popup or cross-origin iframe

	@field_serializer('target_id')
	def serialize_target_id(self, target_id: TargetID, _info: Any) -> str:
		# Only the last 4 characters are emitted, keeping serialized output short.
		return target_id[-4:]

	@field_serializer('parent_target_id')
	def serialize_parent_target_id(self, parent_target_id: TargetID | None, _info: Any) -> str | None:
		# Same 4-character truncation as target_id; None passes through unchanged.
		return parent_target_id[-4:] if parent_target_id else None
class PageInfo(BaseModel):
	"""Comprehensive page size and scroll information"""

	# Current viewport dimensions
	viewport_width: int
	viewport_height: int

	# Total page dimensions
	page_width: int
	page_height: int

	# Current scroll position
	scroll_x: int
	scroll_y: int

	# Calculated scroll information (presumably the amount of page content outside
	# the viewport in each direction, in pixels — confirm against the producer)
	pixels_above: int
	pixels_below: int
	pixels_left: int
	pixels_right: int

	# Page statistics are now computed dynamically instead of stored
@dataclass
class NetworkRequest:
	"""Information about a pending (still-loading) network request."""

	url: str
	method: str = 'GET'  # HTTP method of the request
	loading_duration_ms: float = 0.0  # How long this request has been loading (ms since request started, max 10s)
	resource_type: str | None = None  # e.g., 'Document', 'Stylesheet', 'Image', 'Script', 'XHR', 'Fetch'
@dataclass
class PaginationButton:
	"""Information about a pagination button detected on the page"""

	button_type: str  # 'next', 'prev', 'first', 'last', 'page_number'
	backend_node_id: int  # Backend node ID for clicking
	text: str  # Button text/label
	selector: str  # XPath or other selector to locate the element
	is_disabled: bool = False  # Whether the button appears disabled
@dataclass
class BrowserStateSummary:
	"""The summary of the browser's current state designed for an LLM to process"""

	# provided by SerializedDOMState:
	dom_state: SerializedDOMState

	url: str
	title: str
	tabs: list[TabInfo]
	# Screenshot data (likely base64-encoded — see PLACEHOLDER_4PX_SCREENSHOT); repr=False keeps logs readable
	screenshot: str | None = field(default=None, repr=False)
	page_info: PageInfo | None = None  # Enhanced page information

	# Keep legacy fields for backward compatibility
	pixels_above: int = 0
	pixels_below: int = 0
	browser_errors: list[str] = field(default_factory=list)
	is_pdf_viewer: bool = False  # Whether the current page is a PDF viewer
	recent_events: str | None = None  # Text summary of recent browser events
	pending_network_requests: list[NetworkRequest] = field(default_factory=list)  # Currently loading network requests
	pagination_buttons: list[PaginationButton] = field(default_factory=list)  # Detected pagination buttons
	closed_popup_messages: list[str] = field(default_factory=list)  # Messages from auto-closed JavaScript dialogs
@dataclass
class BrowserStateHistory:
	"""Snapshot of the browser's state at a past point in time, for use in LLM message history."""

	url: str
	title: str
	tabs: list[TabInfo]
	interacted_element: list[DOMInteractedElement | None] | list[None]
	screenshot_path: str | None = None

	def get_screenshot(self) -> str | None:
		"""Load the screenshot referenced by ``screenshot_path`` and return it base64-encoded.

		Returns None when no path is stored, the file is missing, or reading fails.
		"""
		if not self.screenshot_path:
			return None

		import base64
		from pathlib import Path

		file_path = Path(self.screenshot_path)
		if not file_path.exists():
			return None

		try:
			raw_bytes = file_path.read_bytes()
		except Exception:
			return None
		return base64.b64encode(raw_bytes).decode('utf-8')

	def to_dict(self) -> dict[str, Any]:
		"""Serialize this snapshot into a plain dict (tabs and elements dumped recursively)."""
		return {
			'tabs': [tab.model_dump() for tab in self.tabs],
			'screenshot_path': self.screenshot_path,
			'interacted_element': [el.to_dict() if el else None for el in self.interacted_element],
			'url': self.url,
			'title': self.title,
		}
class BrowserError(Exception):
	"""Browser error with structured memory for LLM context management.

	Carries two separate memory contexts alongside the technical message:
	- short_term_memory: immediate context shown once to the LLM for the next action
	- long_term_memory: persistent error information stored across steps
	"""

	message: str
	short_term_memory: str | None = None
	long_term_memory: str | None = None
	details: dict[str, Any] | None = None
	while_handling_event: BaseEvent[Any] | None = None

	def __init__(
		self,
		message: str,
		short_term_memory: str | None = None,
		long_term_memory: str | None = None,
		details: dict[str, Any] | None = None,
		event: BaseEvent[Any] | None = None,
	):
		"""Initialize a BrowserError with structured memory contexts.

		Args:
			message: Technical error message for logging and debugging
			short_term_memory: Context shown once to LLM (e.g., available actions, options)
			long_term_memory: Persistent error info stored in agent memory
			details: Additional metadata for debugging
			event: The browser event that triggered this error
		"""
		super().__init__(message)
		self.message = message
		self.short_term_memory = short_term_memory
		self.long_term_memory = long_term_memory
		self.details = details
		self.while_handling_event = event

	def __str__(self) -> str:
		# Most specific representation first: details, then the triggering event, then bare message.
		if self.details:
			return f'{self.message} ({self.details}) during: {self.while_handling_event}'
		if self.while_handling_event:
			return f'{self.message} (while handling: {self.while_handling_event})'
		return self.message
class URLNotAllowedError(BrowserError):
	"""Error raised when a URL is not allowed."""
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/profile.py | browser_use/browser/profile.py | import os
import sys
import tempfile
from collections.abc import Iterable
from enum import Enum
from functools import cache
from pathlib import Path
from typing import Annotated, Any, Literal, Self
from urllib.parse import urlparse
from pydantic import AfterValidator, AliasChoices, BaseModel, ConfigDict, Field, field_validator, model_validator
from browser_use.browser.cloud.views import CloudBrowserParams
from browser_use.config import CONFIG
from browser_use.utils import _log_pretty_path, logger
def _get_enable_default_extensions_default() -> bool:
"""Get the default value for enable_default_extensions from env var or True."""
env_val = os.getenv('BROWSER_USE_DISABLE_EXTENSIONS')
if env_val is not None:
# If DISABLE_EXTENSIONS is truthy, return False (extensions disabled)
return env_val.lower() in ('0', 'false', 'no', 'off', '')
return True
CHROME_DEBUG_PORT = 9242  # use a non-default port to avoid conflicts with other tools / devs using 9222
DOMAIN_OPTIMIZATION_THRESHOLD = 100  # Convert domain lists to sets for O(1) lookup when >= this size

# Chromium feature flags that get joined into a single --disable-features=... CLI arg (see CHROME_DEFAULT_ARGS below)
CHROME_DISABLED_COMPONENTS = [
	# Playwright defaults: https://github.com/microsoft/playwright/blob/41008eeddd020e2dee1c540f7c0cdfa337e99637/packages/playwright-core/src/server/chromium/chromiumSwitches.ts#L76
	# AcceptCHFrame,AutoExpandDetailsElement,AvoidUnnecessaryBeforeUnloadCheckSync,CertificateTransparencyComponentUpdater,DeferRendererTasksAfterInput,DestroyProfileOnBrowserClose,DialMediaRouteProvider,ExtensionManifestV2Disabled,GlobalMediaControls,HttpsUpgrades,ImprovedCookieControls,LazyFrameLoading,LensOverlay,MediaRouter,PaintHolding,ThirdPartyStoragePartitioning,Translate
	# See https://github.com/microsoft/playwright/pull/10380
	'AcceptCHFrame',
	# See https://github.com/microsoft/playwright/pull/10679
	'AutoExpandDetailsElement',
	# See https://github.com/microsoft/playwright/issues/14047
	'AvoidUnnecessaryBeforeUnloadCheckSync',
	# See https://github.com/microsoft/playwright/pull/12992
	'CertificateTransparencyComponentUpdater',
	'DestroyProfileOnBrowserClose',
	# See https://github.com/microsoft/playwright/pull/13854
	'DialMediaRouteProvider',
	# Chromium is disabling manifest version 2. Allow testing it as long as Chromium can actually run it.
	# Disabled in https://chromium-review.googlesource.com/c/chromium/src/+/6265903.
	'ExtensionManifestV2Disabled',
	'GlobalMediaControls',
	# See https://github.com/microsoft/playwright/pull/27605
	'HttpsUpgrades',
	'ImprovedCookieControls',
	'LazyFrameLoading',
	# Hides the Lens feature in the URL address bar. Its not working in unofficial builds.
	'LensOverlay',
	# See https://github.com/microsoft/playwright/pull/8162
	'MediaRouter',
	# See https://github.com/microsoft/playwright/issues/28023
	'PaintHolding',
	# See https://github.com/microsoft/playwright/issues/32230
	'ThirdPartyStoragePartitioning',
	# See https://github.com/microsoft/playwright/issues/16126
	'Translate',
	# Added by us:
	'AutomationControlled',
	'BackForwardCache',
	'OptimizationHints',
	'ProcessPerSiteUpToMainFrameThreshold',
	'InterestFeedContentSuggestions',
	'CalculateNativeWinOcclusion',  # chrome normally stops rendering tabs if they are not visible (occluded by a foreground window or other app)
	# 'BackForwardCache', # agent does actually use back/forward navigation, but we can disable if we ever remove that
	'HeavyAdPrivacyMitigations',
	'PrivacySandboxSettings4',
	'AutofillServerCommunication',
	'CrashReporting',
	'OverscrollHistoryNavigation',
	'InfiniteSessionRestore',
	'ExtensionDisableUnsupportedDeveloper',
	'ExtensionManifestV2Unsupported',
]

# Extra args appended only when running headless
CHROME_HEADLESS_ARGS = [
	'--headless=new',
]

# Extra args appended when running inside a Docker container (sandboxing is unavailable there)
CHROME_DOCKER_ARGS = [
	# '--disable-gpu', # GPU is actually supported in headless docker mode now, but sometimes useful to test without it
	'--no-sandbox',
	'--disable-gpu-sandbox',
	'--disable-setuid-sandbox',
	'--disable-dev-shm-usage',
	'--no-xshm',
	'--no-zygote',
	# '--single-process', # might be the cause of "Target page, context or browser has been closed" errors during CDP page.captureScreenshot https://stackoverflow.com/questions/51629151/puppeteer-protocol-error-page-navigate-target-closed
	'--disable-site-isolation-trials',  # lowers RAM use by 10-16% in docker, but could lead to easier bot blocking if pages can detect it?
]

# Extra args appended when disable_security=True (weakens web security — use only for trusted content)
CHROME_DISABLE_SECURITY_ARGS = [
	'--disable-site-isolation-trials',
	'--disable-web-security',
	'--disable-features=IsolateOrigins,site-per-process',
	'--allow-running-insecure-content',
	'--ignore-certificate-errors',
	'--ignore-ssl-errors',
	'--ignore-certificate-errors-spki-list',
]

# Extra args appended when deterministic_rendering=True (pins rendering behavior for reproducible screenshots)
CHROME_DETERMINISTIC_RENDERING_ARGS = [
	'--deterministic-mode',
	'--js-flags=--random-seed=1157259159',
	'--force-device-scale-factor=2',
	'--enable-webgl',
	# '--disable-skia-runtime-opts',
	# '--disable-2d-canvas-clip-aa',
	'--font-render-hinting=none',
	'--force-color-profile=srgb',
]

# Base CLI args applied to every launch (a mix of playwright defaults and our own automation tweaks)
CHROME_DEFAULT_ARGS = [
	# # provided by playwright by default: https://github.com/microsoft/playwright/blob/41008eeddd020e2dee1c540f7c0cdfa337e99637/packages/playwright-core/src/server/chromium/chromiumSwitches.ts#L76
	'--disable-field-trial-config',  # https://source.chromium.org/chromium/chromium/src/+/main:testing/variations/README.md
	'--disable-background-networking',
	'--disable-background-timer-throttling',  # agents might be working on background pages if the human switches to another tab
	'--disable-backgrounding-occluded-windows',  # same deal, agents are often working on backgrounded browser windows
	'--disable-back-forward-cache',  # Avoids surprises like main request not being intercepted during page.goBack().
	'--disable-breakpad',
	'--disable-client-side-phishing-detection',
	'--disable-component-extensions-with-background-pages',
	'--disable-component-update',  # Avoids unneeded network activity after startup.
	'--no-default-browser-check',
	# '--disable-default-apps',
	'--disable-dev-shm-usage',  # crucial for docker support, harmless in non-docker environments
	# '--disable-extensions',
	# '--disable-features=' + disabledFeatures(assistantMode).join(','),
	# '--allow-pre-commit-input', # duplicate removed
	'--disable-hang-monitor',
	'--disable-ipc-flooding-protection',  # important to be able to make lots of CDP calls in a tight loop
	'--disable-popup-blocking',
	'--disable-prompt-on-repost',
	'--disable-renderer-backgrounding',
	# '--force-color-profile=srgb', # moved to CHROME_DETERMINISTIC_RENDERING_ARGS
	'--metrics-recording-only',
	'--no-first-run',
	# // See https://chromium-review.googlesource.com/c/chromium/src/+/2436773
	'--no-service-autorun',
	'--export-tagged-pdf',
	# // https://chromium-review.googlesource.com/c/chromium/src/+/4853540
	'--disable-search-engine-choice-screen',
	# // https://issues.chromium.org/41491762
	'--unsafely-disable-devtools-self-xss-warnings',
	# added by us:
	'--enable-features=NetworkService,NetworkServiceInProcess',
	'--enable-network-information-downlink-max',
	'--test-type=gpu',
	'--disable-sync',
	'--allow-legacy-extension-manifests',
	'--allow-pre-commit-input',
	'--disable-blink-features=AutomationControlled',
	'--install-autogenerated-theme=0,0,0',
	# '--hide-scrollbars', # leave them visible! the agent uses them to know when it needs to scroll to see more options
	'--log-level=2',
	# '--enable-logging=stderr',
	'--disable-focus-on-load',
	'--disable-window-activation',
	'--generate-pdf-document-outline',
	'--no-pings',
	'--ash-no-nudges',
	'--disable-infobars',
	'--simulate-outdated-no-au="Tue, 31 Dec 2099 23:59:59 GMT"',
	'--hide-crash-restore-bubble',
	'--suppress-message-center-popups',
	'--disable-domain-reliability',
	'--disable-datasaver-prompt',
	'--disable-speech-synthesis-api',
	'--disable-speech-api',
	'--disable-print-preview',
	'--safebrowsing-disable-auto-update',
	'--disable-external-intent-requests',
	'--disable-desktop-notifications',
	'--noerrdialogs',
	'--silent-debugger-extension-api',
	# Extension welcome tab suppression for automation
	'--disable-extensions-http-throttling',
	'--extensions-on-chrome-urls',
	'--disable-default-apps',
	f'--disable-features={",".join(CHROME_DISABLED_COMPONENTS)}',
]
class ViewportSize(BaseModel):
	"""Simple non-negative width/height pair used for viewports, screens, and window sizes.

	Supports dict-style access (size['width']) for compatibility with code that
	treats sizes as plain mappings.
	"""

	width: int = Field(ge=0)
	height: int = Field(ge=0)

	def __getitem__(self, key: str) -> int:
		# dict(self) yields the model's fields; raises KeyError for unknown keys
		return dict(self)[key]

	def __setitem__(self, key: str, value: int) -> None:
		# setattr triggers pydantic validation if validate_assignment is enabled
		setattr(self, key, value)
@cache
def get_display_size() -> ViewportSize | None:
	"""Detect the primary display resolution, or return None when no display is found.

	Tries macOS (AppKit) first, then Windows/Linux (screeninfo). The result is
	memoized via functools.cache since the display is not expected to change mid-run.
	"""
	# macOS: AppKit only imports on Darwin, so any failure simply falls through
	try:
		from AppKit import NSScreen  # type: ignore[import]

		frame = NSScreen.mainScreen().frame()
		detected = ViewportSize(width=int(frame.size.width), height=int(frame.size.height))
		logger.debug(f'Display size: {detected}')
		return detected
	except Exception:
		pass

	# Windows & Linux: take the first monitor reported by screeninfo
	try:
		from screeninfo import get_monitors

		primary = get_monitors()[0]
		detected = ViewportSize(width=int(primary.width), height=int(primary.height))
		logger.debug(f'Display size: {detected}')
		return detected
	except Exception:
		pass

	logger.debug('No display size found')
	return None
def get_window_adjustments() -> tuple[int, int]:
	"""Returns recommended x, y offsets for window positioning"""
	platform_offsets = {
		'darwin': (-4, 24),  # macOS has a small title bar, no border
		'win32': (-8, 0),  # Windows has a border on the left
	}
	# Linux (and anything else) needs no adjustment
	return platform_offsets.get(sys.platform, (0, 0))
def validate_url(url: str, schemes: Iterable[str] = ()) -> str:
	"""Validate URL format and optionally check for specific schemes.

	Args:
		url: The URL string to validate.
		schemes: Optional collection of allowed (lowercase) schemes; empty accepts any.

	Returns:
		The original url, unchanged, when valid.

	Raises:
		ValueError: If the URL has no network location, or its scheme is not allowed.
	"""
	parsed = urlparse(url)
	# A URL with no netloc (e.g. 'not-a-url') is considered malformed
	if not parsed.netloc:
		raise ValueError(f'Invalid URL format: {url}')
	# Scheme-less URLs pass even when a scheme whitelist is given (lenient by design)
	if schemes and parsed.scheme and parsed.scheme.lower() not in schemes:
		raise ValueError(f'URL has invalid scheme: {url} (expected one of {schemes})')
	return url
def validate_float_range(value: float, min_val: float, max_val: float) -> float:
	"""Validate that float is within specified range (inclusive at both ends)."""
	# Chained comparison also rejects NaN (every comparison with NaN is False)
	if min_val <= value <= max_val:
		return value
	raise ValueError(f'Value {value} outside of range {min_val}-{max_val}')
def validate_cli_arg(arg: str) -> str:
	"""Validate that arg is a valid CLI argument (must use the --flag form)."""
	if arg.startswith('--'):
		return arg
	raise ValueError(f'Invalid CLI argument: {arg} (should start with --, e.g. --some-key="some value here")')
# ===== Enum definitions =====
class RecordHarContent(str, Enum):
	"""How resource content is stored in recorded HAR files."""

	OMIT = 'omit'  # leave response bodies out of the HAR
	EMBED = 'embed'  # inline bodies in the HAR file
	ATTACH = 'attach'  # store bodies as separate attached files
class RecordHarMode(str, Enum):
	"""Verbosity of HAR recording."""

	FULL = 'full'
	MINIMAL = 'minimal'
class BrowserChannel(str, Enum):
	"""Supported chromium-based browser release channels."""

	CHROMIUM = 'chromium'
	CHROME = 'chrome'
	CHROME_BETA = 'chrome-beta'
	CHROME_DEV = 'chrome-dev'
	CHROME_CANARY = 'chrome-canary'
	MSEDGE = 'msedge'
	MSEDGE_BETA = 'msedge-beta'
	MSEDGE_DEV = 'msedge-dev'
	MSEDGE_CANARY = 'msedge-canary'
# Using constants from central location in browser_use.config
BROWSERUSE_DEFAULT_CHANNEL = BrowserChannel.CHROMIUM

# ===== Type definitions with validators =====
# Annotated aliases run their validator whenever pydantic validates a field of this type
UrlStr = Annotated[str, AfterValidator(validate_url)]
NonNegativeFloat = Annotated[float, AfterValidator(lambda x: validate_float_range(x, 0, float('inf')))]
CliArgStr = Annotated[str, AfterValidator(validate_cli_arg)]
# ===== Base Models =====
class BrowserContextArgs(BaseModel):
	"""
	Base model for common browser context parameters used by
	both BrowserType.new_context() and BrowserType.launch_persistent_context().

	https://playwright.dev/python/docs/api/class-browser#browser-new-context
	"""

	model_config = ConfigDict(extra='ignore', validate_assignment=False, revalidate_instances='always', populate_by_name=True)

	# Browser context parameters
	accept_downloads: bool = True

	# Security options
	# proxy: ProxySettings | None = None
	permissions: list[str] = Field(
		default_factory=lambda: ['clipboardReadWrite', 'notifications'],
		description='Browser permissions to grant (CDP Browser.grantPermissions).',
		# clipboardReadWrite is for google sheets and pyperclip automations
		# notifications are to avoid browser fingerprinting
	)
	# client_certificates: list[ClientCertificate] = Field(default_factory=list)
	# http_credentials: HttpCredentials | None = None

	# Viewport options
	user_agent: str | None = None
	screen: ViewportSize | None = None
	viewport: ViewportSize | None = Field(default=None)
	no_viewport: bool | None = None
	device_scale_factor: NonNegativeFloat | None = None
	# geolocation: Geolocation | None = None

	# Recording Options
	record_har_content: RecordHarContent = RecordHarContent.EMBED
	record_har_mode: RecordHarMode = RecordHarMode.FULL
	# validation_alias allows the legacy save_har_path / save_recording_path names at construction time
	record_har_path: str | Path | None = Field(default=None, validation_alias=AliasChoices('save_har_path', 'record_har_path'))
	record_video_dir: str | Path | None = Field(
		default=None, validation_alias=AliasChoices('save_recording_path', 'record_video_dir')
	)
class BrowserConnectArgs(BaseModel):
	"""
	Base model for common browser connect parameters used by
	both connect_over_cdp() and connect_over_ws().

	https://playwright.dev/python/docs/api/class-browsertype#browser-type-connect
	https://playwright.dev/python/docs/api/class-browsertype#browser-type-connect-over-cdp
	"""

	model_config = ConfigDict(extra='ignore', validate_assignment=True, revalidate_instances='always', populate_by_name=True)

	headers: dict[str, str] | None = Field(default=None, description='Additional HTTP headers to be sent with connect request')
class BrowserLaunchArgs(BaseModel):
	"""
	Base model for common browser launch parameters used by
	both launch() and launch_persistent_context().

	https://playwright.dev/python/docs/api/class-browsertype#browser-type-launch
	"""

	model_config = ConfigDict(
		extra='ignore',
		validate_assignment=True,
		revalidate_instances='always',
		from_attributes=True,
		validate_by_name=True,
		validate_by_alias=True,
		populate_by_name=True,
	)

	env: dict[str, str | float | bool] | None = Field(
		default=None,
		description='Extra environment variables to set when launching the browser. If None, inherits from the current process.',
	)
	executable_path: str | Path | None = Field(
		default=None,
		validation_alias=AliasChoices('browser_binary_path', 'chrome_binary_path'),
		description='Path to the chromium-based browser executable to use.',
	)
	headless: bool | None = Field(default=None, description='Whether to run the browser in headless or windowed mode.')
	args: list[CliArgStr] = Field(
		default_factory=list, description='List of *extra* CLI args to pass to the browser when launching.'
	)
	ignore_default_args: list[CliArgStr] | Literal[True] = Field(
		default_factory=lambda: [
			'--enable-automation',  # we mask the automation fingerprint via JS and other flags
			'--disable-extensions',  # allow browser extensions
			'--hide-scrollbars',  # always show scrollbars in screenshots so agent knows there is more content below it can scroll down to
			'--disable-features=AcceptCHFrame,AutoExpandDetailsElement,AvoidUnnecessaryBeforeUnloadCheckSync,CertificateTransparencyComponentUpdater,DeferRendererTasksAfterInput,DestroyProfileOnBrowserClose,DialMediaRouteProvider,ExtensionManifestV2Disabled,GlobalMediaControls,HttpsUpgrades,ImprovedCookieControls,LazyFrameLoading,LensOverlay,MediaRouter,PaintHolding,ThirdPartyStoragePartitioning,Translate',
		],
		description='List of default CLI args to stop playwright from applying (see https://github.com/microsoft/playwright/blob/41008eeddd020e2dee1c540f7c0cdfa337e99637/packages/playwright-core/src/server/chromium/chromiumSwitches.ts)',
	)
	channel: BrowserChannel | None = None  # https://playwright.dev/docs/browsers#chromium-headless-shell
	chromium_sandbox: bool = Field(
		default=not CONFIG.IN_DOCKER, description='Whether to enable Chromium sandboxing (recommended unless inside Docker).'
	)
	devtools: bool = Field(
		default=False, description='Whether to open DevTools panel automatically for every page, only works when headless=False.'
	)
	# proxy: ProxySettings | None = Field(default=None, description='Proxy settings to use to connect to the browser.')
	downloads_path: str | Path | None = Field(
		default=None,
		description='Directory to save downloads to.',
		validation_alias=AliasChoices('downloads_dir', 'save_downloads_path'),
	)
	traces_dir: str | Path | None = Field(
		default=None,
		description='Directory for saving playwright trace.zip files (playwright actions, screenshots, DOM snapshots, HAR traces).',
		validation_alias=AliasChoices('trace_path', 'traces_dir'),
	)
	# firefox_user_prefs: dict[str, str | float | bool] = Field(default_factory=dict)

	@model_validator(mode='after')
	def validate_devtools_headless(self) -> Self:
		"""Cannot open devtools when headless is True"""
		assert not (self.headless and self.devtools), 'headless=True and devtools=True cannot both be set at the same time'
		return self

	@model_validator(mode='after')
	def set_default_downloads_path(self) -> Self:
		"""Set a unique default downloads path if none is provided.

		NOTE(review): the directory is hard-coded under /tmp — presumably fine for
		Linux/Docker/macOS targets, but not portable to Windows; confirm intent.
		"""
		if self.downloads_path is None:
			import uuid

			# Create unique directory in /tmp for downloads
			unique_id = str(uuid.uuid4())[:8]  # 8 characters
			downloads_path = Path(f'/tmp/browser-use-downloads-{unique_id}')

			# Ensure path doesn't already exist (extremely unlikely but possible)
			while downloads_path.exists():
				unique_id = str(uuid.uuid4())[:8]
				downloads_path = Path(f'/tmp/browser-use-downloads-{unique_id}')

			self.downloads_path = downloads_path
			self.downloads_path.mkdir(parents=True, exist_ok=True)
		return self

	@staticmethod
	def args_as_dict(args: list[str]) -> dict[str, str]:
		"""Return the extra launch CLI args as a dictionary.

		'--key=value' maps to {'key': 'value'}; a bare '--flag' maps to {'flag': ''}.
		"""
		args_dict = {}
		for arg in args:
			# split on the first '=' only; padding with '' makes flag-only args yield an empty value
			key, value, *_ = [*arg.split('=', 1), '', '', '']
			args_dict[key.strip().lstrip('-')] = value.strip()
		return args_dict

	@staticmethod
	def args_as_list(args: dict[str, str]) -> list[str]:
		"""Return the extra launch CLI args as a list of strings (inverse of args_as_dict)."""
		return [f'--{key.lstrip("-")}={value}' if value else f'--{key.lstrip("-")}' for key, value in args.items()]
# ===== API-specific Models =====
class BrowserNewContextArgs(BrowserContextArgs):
	"""
	Pydantic model for new_context() arguments.
	Extends BaseContextParams with storage_state parameter.

	https://playwright.dev/python/docs/api/class-browser#browser-new-context
	"""

	model_config = ConfigDict(extra='ignore', validate_assignment=False, revalidate_instances='always', populate_by_name=True)

	# storage_state is not supported in launch_persistent_context()
	storage_state: str | Path | dict[str, Any] | None = None
	# TODO: use StorageState type instead of dict[str, Any]

	# to apply this to existing contexts (incl cookies, localStorage, IndexedDB), see:
	# - https://github.com/microsoft/playwright/pull/34591/files
	# - playwright-core/src/server/storageScript.ts restore() function
	# - https://github.com/Skn0tt/playwright/blob/c446bc44bac4fbfdf52439ba434f92192459be4e/packages/playwright-core/src/server/storageScript.ts#L84C1-L123C2

	# @field_validator('storage_state', mode='after')
	# def load_storage_state_from_file(self) -> Self:
	# 	"""Load storage_state from file if it's a path."""
	# 	if isinstance(self.storage_state, (str, Path)):
	# 		storage_state_file = Path(self.storage_state)
	# 		try:
	# 			parsed_storage_state = json.loads(storage_state_file.read_text())
	# 			validated_storage_state = StorageState(**parsed_storage_state)
	# 			self.storage_state = validated_storage_state
	# 		except Exception as e:
	# 			raise ValueError(f'Failed to load storage state file {self.storage_state}: {e}') from e
	# 	return self
	pass
class BrowserLaunchPersistentContextArgs(BrowserLaunchArgs, BrowserContextArgs):
	"""
	Pydantic model for launch_persistent_context() arguments.
	Combines browser launch parameters and context parameters,
	plus adds the user_data_dir parameter.

	https://playwright.dev/python/docs/api/class-browsertype#browser-type-launch-persistent-context
	"""

	model_config = ConfigDict(extra='ignore', validate_assignment=False, revalidate_instances='always')

	# Required parameter specific to launch_persistent_context, but can be None to use incognito temp dir
	user_data_dir: str | Path | None = None

	@field_validator('user_data_dir', mode='after')
	@classmethod
	def validate_user_data_dir(cls, v: str | Path | None) -> str | Path:
		"""Validate user data dir is set to a non-default path.

		None is replaced with a fresh temp dir (returned as str from mkdtemp);
		explicit values are expanded and resolved to an absolute Path.
		"""
		if v is None:
			return tempfile.mkdtemp(prefix='browser-use-user-data-dir-')
		return Path(v).expanduser().resolve()
class ProxySettings(BaseModel):
	"""Typed proxy settings for Chromium traffic.

	- server: Full proxy URL, e.g. "http://host:8080" or "socks5://host:1080"
	- bypass: Comma-separated hosts to bypass (e.g. "localhost,127.0.0.1,*.internal")
	- username/password: Optional credentials for authenticated proxies
	"""

	server: str | None = Field(default=None, description='Proxy URL, e.g. http://host:8080 or socks5://host:1080')
	bypass: str | None = Field(default=None, description='Comma-separated hosts to bypass, e.g. localhost,127.0.0.1,*.internal')
	username: str | None = Field(default=None, description='Proxy auth username')
	password: str | None = Field(default=None, description='Proxy auth password')

	def __getitem__(self, key: str) -> str | None:
		# Dict-style access for code that treats proxy settings as a mapping;
		# unknown keys raise AttributeError (not KeyError)
		return getattr(self, key)
class BrowserProfile(BrowserConnectArgs, BrowserLaunchPersistentContextArgs, BrowserLaunchArgs, BrowserNewContextArgs):
"""
A BrowserProfile is a static template collection of kwargs that can be passed to:
- BrowserType.launch(**BrowserLaunchArgs)
- BrowserType.connect(**BrowserConnectArgs)
- BrowserType.connect_over_cdp(**BrowserConnectArgs)
- BrowserType.launch_persistent_context(**BrowserLaunchPersistentContextArgs)
- BrowserContext.new_context(**BrowserNewContextArgs)
- BrowserSession(**BrowserProfile)
"""
model_config = ConfigDict(
extra='ignore',
validate_assignment=True,
revalidate_instances='always',
from_attributes=True,
validate_by_name=True,
validate_by_alias=True,
)
# ... extends options defined in:
# BrowserLaunchPersistentContextArgs, BrowserLaunchArgs, BrowserNewContextArgs, BrowserConnectArgs
# Session/connection configuration
cdp_url: str | None = Field(default=None, description='CDP URL for connecting to existing browser instance')
is_local: bool = Field(default=False, description='Whether this is a local browser instance')
use_cloud: bool = Field(
default=False,
description='Use browser-use cloud browser service instead of local browser',
)
@property
def cloud_browser(self) -> bool:
"""Alias for use_cloud field for compatibility."""
return self.use_cloud
cloud_browser_params: CloudBrowserParams | None = Field(
default=None, description='Parameters for creating a cloud browser instance'
)
# custom options we provide that aren't native playwright kwargs
disable_security: bool = Field(default=False, description='Disable browser security features.')
deterministic_rendering: bool = Field(default=False, description='Enable deterministic rendering flags.')
allowed_domains: list[str] | set[str] | None = Field(
default=None,
description='List of allowed domains for navigation e.g. ["*.google.com", "https://example.com", "chrome-extension://*"]. Lists with 100+ items are auto-optimized to sets (no pattern matching).',
)
prohibited_domains: list[str] | set[str] | None = Field(
default=None,
description='List of prohibited domains for navigation e.g. ["*.google.com", "https://example.com", "chrome-extension://*"]. Allowed domains take precedence over prohibited domains. Lists with 100+ items are auto-optimized to sets (no pattern matching).',
)
block_ip_addresses: bool = Field(
default=False,
description='Block navigation to URLs containing IP addresses (both IPv4 and IPv6). When True, blocks all IP-based URLs including localhost and private networks.',
)
keep_alive: bool | None = Field(default=None, description='Keep browser alive after agent run.')
# --- Proxy settings ---
# New consolidated proxy config (typed)
proxy: ProxySettings | None = Field(
default=None,
description='Proxy settings. Use browser_use.browser.profile.ProxySettings(server, bypass, username, password)',
)
enable_default_extensions: bool = Field(
default_factory=_get_enable_default_extensions_default,
description="Enable automation-optimized extensions: ad blocking (uBlock Origin), cookie handling (I still don't care about cookies), and URL cleaning (ClearURLs). All extensions work automatically without manual intervention. Extensions are automatically downloaded and loaded when enabled. Can be disabled via BROWSER_USE_DISABLE_EXTENSIONS=1 environment variable.",
)
demo_mode: bool = Field(
default=False,
description='Enable demo mode side panel that streams agent logs directly inside the browser window (requires headless=False).',
)
cookie_whitelist_domains: list[str] = Field(
default_factory=lambda: ['nature.com', 'qatarairways.com'],
description='List of domains to whitelist in the "I still don\'t care about cookies" extension, preventing automatic cookie banner handling on these sites.',
)
window_size: ViewportSize | None = Field(
default=None,
description='Browser window size to use when headless=False.',
)
window_height: int | None = Field(default=None, description='DEPRECATED, use window_size["height"] instead', exclude=True)
window_width: int | None = Field(default=None, description='DEPRECATED, use window_size["width"] instead', exclude=True)
window_position: ViewportSize | None = Field(
default=ViewportSize(width=0, height=0),
description='Window position to use for the browser x,y from the top left when headless=False.',
)
cross_origin_iframes: bool = Field(
default=True,
description='Enable cross-origin iframe support (OOPIF/Out-of-Process iframes). When False, only same-origin frames are processed to avoid complexity and hanging.',
)
max_iframes: int = Field(
default=100,
description='Maximum number of iframe documents to process to prevent crashes.',
)
max_iframe_depth: int = Field(
ge=0,
default=5,
description='Maximum depth for cross-origin iframe recursion (default: 5 levels deep).',
)
# --- Page load/wait timings ---
minimum_wait_page_load_time: float = Field(default=0.25, description='Minimum time to wait before capturing page state.')
wait_for_network_idle_page_load_time: float = Field(default=0.5, description='Time to wait for network idle.')
wait_between_actions: float = Field(default=0.1, description='Time to wait between actions.')
# --- UI/viewport/DOM ---
highlight_elements: bool = Field(default=True, description='Highlight interactive elements on the page.')
dom_highlight_elements: bool = Field(
default=False, description='Highlight interactive elements in the DOM (only for debugging purposes).'
)
filter_highlight_ids: bool = Field(
default=True, description='Only show element IDs in highlights if llm_representation is less than 10 characters.'
)
paint_order_filtering: bool = Field(default=True, description='Enable paint order filtering. Slightly experimental.')
interaction_highlight_color: str = Field(
default='rgb(255, 127, 39)',
description='Color to use for highlighting elements during interactions (CSS color string).',
)
interaction_highlight_duration: float = Field(default=1.0, description='Duration in seconds to show interaction highlights.')
# --- Downloads ---
auto_download_pdfs: bool = Field(default=True, description='Automatically download PDFs when navigating to PDF viewer pages.')
profile_directory: str = 'Default' # e.g. 'Profile 1', 'Profile 2', 'Custom Profile', etc.
# these can be found in BrowserLaunchArgs, BrowserLaunchPersistentContextArgs, BrowserNewContextArgs, BrowserConnectArgs:
# save_recording_path: alias of record_video_dir
# save_har_path: alias of record_har_path
# trace_path: alias of traces_dir
# these shadow the old playwright args on BrowserContextArgs, but it's ok
# because we handle them ourselves in a watchdog and we no longer use playwright, so they should live in the scope for our own config in BrowserProfile long-term
record_video_dir: Path | None = Field(
default=None,
description='Directory to save video recordings. If set, a video of the session will be recorded.',
validation_alias=AliasChoices('save_recording_path', 'record_video_dir'),
)
record_video_size: ViewportSize | None = Field(
default=None, description='Video frame size. If not set, it will use the viewport size.'
)
record_video_framerate: int = Field(default=30, description='The framerate to use for the video recording.')
# TODO: finish implementing extension support in extensions.py
# extension_ids_to_preinstall: list[str] = Field(
# default_factory=list, description='List of Chrome extension IDs to preinstall.'
# )
# extensions_dir: Path = Field(
# default_factory=lambda: Path('~/.config/browseruse/cache/extensions').expanduser(),
# description='Directory containing .crx extension files.',
# )
def __repr__(self) -> str:
short_dir = _log_pretty_path(self.user_data_dir) if self.user_data_dir else '<incognito>'
return f'BrowserProfile(user_data_dir= {short_dir}, headless={self.headless})'
def __str__(self) -> str:
return 'BrowserProfile'
@field_validator('allowed_domains', 'prohibited_domains', mode='after')
@classmethod
def optimize_large_domain_lists(cls, v: list[str] | set[str] | None) -> list[str] | set[str] | None:
"""Convert large domain lists (>=100 items) to sets for O(1) lookup performance."""
if v is None or isinstance(v, set):
return v
if len(v) >= DOMAIN_OPTIMIZATION_THRESHOLD:
logger.warning(
f'๐ง Optimizing domain list with {len(v)} items to set for O(1) lookup. '
f'Note: Pattern matching (*.domain.com, etc.) is not supported for lists >= {DOMAIN_OPTIMIZATION_THRESHOLD} items. '
f'Use exact domains only or keep list size < {DOMAIN_OPTIMIZATION_THRESHOLD} for pattern support.'
)
return set(v)
return v
@model_validator(mode='after')
def copy_old_config_names_to_new(self) -> Self:
"""Copy old config window_width & window_height to window_size."""
if self.window_width or self.window_height:
logger.warning(
f'โ ๏ธ BrowserProfile(window_width=..., window_height=...) are deprecated, use BrowserProfile(window_size={"width": 1920, "height": 1080}) instead.'
)
window_size = self.window_size or ViewportSize(width=0, height=0)
window_size['width'] = window_size['width'] or self.window_width or 1920
window_size['height'] = window_size['height'] or self.window_height or 1080
self.window_size = window_size
return self
@model_validator(mode='after')
def warn_storage_state_user_data_dir_conflict(self) -> Self:
"""Warn when both storage_state and user_data_dir are set, as this can cause conflicts."""
has_storage_state = self.storage_state is not None
has_user_data_dir = (self.user_data_dir is not None) and ('tmp' not in str(self.user_data_dir).lower())
if has_storage_state and has_user_data_dir:
logger.warning(
f'โ ๏ธ BrowserSession(...) was passed both storage_state AND user_data_dir. storage_state={self.storage_state} will forcibly overwrite '
f'cookies/localStorage/sessionStorage in user_data_dir={self.user_data_dir}. '
f'For multiple browsers in parallel, use only storage_state with user_data_dir=None, '
f'or use a separate user_data_dir for each browser and set storage_state=None.'
)
return self
@model_validator(mode='after')
def warn_user_data_dir_non_default_version(self) -> Self:
"""
If user is using default profile dir with a non-default channel, force-change it
to avoid corrupting the default data dir created with a different channel.
"""
is_not_using_default_chromium = self.executable_path or self.channel not in (BROWSERUSE_DEFAULT_CHANNEL, None)
if self.user_data_dir == CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR and is_not_using_default_chromium:
alternate_name = (
Path(self.executable_path).name.lower().replace(' ', '-')
if self.executable_path
else self.channel.name.lower()
if self.channel
else 'None'
)
logger.warning(
f'โ ๏ธ {self} Changing user_data_dir= {_log_pretty_path(self.user_data_dir)} โก๏ธ .../default-{alternate_name} to avoid {alternate_name.upper()} corruping default profile created by {BROWSERUSE_DEFAULT_CHANNEL.name}'
)
self.user_data_dir = CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR.parent / f'default-{alternate_name}'
return self
@model_validator(mode='after')
def warn_deterministic_rendering_weirdness(self) -> Self:
if self.deterministic_rendering:
logger.warning(
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/session_manager.py | browser_use/browser/session_manager.py | """Event-driven CDP session management.
Manages CDP sessions by listening to Target.attachedToTarget and Target.detachedFromTarget
events, ensuring the session pool always reflects the current browser state.
"""
import asyncio
from typing import TYPE_CHECKING
from cdp_use.cdp.target import AttachedToTargetEvent, DetachedFromTargetEvent, SessionID, TargetID
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession, CDPSession, Target
class SessionManager:
"""Event-driven CDP session manager.
Automatically synchronizes the CDP session pool with browser state via CDP events.
Key features:
- Sessions added/removed automatically via Target attach/detach events
- Multiple sessions can attach to the same target
- Targets only removed when ALL sessions detach
- No stale sessions - pool always reflects browser reality
SessionManager is the SINGLE SOURCE OF TRUTH for all targets and sessions.
"""
def __init__(self, browser_session: 'BrowserSession'):
    """Initialize owned bookkeeping state; performs no CDP calls."""
    self.browser_session = browser_session
    self.logger = browser_session.logger
    # All targets (entities: pages, iframes, workers)
    self._targets: dict[TargetID, 'Target'] = {}
    # All sessions (communication channels)
    self._sessions: dict[SessionID, 'CDPSession'] = {}
    # Mapping: target -> sessions attached to it
    self._target_sessions: dict[TargetID, set[SessionID]] = {}
    # Reverse mapping: session -> target it belongs to
    self._session_to_target: dict[SessionID, TargetID] = {}
    # Guards the four dicts above against concurrent event-handler mutation.
    self._lock = asyncio.Lock()
    # Serializes focus-recovery attempts (prevents duplicate emergency tabs).
    self._recovery_lock = asyncio.Lock()
    # Focus recovery coordination - event-driven instead of polling
    self._recovery_in_progress: bool = False
    self._recovery_complete_event: asyncio.Event | None = None
    self._recovery_task: asyncio.Task | None = None
async def start_monitoring(self) -> None:
    """Start monitoring Target attach/detach events.

    Registers CDP event handlers to keep the session pool synchronized with browser state.
    Also discovers and initializes all existing targets on startup.

    Raises:
        RuntimeError: if the root CDP client has not been initialized yet.
    """
    if not self.browser_session._cdp_client_root:
        raise RuntimeError('CDP client not initialized')
    # Capture cdp_client_root in closure to avoid type errors
    cdp_client = self.browser_session._cdp_client_root
    # Enable target discovery to receive targetInfoChanged events automatically
    # This eliminates the need for getTargetInfo() polling calls
    await cdp_client.send.Target.setDiscoverTargets(
        params={'discover': True, 'filter': [{'type': 'page'}, {'type': 'iframe'}]}
    )

    # Register synchronous event handlers (CDP requirement) - the real async work is
    # deferred to background tasks so the CDP dispatch loop is never blocked.
    def on_attached(event: AttachedToTargetEvent, session_id: SessionID | None = None):
        # _handle_target_attached() handles:
        # - setAutoAttach for children
        # - Create CDPSession
        # - Enable monitoring (for pages/tabs)
        # - Add to pool
        create_task_with_error_handling(
            self._handle_target_attached(event),
            name='handle_target_attached',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )

    def on_detached(event: DetachedFromTargetEvent, session_id: SessionID | None = None):
        create_task_with_error_handling(
            self._handle_target_detached(event),
            name='handle_target_detached',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )

    def on_target_info_changed(event, session_id: SessionID | None = None):
        # Update session info from targetInfoChanged events (no polling needed!)
        create_task_with_error_handling(
            self._handle_target_info_changed(event),
            name='handle_target_info_changed',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )

    cdp_client.register.Target.attachedToTarget(on_attached)
    cdp_client.register.Target.detachedFromTarget(on_detached)
    cdp_client.register.Target.targetInfoChanged(on_target_info_changed)
    self.logger.debug('[SessionManager] Event monitoring started')
    # Discover and initialize ALL existing targets
    await self._initialize_existing_targets()
def _get_session_for_target(self, target_id: TargetID) -> 'CDPSession | None':
    """Internal: Get ANY valid session for a target (picks first available).

    โ ๏ธ INTERNAL API - Use browser_session.get_or_create_cdp_session() instead!
    This method has no validation, no focus management, no recovery.

    Side effect: if the requested target is the agent's focused target but has no
    sessions left, the stale focus is cleared and a recovery task may be started.

    Args:
        target_id: Target ID to get session for

    Returns:
        CDPSession if exists, None if target has detached
    """
    session_ids = self._target_sessions.get(target_id, set())
    if not session_ids:
        # Check if this is the focused target - indicates stale focus that needs cleanup
        if self.browser_session.agent_focus_target_id == target_id:
            self.logger.warning(
                f'[SessionManager] โ ๏ธ Attempted to get session for stale focused target {target_id[:8]}... '
                f'Clearing stale focus and triggering recovery.'
            )
            # Clear stale focus immediately (defense in depth)
            self.browser_session.agent_focus_target_id = None
            # Trigger recovery if not already in progress
            if not self._recovery_in_progress:
                self.logger.warning('[SessionManager] Recovery was not in progress! Triggering now.')
                self._recovery_task = create_task_with_error_handling(
                    self._recover_agent_focus(target_id),
                    name='recover_agent_focus_from_stale_get',
                    logger_instance=self.logger,
                    suppress_exceptions=False,
                )
        return None
    # Sets are unordered, so "first available" is an arbitrary member of the set.
    return self._sessions.get(next(iter(session_ids)))
def get_all_page_targets(self) -> list:
"""Get all page/tab targets using owned data.
Returns:
List of Target objects for all page/tab targets
"""
page_targets = []
for target in self._targets.values():
if target.target_type in ('page', 'tab'):
page_targets.append(target)
return page_targets
async def validate_session(self, target_id: TargetID) -> bool:
"""Check if a target still has active sessions.
Args:
target_id: Target ID to validate
Returns:
True if target has active sessions, False if it should be removed
"""
if target_id not in self._target_sessions:
return False
return len(self._target_sessions[target_id]) > 0
async def clear(self) -> None:
"""Clear all owned data structures for cleanup."""
async with self._lock:
# Clear owned data (single source of truth)
self._targets.clear()
self._sessions.clear()
self._target_sessions.clear()
self._session_to_target.clear()
self.logger.info('[SessionManager] Cleared all owned data (targets, sessions, mappings)')
async def is_target_valid(self, target_id: TargetID) -> bool:
"""Check if a target is still valid and has active sessions.
Args:
target_id: Target ID to validate
Returns:
True if target is valid and has active sessions, False otherwise
"""
if target_id not in self._target_sessions:
return False
return len(self._target_sessions[target_id]) > 0
def get_target_id_from_session_id(self, session_id: SessionID) -> TargetID | None:
"""Look up which target a session belongs to.
Args:
session_id: The session ID to look up
Returns:
Target ID if found, None otherwise
"""
return self._session_to_target.get(session_id)
def get_target(self, target_id: TargetID) -> 'Target | None':
"""Get target from owned data.
Args:
target_id: Target ID to get
Returns:
Target object if found, None otherwise
"""
return self._targets.get(target_id)
def get_all_targets(self) -> dict[TargetID, 'Target']:
"""Get all targets (read-only access to owned data).
Returns:
Dict mapping target_id to Target objects
"""
return self._targets
def get_all_target_ids(self) -> list[TargetID]:
"""Get all target IDs from owned data.
Returns:
List of all target IDs
"""
return list(self._targets.keys())
def get_all_sessions(self) -> dict[SessionID, 'CDPSession']:
"""Get all sessions (read-only access to owned data).
Returns:
Dict mapping session_id to CDPSession objects
"""
return self._sessions
def get_session(self, session_id: SessionID) -> 'CDPSession | None':
"""Get session from owned data.
Args:
session_id: Session ID to get
Returns:
CDPSession object if found, None otherwise
"""
return self._sessions.get(session_id)
def get_all_sessions_for_target(self, target_id: TargetID) -> list['CDPSession']:
"""Get ALL sessions attached to a target from owned data.
Args:
target_id: Target ID to get sessions for
Returns:
List of all CDPSession objects for this target
"""
session_ids = self._target_sessions.get(target_id, set())
return [self._sessions[sid] for sid in session_ids if sid in self._sessions]
def get_target_sessions_mapping(self) -> dict[TargetID, set[SessionID]]:
"""Get target->sessions mapping (read-only access).
Returns:
Dict mapping target_id to set of session_ids
"""
return self._target_sessions
def get_focused_target(self) -> 'Target | None':
"""Get the target that currently has agent focus.
Convenience method that uses browser_session.agent_focus_target_id.
Returns:
Target object if agent has focus, None otherwise
"""
if not self.browser_session.agent_focus_target_id:
return None
return self.get_target(self.browser_session.agent_focus_target_id)
async def ensure_valid_focus(self, timeout: float = 3.0) -> bool:
"""Ensure agent_focus_target_id points to a valid, attached CDP session.
If the focus target is stale (detached), this method waits for automatic recovery.
Uses event-driven coordination instead of polling for efficiency.
Args:
timeout: Maximum time to wait for recovery in seconds (default: 3.0)
Returns:
True if focus is valid or successfully recovered, False if no focus or recovery failed
"""
if not self.browser_session.agent_focus_target_id:
# No focus at all - might be initial state or complete failure
if self._recovery_in_progress and self._recovery_complete_event:
# Recovery is happening, wait for it
try:
await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=timeout)
# Check again after recovery - simple existence check
focus_id = self.browser_session.agent_focus_target_id
return bool(focus_id and self._get_session_for_target(focus_id))
except TimeoutError:
self.logger.error(f'[SessionManager] โ Timed out waiting for recovery after {timeout}s')
return False
return False
# Simple existence check - does the focused target have a session?
cdp_session = self._get_session_for_target(self.browser_session.agent_focus_target_id)
if cdp_session:
# Session exists - validate it's still active
is_valid = await self.validate_session(self.browser_session.agent_focus_target_id)
if is_valid:
return True
# Focus is stale - wait for recovery using event instead of polling
stale_target_id = self.browser_session.agent_focus_target_id
self.logger.warning(
f'[SessionManager] โ ๏ธ Stale agent_focus detected (target {stale_target_id[:8] if stale_target_id else "None"}... detached), '
f'waiting for recovery...'
)
# Check if recovery is already in progress
if not self._recovery_in_progress:
self.logger.warning(
'[SessionManager] โ ๏ธ Recovery not in progress for stale focus! '
'This indicates a bug - recovery should have been triggered.'
)
return False
# Wait for recovery complete event (event-driven, not polling!)
if self._recovery_complete_event:
try:
start_time = asyncio.get_event_loop().time()
await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=timeout)
elapsed = asyncio.get_event_loop().time() - start_time
# Verify recovery succeeded - simple existence check
focus_id = self.browser_session.agent_focus_target_id
if focus_id and self._get_session_for_target(focus_id):
self.logger.info(
f'[SessionManager] โ
Agent focus recovered to {self.browser_session.agent_focus_target_id[:8]}... '
f'after {elapsed * 1000:.0f}ms'
)
return True
else:
self.logger.error(
f'[SessionManager] โ Recovery completed but focus still invalid after {elapsed * 1000:.0f}ms'
)
return False
except TimeoutError:
self.logger.error(
f'[SessionManager] โ Recovery timed out after {timeout}s '
f'(was: {stale_target_id[:8] if stale_target_id else "None"}..., '
f'now: {self.browser_session.agent_focus_target_id[:8] if self.browser_session.agent_focus_target_id else "None"})'
)
return False
else:
self.logger.error('[SessionManager] โ Recovery event not initialized')
return False
async def _handle_target_attached(self, event: AttachedToTargetEvent) -> None:
    """Handle Target.attachedToTarget event.

    Called automatically by Chrome when a new target/session is created.
    This is the ONLY place where sessions are added to the pool.
    Idempotent with respect to the target: re-attaching updates url/title in place.
    """
    target_id = event['targetInfo']['targetId']
    session_id = event['sessionId']
    target_type = event['targetInfo']['type']
    target_info = event['targetInfo']
    waiting_for_debugger = event.get('waitingForDebugger', False)
    self.logger.debug(
        f'[SessionManager] Target attached: {target_id[:8]}... (session={session_id[:8]}..., '
        f'type={target_type}, waitingForDebugger={waiting_for_debugger})'
    )
    # Defensive check: browser may be shutting down and _cdp_client_root could be None
    if self.browser_session._cdp_client_root is None:
        self.logger.debug(
            f'[SessionManager] Skipping target attach for {target_id[:8]}... - browser shutting down (no CDP client)'
        )
        return
    # Enable auto-attach for this session's children (do this FIRST, outside lock)
    try:
        await self.browser_session._cdp_client_root.send.Target.setAutoAttach(
            params={'autoAttach': True, 'waitForDebuggerOnStart': False, 'flatten': True}, session_id=session_id
        )
    except Exception as e:
        error_str = str(e)
        # Expected for short-lived targets (workers, temp iframes) that detach before this executes
        # (-32001 is CDP's "session not found" error code)
        if '-32001' not in error_str and 'Session with given id not found' not in error_str:
            self.logger.debug(f'[SessionManager] Auto-attach failed for {target_type}: {e}')
    async with self._lock:
        # Track this session for the target
        if target_id not in self._target_sessions:
            self._target_sessions[target_id] = set()
        self._target_sessions[target_id].add(session_id)
        self._session_to_target[session_id] = target_id
        # Create or update Target (source of truth for url/title)
        if target_id not in self._targets:
            # Local import avoids a circular import with browser_use.browser.session
            from browser_use.browser.session import Target

            target = Target(
                target_id=target_id,
                target_type=target_type,
                url=target_info.get('url', 'about:blank'),
                title=target_info.get('title', 'Unknown title'),
            )
            self._targets[target_id] = target
            self.logger.debug(f'[SessionManager] Created target {target_id[:8]}... (type={target_type})')
        else:
            # Update existing target info
            existing_target = self._targets[target_id]
            existing_target.url = target_info.get('url', existing_target.url)
            existing_target.title = target_info.get('title', existing_target.title)
        # Create CDPSession (communication channel)
        from browser_use.browser.session import CDPSession

        assert self.browser_session._cdp_client_root is not None, 'Root CDP client required'
        cdp_session = CDPSession(
            cdp_client=self.browser_session._cdp_client_root,
            target_id=target_id,
            session_id=session_id,
        )
        # Add to sessions dict
        self._sessions[session_id] = cdp_session
        self.logger.debug(
            f'[SessionManager] Created session {session_id[:8]}... for target {target_id[:8]}... '
            f'(total sessions: {len(self._sessions)})'
        )
    # Enable lifecycle events and network monitoring for page targets
    # (outside the lock: this issues CDP calls)
    if target_type in ('page', 'tab'):
        await self._enable_page_monitoring(cdp_session)
    # Resume execution if waiting for debugger
    if waiting_for_debugger:
        try:
            assert self.browser_session._cdp_client_root is not None
            await self.browser_session._cdp_client_root.send.Runtime.runIfWaitingForDebugger(session_id=session_id)
        except Exception as e:
            self.logger.warning(f'[SessionManager] Failed to resume execution: {e}')
async def _handle_target_info_changed(self, event: dict) -> None:
"""Handle Target.targetInfoChanged event.
Updates target title/URL without polling getTargetInfo().
Chrome fires this automatically when title or URL changes.
"""
target_info = event.get('targetInfo', {})
target_id = target_info.get('targetId')
if not target_id:
return
async with self._lock:
# Update target if it exists (source of truth for url/title)
if target_id in self._targets:
target = self._targets[target_id]
target.title = target_info.get('title', target.title)
target.url = target_info.get('url', target.url)
async def _handle_target_detached(self, event: DetachedFromTargetEvent) -> None:
    """Handle Target.detachedFromTarget event.

    Called automatically by Chrome when a target/session is destroyed.
    This is the ONLY place where sessions are removed from the pool.
    A target is only dropped once its LAST session detaches; event dispatch and
    focus recovery happen outside the lock to avoid blocking other operations.
    """
    session_id = event['sessionId']
    target_id = event.get('targetId')  # May be empty
    # If targetId not in event, look it up via session mapping
    if not target_id:
        async with self._lock:
            target_id = self._session_to_target.get(session_id)
        if not target_id:
            self.logger.warning(f'[SessionManager] Session detached but target unknown (session={session_id[:8]}...)')
            return
    # Flags computed under the lock, consumed after releasing it.
    agent_focus_lost = False
    target_fully_removed = False
    target_type = None
    async with self._lock:
        # Remove this session from target's session set
        if target_id in self._target_sessions:
            self._target_sessions[target_id].discard(session_id)
            remaining_sessions = len(self._target_sessions[target_id])
            self.logger.debug(
                f'[SessionManager] Session detached: target={target_id[:8]}... '
                f'session={session_id[:8]}... (remaining={remaining_sessions})'
            )
            # Only remove target when NO sessions remain
            if remaining_sessions == 0:
                self.logger.debug(f'[SessionManager] No sessions remain for target {target_id[:8]}..., removing target')
                target_fully_removed = True
                # Check if agent_focus points to this target
                agent_focus_lost = self.browser_session.agent_focus_target_id == target_id
                # Immediately clear stale focus to prevent operations on detached target
                if agent_focus_lost:
                    self.logger.debug(
                        f'[SessionManager] Clearing stale agent_focus_target_id {target_id[:8]}... '
                        f'to prevent operations on detached target'
                    )
                    self.browser_session.agent_focus_target_id = None
                # Get target type before removing (needed for TabClosedEvent dispatch)
                target = self._targets.get(target_id)
                target_type = target.target_type if target else None
                # Remove target (entity) from owned data
                if target_id in self._targets:
                    self._targets.pop(target_id)
                    self.logger.debug(
                        f'[SessionManager] Removed target {target_id[:8]}... (remaining targets: {len(self._targets)})'
                    )
                # Clean up tracking
                del self._target_sessions[target_id]
        else:
            # Target not tracked - already removed or never attached
            self.logger.debug(
                f'[SessionManager] Session detached from untracked target: target={target_id[:8]}... '
                f'session={session_id[:8]}... (target was already removed or attach event was missed)'
            )
        # Remove session from owned sessions dict
        if session_id in self._sessions:
            self._sessions.pop(session_id)
            self.logger.debug(
                f'[SessionManager] Removed session {session_id[:8]}... (remaining sessions: {len(self._sessions)})'
            )
        # Remove from reverse mapping
        if session_id in self._session_to_target:
            del self._session_to_target[session_id]
    # Dispatch TabClosedEvent only for page/tab targets that are fully removed (not iframes/workers or partial detaches)
    if target_fully_removed:
        if target_type in ('page', 'tab'):
            from browser_use.browser.events import TabClosedEvent

            self.browser_session.event_bus.dispatch(TabClosedEvent(target_id=target_id))
            self.logger.debug(f'[SessionManager] Dispatched TabClosedEvent for page target {target_id[:8]}...')
        elif target_type:
            self.logger.debug(
                f'[SessionManager] Target {target_id[:8]}... fully removed (type={target_type}) - not dispatching TabClosedEvent'
            )
    # Auto-recover agent_focus outside the lock to avoid blocking other operations
    if agent_focus_lost:
        # Create recovery task instead of awaiting directly - allows concurrent operations to wait on same recovery
        if not self._recovery_in_progress:
            self._recovery_task = create_task_with_error_handling(
                self._recover_agent_focus(target_id),
                name='recover_agent_focus',
                logger_instance=self.logger,
                suppress_exceptions=False,
            )
async def _recover_agent_focus(self, crashed_target_id: TargetID) -> None:
"""Auto-recover agent_focus when the focused target crashes/detaches.
Uses recovery lock to prevent concurrent recovery attempts from creating multiple emergency tabs.
Coordinates with ensure_valid_focus() via events for efficient waiting.
Args:
crashed_target_id: The target ID that was lost
"""
try:
# Prevent concurrent recovery attempts
async with self._recovery_lock:
# Set recovery state INSIDE lock to prevent race conditions
if self._recovery_in_progress:
self.logger.debug('[SessionManager] Recovery already in progress, waiting for it to complete')
# Wait for ongoing recovery instead of starting a new one
if self._recovery_complete_event:
try:
await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=5.0)
except TimeoutError:
self.logger.error('[SessionManager] Timed out waiting for ongoing recovery')
return
# Set recovery state
self._recovery_in_progress = True
self._recovery_complete_event = asyncio.Event()
if self.browser_session._cdp_client_root is None:
self.logger.debug('[SessionManager] Skipping focus recovery - browser shutting down (no CDP client)')
return
# Check if another recovery already fixed agent_focus
if self.browser_session.agent_focus_target_id and self.browser_session.agent_focus_target_id != crashed_target_id:
self.logger.debug(
f'[SessionManager] Agent focus already recovered by concurrent operation '
f'(now: {self.browser_session.agent_focus_target_id[:8]}...), skipping recovery'
)
return
# Note: agent_focus_target_id may already be None (cleared in _handle_target_detached)
current_focus_desc = (
f'{self.browser_session.agent_focus_target_id[:8]}...'
if self.browser_session.agent_focus_target_id
else 'None (already cleared)'
)
self.logger.warning(
f'[SessionManager] Agent focus target {crashed_target_id[:8]}... detached! '
f'Current focus: {current_focus_desc}. Auto-recovering by switching to another target...'
)
# Perform recovery (outside lock to allow concurrent operations)
# Try to find another valid page target
page_targets = self.get_all_page_targets()
new_target_id = None
is_existing_tab = False
if page_targets:
# Switch to most recent page that's not the crashed one
new_target_id = page_targets[-1].target_id
is_existing_tab = True
self.logger.info(f'[SessionManager] Switching agent_focus to existing tab {new_target_id[:8]}...')
else:
# No pages exist - create a new one
self.logger.warning('[SessionManager] No tabs remain! Creating new tab for agent...')
new_target_id = await self.browser_session._cdp_create_new_page('about:blank')
self.logger.info(f'[SessionManager] Created new tab {new_target_id[:8]}... for agent')
# Dispatch TabCreatedEvent so watchdogs can initialize
from browser_use.browser.events import TabCreatedEvent
self.browser_session.event_bus.dispatch(TabCreatedEvent(url='about:blank', target_id=new_target_id))
# Wait for CDP attach event to create session
# Note: This polling is necessary - waiting for external Chrome CDP event
# _handle_target_attached will add session to pool when Chrome fires attachedToTarget
new_session = None
for attempt in range(20): # Wait up to 2 seconds
await asyncio.sleep(0.1)
new_session = self._get_session_for_target(new_target_id)
if new_session:
break
if new_session:
self.browser_session.agent_focus_target_id = new_target_id
self.logger.info(f'[SessionManager] โ
Agent focus recovered: {new_target_id[:8]}...')
# Visually activate the tab in browser (only for existing tabs)
if is_existing_tab:
try:
assert self.browser_session._cdp_client_root is not None
await self.browser_session._cdp_client_root.send.Target.activateTarget(params={'targetId': new_target_id})
self.logger.debug(f'[SessionManager] Activated tab {new_target_id[:8]}... in browser UI')
except Exception as e:
self.logger.debug(f'[SessionManager] Failed to activate tab visually: {e}')
# Get target to access url (from owned data)
target = self.get_target(new_target_id)
target_url = target.url if target else 'about:blank'
# Dispatch focus changed event
from browser_use.browser.events import AgentFocusChangedEvent
self.browser_session.event_bus.dispatch(AgentFocusChangedEvent(target_id=new_target_id, url=target_url))
return
# Recovery failed - create emergency fallback tab
self.logger.error(
f'[SessionManager] โ Failed to get session for {new_target_id[:8]}... after 2s, creating emergency fallback tab'
)
fallback_target_id = await self.browser_session._cdp_create_new_page('about:blank')
self.logger.warning(f'[SessionManager] Created emergency fallback tab {fallback_target_id[:8]}...')
# Try one more time with fallback
# Note: This polling is necessary - waiting for external Chrome CDP event
for _ in range(20):
await asyncio.sleep(0.1)
fallback_session = self._get_session_for_target(fallback_target_id)
if fallback_session:
self.browser_session.agent_focus_target_id = fallback_target_id
self.logger.warning(f'[SessionManager] โ ๏ธ Agent focus set to emergency fallback: {fallback_target_id[:8]}...')
from browser_use.browser.events import AgentFocusChangedEvent, TabCreatedEvent
self.browser_session.event_bus.dispatch(TabCreatedEvent(url='about:blank', target_id=fallback_target_id))
self.browser_session.event_bus.dispatch(
AgentFocusChangedEvent(target_id=fallback_target_id, url='about:blank')
)
return
# Complete failure - this should never happen
self.logger.critical(
'[SessionManager] ๐จ CRITICAL: Failed to recover agent_focus even with fallback! Agent may be in broken state.'
)
except Exception as e:
self.logger.error(f'[SessionManager] โ Error during agent_focus recovery: {type(e).__name__}: {e}')
finally:
# Always signal completion and reset recovery state
# This allows all waiting operations to proceed (success or failure)
if self._recovery_complete_event:
self._recovery_complete_event.set()
self._recovery_in_progress = False
self._recovery_task = None
self.logger.debug('[SessionManager] Recovery state reset')
async def _initialize_existing_targets(self) -> None:
    """Discover and initialize all existing targets at startup.

    Attaches to each target and initializes it SYNCHRONOUSLY.
    Chrome will also fire attachedToTarget events, but _handle_target_attached() is
    idempotent (checks if target already in pool), so duplicate handling is safe.
    This eliminates race conditions - monitoring is guaranteed ready before navigation.
    """
    cdp_client = self.browser_session._cdp_client_root
    assert cdp_client is not None
    # Get all existing targets
    targets_result = await cdp_client.send.Target.getTargets()
    existing_targets = targets_result.get('targetInfos', [])
    self.logger.debug(f'[SessionManager] Discovered {len(existing_targets)} existing targets')
    # Track target IDs for verification
    target_ids_to_wait_for = []
    # Just attach to ALL existing targets - Chrome fires attachedToTarget events
    # The on_attached handler (via create_task) does ALL the work
    for target in existing_targets:
        target_id = target['targetId']
        target_type = target.get('type', 'unknown')
        try:
            # Just attach - event handler does everything
            await cdp_client.send.Target.attachToTarget(params={'targetId': target_id, 'flatten': True})
            target_ids_to_wait_for.append(target_id)
        except Exception as e:
            self.logger.debug(
                f'[SessionManager] Failed to attach to existing target {target_id[:8]}... (type={target_type}): {e}'
            )
    # Wait for event handlers to complete their work (they run via create_task)
    # Use event-driven approach instead of polling for better performance
    ready_event = asyncio.Event()

    async def check_all_ready():
        """Check if all sessions are ready and signal completion."""
        # Re-checks every 50ms; a page counts as ready only once its lifecycle
        # monitoring has been enabled (see _enable_page_monitoring).
        while True:
            ready_count = 0
            for tid in target_ids_to_wait_for:
                session = self._get_session_for_target(tid)
                if session:
                    target = self._targets.get(tid)
                    target_type = target.target_type if target else 'unknown'
                    # For pages, verify monitoring is enabled
                    if target_type in ('page', 'tab'):
                        if hasattr(session, '_lifecycle_events') and session._lifecycle_events is not None:
                            ready_count += 1
                    else:
                        # Non-page targets don't need monitoring
                        ready_count += 1
            if ready_count == len(target_ids_to_wait_for):
                ready_event.set()
                return
            await asyncio.sleep(0.05)

    # Start checking in background
    check_task = create_task_with_error_handling(
        check_all_ready(), name='check_all_targets_ready', logger_instance=self.logger
    )
    try:
        # Wait for completion with timeout
        await asyncio.wait_for(ready_event.wait(), timeout=2.0)
    except TimeoutError:
        # Timeout - count what's ready (same readiness criteria as check_all_ready)
        ready_count = 0
        for tid in target_ids_to_wait_for:
            session = self._get_session_for_target(tid)
            if session:
                target = self._targets.get(tid)
                target_type = target.target_type if target else 'unknown'
                # For pages, verify monitoring is enabled
                if target_type in ('page', 'tab'):
                    if hasattr(session, '_lifecycle_events') and session._lifecycle_events is not None:
                        ready_count += 1
                else:
                    # Non-page targets don't need monitoring
                    ready_count += 1
        self.logger.warning(
            f'[SessionManager] Initialization timeout after 2.0s: {ready_count}/{len(target_ids_to_wait_for)} sessions ready'
        )
    finally:
        # Always stop the background checker, success or timeout.
        check_task.cancel()
        try:
            await check_task
        except asyncio.CancelledError:
            pass
async def _enable_page_monitoring(self, cdp_session: 'CDPSession') -> None:
"""Enable lifecycle events and network monitoring for a page target.
This is called once per page when it's created, avoiding handler accumulation.
Registers a SINGLE lifecycle handler per session that stores events for navigations to consume.
Args:
cdp_session: The CDP session to enable monitoring on
"""
try:
# Enable Page domain first (required for lifecycle events)
await cdp_session.cdp_client.send.Page.enable(session_id=cdp_session.session_id)
# Enable lifecycle events (load, DOMContentLoaded, networkIdle, etc.)
await cdp_session.cdp_client.send.Page.setLifecycleEventsEnabled(
params={'enabled': True}, session_id=cdp_session.session_id
)
# Enable network monitoring for networkIdle detection
await cdp_session.cdp_client.send.Network.enable(session_id=cdp_session.session_id)
# Initialize lifecycle event storage for this session (thread-safe)
from collections import deque
cdp_session._lifecycle_events = deque(maxlen=50) # Keep last 50 events
cdp_session._lifecycle_lock = asyncio.Lock()
# Register ONE handler per session that stores events
def on_lifecycle_event(event, session_id=None):
event_name = event.get('name', 'unknown')
event_loader_id = event.get('loaderId', 'none')
# Find which target this session belongs to
target_id_from_event = None
if session_id:
target_id_from_event = self.get_target_id_from_session_id(session_id)
# Check if this event is for our target
if target_id_from_event == cdp_session.target_id:
# Store event for navigations to consume
event_data = {
'name': event_name,
'loaderId': event_loader_id,
'timestamp': asyncio.get_event_loop().time(),
}
# Append is atomic in CPython
try:
cdp_session._lifecycle_events.append(event_data)
except Exception as e:
# Only log errors, not every event
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/events.py | browser_use/browser/events.py | """Event definitions for browser communication."""
import inspect
import os
from typing import Any, Literal
from bubus import BaseEvent
from bubus.models import T_EventResultType
from cdp_use.cdp.target import TargetID
from pydantic import BaseModel, Field, field_validator
from browser_use.browser.views import BrowserStateSummary
from browser_use.dom.views import EnhancedDOMTreeNode
def _get_timeout(env_var: str, default: float) -> float | None:
"""
Safely parse environment variable timeout values with robust error handling.
Args:
env_var: Environment variable name (e.g. 'TIMEOUT_NavigateToUrlEvent')
default: Default timeout value as float (e.g. 15.0)
Returns:
Parsed float value or the default if parsing fails
Raises:
ValueError: Only if both env_var and default are invalid (should not happen with valid defaults)
"""
# Try environment variable first
env_value = os.getenv(env_var)
if env_value:
try:
parsed = float(env_value)
if parsed < 0:
print(f'Warning: {env_var}={env_value} is negative, using default {default}')
return default
return parsed
except (ValueError, TypeError):
print(f'Warning: {env_var}={env_value} is not a valid number, using default {default}')
# Fall back to default
return default
# ============================================================================
# Agent/Tools -> BrowserSession Events (High-level browser actions)
# ============================================================================
class ElementSelectedEvent(BaseEvent[T_EventResultType]):
    """An element was selected."""

    # The selected DOM node; normalized by serialize_node before it is stored.
    node: EnhancedDOMTreeNode

    @field_validator('node', mode='before')
    @classmethod
    def serialize_node(cls, data: EnhancedDOMTreeNode | None) -> EnhancedDOMTreeNode | None:
        """Copy the node with its circular-reference fields stripped so the event can be serialized."""
        if data is None:
            return None
        return EnhancedDOMTreeNode(
            node_id=data.node_id,
            backend_node_id=data.backend_node_id,
            session_id=data.session_id,
            frame_id=data.frame_id,
            target_id=data.target_id,
            node_type=data.node_type,
            node_name=data.node_name,
            node_value=data.node_value,
            attributes=data.attributes,
            is_scrollable=data.is_scrollable,
            is_visible=data.is_visible,
            absolute_position=data.absolute_position,
            # override the circular reference fields in EnhancedDOMTreeNode as they cant be serialized and aren't needed by event handlers
            # only used internally by the DOM service during DOM tree building process, not intended public API use
            content_document=None,
            shadow_root_type=None,
            shadow_roots=[],
            parent_node=None,
            children_nodes=[],
            ax_node=None,
            snapshot_node=None,
        )
# TODO: add page handle to events
# class PageHandle(share a base with browser.session.CDPSession?):
#     url: str
#     target_id: TargetID
#
#     @classmethod
#     def from_target_id(cls, target_id: TargetID) -> Self:
#         return cls(target_id=target_id)
#
#     @classmethod
#     def from_url(cls, url: str) -> Self:
#
#     @property
#     def root_frame_id(self) -> str:
#         return self.target_id
#
#     @property
#     def session_id(self) -> str:
#         return browser_session.get_or_create_cdp_session(self.target_id).session_id

# class PageSelectedEvent(BaseEvent[T_EventResultType]):
#     """An event like SwitchToTabEvent(page=PageHandle) or CloseTabEvent(page=PageHandle)"""
#     page: PageHandle


class NavigateToUrlEvent(BaseEvent[None]):
	"""Navigate to a specific URL."""

	url: str
	wait_until: Literal['load', 'domcontentloaded', 'networkidle', 'commit'] = 'load'
	timeout_ms: int | None = None  # navigation timeout; None uses the handler's default
	new_tab: bool = Field(
		default=False, description='Set True to leave the current tab alone and open a new tab in the foreground for the new URL'
	)
	# existing_tab: PageHandle | None = None  # TODO

	# time limits enforced by bubus, not exposed to LLM:
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigateToUrlEvent', 15.0))  # seconds


class ClickElementEvent(ElementSelectedEvent[dict[str, Any] | None]):
	"""Click an element."""

	node: 'EnhancedDOMTreeNode'
	button: Literal['left', 'right', 'middle'] = 'left'
	# click_count: int = 1  # TODO
	# expect_download: bool = False  # moved to downloads_watchdog.py
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ClickElementEvent', 15.0))  # seconds


class ClickCoordinateEvent(BaseEvent[dict]):
	"""Click at specific coordinates."""

	coordinate_x: int
	coordinate_y: int
	button: Literal['left', 'right', 'middle'] = 'left'
	force: bool = False  # If True, skip safety checks (file input, print, select)
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ClickCoordinateEvent', 15.0))  # seconds


class TypeTextEvent(ElementSelectedEvent[dict | None]):
	"""Type text into an element."""

	node: 'EnhancedDOMTreeNode'
	text: str
	clear: bool = True  # clear the field before typing
	is_sensitive: bool = False  # Flag to indicate if text contains sensitive data
	sensitive_key_name: str | None = None  # Name of the sensitive key being typed (e.g., 'username', 'password')
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TypeTextEvent', 60.0))  # seconds


class ScrollEvent(ElementSelectedEvent[None]):
	"""Scroll the page or element."""

	direction: Literal['up', 'down', 'left', 'right']
	amount: int  # pixels
	node: 'EnhancedDOMTreeNode | None' = None  # None means scroll page
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScrollEvent', 8.0))  # seconds


class SwitchTabEvent(BaseEvent[TargetID]):
	"""Switch to a different tab."""

	target_id: TargetID | None = Field(default=None, description='None means switch to the most recently opened tab')
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SwitchTabEvent', 10.0))  # seconds


class CloseTabEvent(BaseEvent[None]):
	"""Close a tab."""

	target_id: TargetID
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_CloseTabEvent', 10.0))  # seconds


class ScreenshotEvent(BaseEvent[str]):
	"""Request to take a screenshot. Result is the image data as a string."""

	full_page: bool = False
	clip: dict[str, float] | None = None  # {x, y, width, height}
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScreenshotEvent', 15.0))  # seconds


class BrowserStateRequestEvent(BaseEvent[BrowserStateSummary]):
	"""Request current browser state."""

	include_dom: bool = True
	include_screenshot: bool = True
	include_recent_events: bool = False
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStateRequestEvent', 30.0))  # seconds
# class WaitForConditionEvent(BaseEvent):
#     """Wait for a condition."""
#     condition: Literal['navigation', 'selector', 'timeout', 'load_state']
#     timeout: float = 30000
#     selector: str | None = None
#     state: Literal['attached', 'detached', 'visible', 'hidden'] | None = None


class GoBackEvent(BaseEvent[None]):
	"""Navigate back in browser history."""

	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_GoBackEvent', 15.0))  # seconds


class GoForwardEvent(BaseEvent[None]):
	"""Navigate forward in browser history."""

	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_GoForwardEvent', 15.0))  # seconds


class RefreshEvent(BaseEvent[None]):
	"""Refresh/reload the current page."""

	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_RefreshEvent', 15.0))  # seconds


class WaitEvent(BaseEvent[None]):
	"""Wait for a specified number of seconds."""

	seconds: float = 3.0
	max_seconds: float = 10.0  # Safety cap
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_WaitEvent', 60.0))  # seconds


class SendKeysEvent(BaseEvent[None]):
	"""Send keyboard keys/shortcuts."""

	keys: str  # e.g., "ctrl+a", "cmd+c", "Enter"
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SendKeysEvent', 60.0))  # seconds


class UploadFileEvent(ElementSelectedEvent[None]):
	"""Upload a file to an element."""

	node: 'EnhancedDOMTreeNode'
	file_path: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_UploadFileEvent', 30.0))  # seconds


class GetDropdownOptionsEvent(ElementSelectedEvent[dict[str, str]]):
	"""Get all options from any dropdown (native <select>, ARIA menus, or custom dropdowns).

	Returns a dict containing dropdown type, options list, and element metadata."""

	node: 'EnhancedDOMTreeNode'
	event_timeout: float | None = Field(
		default_factory=lambda: _get_timeout('TIMEOUT_GetDropdownOptionsEvent', 15.0)
	)  # some dropdowns lazy-load the list of options on first interaction, so we need to wait for them to load (e.g. table filter lists can have thousands of options)


class SelectDropdownOptionEvent(ElementSelectedEvent[dict[str, str]]):
	"""Select a dropdown option by exact text from any dropdown type.

	Returns a dict containing success status and selection details."""

	node: 'EnhancedDOMTreeNode'
	text: str  # The option text to select
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SelectDropdownOptionEvent', 8.0))  # seconds


class ScrollToTextEvent(BaseEvent[None]):
	"""Scroll to specific text on the page. Raises exception if text not found."""

	text: str
	direction: Literal['up', 'down'] = 'down'
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScrollToTextEvent', 15.0))  # seconds
# ============================================================================
# Browser lifecycle events (start/stop/launch/kill)
# ============================================================================


class BrowserStartEvent(BaseEvent):
	"""Start/connect to browser."""

	cdp_url: str | None = None  # connect to an existing browser when provided
	launch_options: dict[str, Any] = Field(default_factory=dict)
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStartEvent', 30.0))  # seconds


class BrowserStopEvent(BaseEvent):
	"""Stop/disconnect from browser."""

	force: bool = False
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStopEvent', 45.0))  # seconds


class BrowserLaunchResult(BaseModel):
	"""Result of launching a browser."""

	# TODO: add browser executable_path, pid, version, latency, user_data_dir, X11 $DISPLAY, host IP address, etc.
	cdp_url: str


class BrowserLaunchEvent(BaseEvent[BrowserLaunchResult]):
	"""Launch a local browser process."""

	# TODO: add executable_path, proxy settings, preferences, extra launch args, etc.
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserLaunchEvent', 30.0))  # seconds


class BrowserKillEvent(BaseEvent):
	"""Kill local browser subprocess."""

	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserKillEvent', 30.0))  # seconds
# TODO: replace all Runtime.evaluate() calls with this event
# class ExecuteJavaScriptEvent(BaseEvent):
#     """Execute JavaScript in page context."""
#     target_id: TargetID
#     expression: str
#     await_promise: bool = True
#     event_timeout: float | None = 60.0  # seconds

# TODO: add this and use the old BrowserProfile.viewport options to set it
# class SetViewportEvent(BaseEvent):
#     """Set the viewport size."""
#     width: int
#     height: int
#     device_scale_factor: float = 1.0
#     event_timeout: float | None = 15.0  # seconds

# Moved to storage state
# class SetCookiesEvent(BaseEvent):
#     """Set browser cookies."""
#     cookies: list[dict[str, Any]]
#     event_timeout: float | None = (
#         30.0  # only long to support the edge case of restoring a big localStorage / on many origins (has to O(n) visit each origin to restore)
#     )

# class GetCookiesEvent(BaseEvent):
#     """Get browser cookies."""
#     urls: list[str] | None = None
#     event_timeout: float | None = 30.0  # seconds


# ============================================================================
# DOM-related Events
# ============================================================================


class BrowserConnectedEvent(BaseEvent):
	"""Browser has started/connected."""

	cdp_url: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserConnectedEvent', 30.0))  # seconds


class BrowserStoppedEvent(BaseEvent):
	"""Browser has stopped/disconnected."""

	reason: str | None = None
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStoppedEvent', 30.0))  # seconds


class TabCreatedEvent(BaseEvent):
	"""A new tab was created."""

	target_id: TargetID
	url: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TabCreatedEvent', 30.0))  # seconds


class TabClosedEvent(BaseEvent):
	"""A tab was closed."""

	target_id: TargetID
	# TODO:
	# new_focus_target_id: int | None = None
	# new_focus_url: str | None = None
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TabClosedEvent', 10.0))  # seconds


# TODO: emit this when DOM changes significantly, inner frame navigates, form submits, history.pushState(), etc.
# class TabUpdatedEvent(BaseEvent):
#     """Tab information updated (URL changed, etc.)."""
#     target_id: TargetID
#     url: str


class AgentFocusChangedEvent(BaseEvent):
	"""Agent focus changed to a different tab."""

	target_id: TargetID
	url: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_AgentFocusChangedEvent', 10.0))  # seconds


class TargetCrashedEvent(BaseEvent):
	"""A target has crashed."""

	target_id: TargetID
	error: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TargetCrashedEvent', 10.0))  # seconds


class NavigationStartedEvent(BaseEvent):
	"""Navigation started."""

	target_id: TargetID
	url: str
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigationStartedEvent', 30.0))  # seconds


class NavigationCompleteEvent(BaseEvent):
	"""Navigation completed."""

	target_id: TargetID
	url: str
	status: int | None = None  # HTTP status, when known
	error_message: str | None = None  # Error/timeout message if navigation had issues
	loading_status: str | None = None  # Detailed loading status (e.g., network timeout info)
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigationCompleteEvent', 30.0))  # seconds
# ============================================================================
# Error Events
# ============================================================================


class BrowserErrorEvent(BaseEvent):
	"""An error occurred in the browser layer."""

	error_type: str  # machine-readable error category
	message: str  # human-readable description
	details: dict[str, Any] = Field(default_factory=dict)  # arbitrary structured context
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserErrorEvent', 30.0))  # seconds
# ============================================================================
# Storage State Events
# ============================================================================


class SaveStorageStateEvent(BaseEvent):
	"""Request to save browser storage state."""

	path: str | None = None  # Optional path, uses profile default if not provided
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SaveStorageStateEvent', 45.0))  # seconds


class StorageStateSavedEvent(BaseEvent):
	"""Notification that storage state was saved."""

	path: str
	cookies_count: int
	origins_count: int
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_StorageStateSavedEvent', 30.0))  # seconds


class LoadStorageStateEvent(BaseEvent):
	"""Request to load browser storage state."""

	path: str | None = None  # Optional path, uses profile default if not provided
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_LoadStorageStateEvent', 45.0))  # seconds


# TODO: refactor this to:
# - on_BrowserConnectedEvent() -> dispatch(LoadStorageStateEvent()) -> _copy_storage_state_from_json_to_browser(json_file, new_cdp_session) + return storage_state from handler
# - on_BrowserStopEvent() -> dispatch(SaveStorageStateEvent()) -> _copy_storage_state_from_browser_to_json(new_cdp_session, json_file)
# and get rid of StorageStateSavedEvent and StorageStateLoadedEvent, have the original events + provide handler return values for any results
class StorageStateLoadedEvent(BaseEvent):
	"""Notification that storage state was loaded."""

	path: str
	cookies_count: int
	origins_count: int
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_StorageStateLoadedEvent', 30.0))  # seconds


# ============================================================================
# File Download Events
# ============================================================================


class FileDownloadedEvent(BaseEvent):
	"""A file has been downloaded."""

	url: str
	path: str  # local filesystem path where the file was saved
	file_name: str
	file_size: int  # size in bytes
	file_type: str | None = None  # e.g., 'pdf', 'zip', 'docx', etc.
	mime_type: str | None = None  # e.g., 'application/pdf'
	from_cache: bool = False
	auto_download: bool = False  # Whether this was an automatic download (e.g., PDF auto-download)
	event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_FileDownloadedEvent', 30.0))  # seconds


class AboutBlankDVDScreensaverShownEvent(BaseEvent):
	"""AboutBlankWatchdog has shown DVD screensaver animation on an about:blank tab."""

	target_id: TargetID
	error: str | None = None


class DialogOpenedEvent(BaseEvent):
	"""Event dispatched when a JavaScript dialog is opened and handled."""

	dialog_type: str  # 'alert', 'confirm', 'prompt', or 'beforeunload'
	message: str
	url: str
	frame_id: str | None = None  # Can be None when frameId is not provided by CDP
	# target_id: TargetID  # TODO: add this to avoid needing target_id_from_frame() later


# Note: Model rebuilding for forward references is handled in the importing modules
# Events with 'EnhancedDOMTreeNode' forward references (ClickElementEvent, TypeTextEvent,
# ScrollEvent, UploadFileEvent) need model_rebuild() called after imports are complete
def _check_event_names_dont_overlap():
	"""Validate the event class names defined in this module.

	Ensures every event class name ends with "Event" and that no event name is
	a substring of another one. (Naive quadratic scan; acceptable for the
	current event count, optimize when there are many more events.)
	"""
	module_globals = globals()
	event_names = {
		key.split('[')[0]
		for key, value in module_globals.items()
		if not key.startswith('_') and inspect.isclass(value) and issubclass(value, BaseEvent) and key != 'BaseEvent'
	}

	# Every event class must follow the *Event naming convention.
	for candidate in event_names:
		assert candidate.endswith('Event'), f'Event with name {candidate} does not end with "Event"'

	# No event name may be contained inside another one.
	for name_a in event_names:
		for name_b in event_names - {name_a}:
			assert name_a not in name_b, (
				f'Event with name {name_a} is a substring of {name_b}, all events must be completely unique to avoid find-and-replace accidents'
			)
			# overlapping event names are a nightmare to trace and rename later, dont do it!
			# e.g. prevent ClickEvent and FailedClickEvent are terrible names because one is a substring of the other,
			# must be ClickEvent and ClickFailedEvent to preserve the usefulnes of codebase grep/sed/awk as refactoring tools.


# at import time, we do a quick check that all event names defined above are valid and non-overlapping.
# this is hand written in blood by a human! not LLM slop. feel free to optimize but do not remove it without a good reason.
_check_event_names_dont_overlap()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/__init__.py | browser_use/browser/__init__.py | from typing import TYPE_CHECKING
# Type stubs for lazy imports
if TYPE_CHECKING:
from .profile import BrowserProfile, ProxySettings
from .session import BrowserSession
# Lazy imports mapping for heavy browser components
# Lazy imports mapping for heavy browser components: public name -> (relative module, attribute)
_LAZY_IMPORTS = {
	'ProxySettings': ('.profile', 'ProxySettings'),
	'BrowserProfile': ('.profile', 'BrowserProfile'),
	'BrowserSession': ('.session', 'BrowserSession'),
}


def __getattr__(name: str):
	"""Lazy import mechanism for heavy browser components.

	Defers importing browser submodules until one of the mapped names is first
	accessed, then caches the resolved attribute in this module's globals so
	subsequent lookups bypass this hook entirely.

	Raises:
		ImportError: if the target module/attribute cannot be imported.
		AttributeError: if `name` is not one of the lazily-exported names.
	"""
	if name in _LAZY_IMPORTS:
		module_path, attr_name = _LAZY_IMPORTS[name]
		# Resolve the full path before the try block so it is always bound when
		# the except clause formats the error message (previously it was
		# assigned inside the try and could in principle be unbound there).
		full_module_path = f'browser_use.browser{module_path}'
		try:
			from importlib import import_module

			module = import_module(full_module_path)
			attr = getattr(module, attr_name)
		except ImportError as e:
			raise ImportError(f'Failed to import {name} from {full_module_path}: {e}') from e
		# Cache the imported attribute in the module's globals
		globals()[name] = attr
		return attr
	raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
# Public API of browser_use.browser; each name is resolved lazily via __getattr__ above.
__all__ = [
	'BrowserSession',
	'BrowserProfile',
	'ProxySettings',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/video_recorder.py | browser_use/browser/video_recorder.py | """Video Recording Service for Browser Use Sessions."""
import base64
import logging
import math
import subprocess
from pathlib import Path
from typing import Optional
from browser_use.browser.profile import ViewportSize
# Video encoding dependencies are optional extras (pip install "browser-use[video]").
# When missing, IMAGEIO_AVAILABLE is False and recording degrades to a logged error.
try:
	import imageio.v2 as iio  # type: ignore[import-not-found]
	import imageio_ffmpeg  # type: ignore[import-not-found]
	import numpy as np  # type: ignore[import-not-found]
	from imageio.core.format import Format  # type: ignore[import-not-found]

	IMAGEIO_AVAILABLE = True
except ImportError:
	IMAGEIO_AVAILABLE = False

logger = logging.getLogger(__name__)
def _get_padded_size(size: ViewportSize, macro_block_size: int = 16) -> ViewportSize:
	"""Return the viewport dimensions rounded up to multiples of ``macro_block_size``.

	Video codecs such as libx264 require frame dimensions aligned to the codec's
	macro block size, so both width and height are padded upward independently.
	"""

	def _align(pixels: int) -> int:
		# Ceil to the next multiple of the macro block size.
		return int(math.ceil(pixels / macro_block_size)) * macro_block_size

	return ViewportSize(width=_align(size['width']), height=_align(size['height']))
class VideoRecorderService:
	"""
	Handles the video encoding process for a browser session using imageio.

	This service captures individual frames from the CDP screencast, decodes them,
	and appends them to a video file using a pip-installable ffmpeg backend.
	It automatically resizes frames to match the target video dimensions.
	"""

	def __init__(self, output_path: Path, size: ViewportSize, framerate: int):
		"""
		Initializes the video recorder.

		Args:
			output_path: The full path where the video will be saved.
			size: A ViewportSize object specifying the width and height of the video.
			framerate: The desired framerate for the output video.
		"""
		self.output_path = output_path
		self.size = size
		self.framerate = framerate
		self._writer: Optional['Format.Writer'] = None
		self._is_active = False
		# Codecs require dimensions divisible by the macro block size; frames
		# are padded up to this size before being appended to the writer.
		self.padded_size = _get_padded_size(self.size)

	def start(self) -> None:
		"""
		Prepares and starts the video writer.

		If the required optional dependencies are not installed, this method will
		log an error and do nothing.
		"""
		if not IMAGEIO_AVAILABLE:
			logger.error(
				'MP4 recording requires optional dependencies. Please install them with: pip install "browser-use[video]"'
			)
			return
		try:
			self.output_path.parent.mkdir(parents=True, exist_ok=True)
			# The macro_block_size is set to None because we handle padding ourselves
			self._writer = iio.get_writer(
				str(self.output_path),
				fps=self.framerate,
				codec='libx264',
				quality=8,  # A good balance of quality and file size (1-10 scale)
				pixelformat='yuv420p',  # Ensures compatibility with most players
				macro_block_size=None,
			)
			self._is_active = True
			logger.debug(f'Video recorder started. Output will be saved to {self.output_path}')
		except Exception as e:
			logger.error(f'Failed to initialize video writer: {e}')
			self._is_active = False

	def add_frame(self, frame_data_b64: str) -> None:
		"""
		Decodes a base64-encoded PNG frame, resizes it, pads it to be codec-compatible,
		and appends it to the video.

		Args:
			frame_data_b64: A base64-encoded string of the PNG frame data.
		"""
		# Silently drop frames if start() failed or was never called.
		if not self._is_active or not self._writer:
			return
		try:
			frame_bytes = base64.b64decode(frame_data_b64)

			# Build a filter chain for ffmpeg:
			# 1. scale: Resizes the frame to the user-specified dimensions.
			# 2. pad: Adds black bars to meet codec's macro-block requirements,
			#    centering the original content.
			vf_chain = (
				f'scale={self.size["width"]}:{self.size["height"]},'
				f'pad={self.padded_size["width"]}:{self.padded_size["height"]}:(ow-iw)/2:(oh-ih)/2:color=black'
			)
			output_pix_fmt = 'rgb24'
			command = [
				imageio_ffmpeg.get_ffmpeg_exe(),
				'-f',
				'image2pipe',  # Input format from a pipe
				'-c:v',
				'png',  # Specify input codec is PNG
				'-i',
				'-',  # Input from stdin
				'-vf',
				vf_chain,  # Video filter for resizing and padding
				'-f',
				'rawvideo',  # Output format is raw video
				'-pix_fmt',
				output_pix_fmt,  # Output pixel format
				'-',  # Output to stdout
			]

			# Execute ffmpeg as a subprocess
			proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			out, err = proc.communicate(input=frame_bytes)

			if proc.returncode != 0:
				err_msg = err.decode(errors='ignore').strip()
				# Known-benign ffmpeg warning; anything else is a hard failure.
				if 'deprecated pixel format used' not in err_msg.lower():
					raise OSError(f'ffmpeg error during resizing/padding: {err_msg}')
				else:
					logger.debug(f'ffmpeg warning during resizing/padding: {err_msg}')

			# Convert the raw output bytes to a numpy array with the padded dimensions
			img_array = np.frombuffer(out, dtype=np.uint8).reshape((self.padded_size['height'], self.padded_size['width'], 3))
			self._writer.append_data(img_array)
		except Exception as e:
			# A single bad frame should not abort the whole recording.
			logger.warning(f'Could not process and add video frame: {e}')

	def stop_and_save(self) -> None:
		"""
		Finalizes the video file by closing the writer.

		This method should be called when the recording session is complete.
		"""
		if not self._is_active or not self._writer:
			return
		try:
			self._writer.close()
			logger.info(f'๐น Video recording saved successfully to: {self.output_path}')
		except Exception as e:
			logger.error(f'Failed to finalize and save video: {e}')
		finally:
			# Always mark inactive and drop the writer, even if close() failed.
			self._is_active = False
			self._writer = None
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/session.py | browser_use/browser/session.py | """Event-driven browser session with backwards compatibility."""
import asyncio
import logging
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal, Self, Union, cast, overload
from urllib.parse import urlparse, urlunparse
from uuid import UUID
import httpx
from bubus import EventBus
from cdp_use import CDPClient
from cdp_use.cdp.fetch import AuthRequiredEvent, RequestPausedEvent
from cdp_use.cdp.network import Cookie
from cdp_use.cdp.target import AttachedToTargetEvent, SessionID, TargetID
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from uuid_extensions import uuid7str
from browser_use.browser.cloud.cloud import CloudBrowserAuthError, CloudBrowserClient, CloudBrowserError
# CDP logging is now handled by setup_logging() in logging_config.py
# It automatically sets CDP logs to the same level as browser_use logs
from browser_use.browser.cloud.views import CloudBrowserParams, CreateBrowserRequest, ProxyCountryCode
from browser_use.browser.events import (
AgentFocusChangedEvent,
BrowserConnectedEvent,
BrowserErrorEvent,
BrowserLaunchEvent,
BrowserLaunchResult,
BrowserStartEvent,
BrowserStateRequestEvent,
BrowserStopEvent,
BrowserStoppedEvent,
CloseTabEvent,
FileDownloadedEvent,
NavigateToUrlEvent,
NavigationCompleteEvent,
NavigationStartedEvent,
SwitchTabEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.profile import BrowserProfile, ProxySettings
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import DOMRect, EnhancedDOMTreeNode, TargetInfo
from browser_use.observability import observe_debug
from browser_use.utils import _log_pretty_url, create_task_with_error_handling, is_new_tab_page
if TYPE_CHECKING:
from browser_use.actor.page import Page
from browser_use.browser.demo_mode import DemoMode
# Shared default profile used when a BrowserSession is created without one.
DEFAULT_BROWSER_PROFILE = BrowserProfile()

# track unique session IDs that have been logged to make sure we always assign a
# unique enough id to new sessions and avoid ambiguity in logs
_LOGGED_UNIQUE_SESSION_IDS = set()

# ANSI escape codes used to colorize log output
red = '\033[91m'
reset = '\033[0m'
class Target(BaseModel):
	"""Browser target (page, iframe, worker) - the actual entity being controlled.

	A target represents a browsing context with its own URL, title, and type.
	Multiple CDP sessions can attach to the same target for communication.
	"""

	model_config = ConfigDict(arbitrary_types_allowed=True, revalidate_instances='never')

	target_id: TargetID  # CDP identifier for this target
	target_type: str  # 'page', 'iframe', 'worker', etc.
	url: str = 'about:blank'
	title: str = 'Unknown title'
class CDPSession(BaseModel):
	"""CDP communication channel to a target.

	A session is a connection that allows sending CDP commands to a specific target.
	Multiple sessions can attach to the same target.
	"""

	model_config = ConfigDict(arbitrary_types_allowed=True, revalidate_instances='never')

	cdp_client: CDPClient  # client used to issue CDP commands for this session
	target_id: TargetID  # target this session is attached to
	session_id: SessionID  # CDP session identifier

	# Lifecycle monitoring (populated by SessionManager)
	_lifecycle_events: Any = PrivateAttr(default=None)
	_lifecycle_lock: Any = PrivateAttr(default=None)
class BrowserSession(BaseModel):
"""Event-driven browser session with backwards compatibility.
This class provides a 2-layer architecture:
- High-level event handling for agents/tools
- Direct CDP/Playwright calls for browser operations
Supports both event-driven and imperative calling styles.
Browser configuration is stored in the browser_profile, session identity in direct fields:
```python
# Direct settings (recommended for most users)
session = BrowserSession(headless=True, user_data_dir='./profile')
# Or use a profile (for advanced use cases)
session = BrowserSession(browser_profile=BrowserProfile(...))
# Access session fields directly, browser settings via profile or property
print(session.id) # Session field
```
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
validate_assignment=True,
extra='forbid',
revalidate_instances='never', # resets private attrs on every model rebuild
)
# Overload 1: Cloud browser mode (use cloud-specific params)
@overload
def __init__(
self,
*,
# Cloud browser params - use these for cloud mode
cloud_profile_id: UUID | str | None = None,
cloud_proxy_country_code: ProxyCountryCode | None = None,
cloud_timeout: int | None = None,
# Backward compatibility aliases
profile_id: UUID | str | None = None,
proxy_country_code: ProxyCountryCode | None = None,
timeout: int | None = None,
use_cloud: bool | None = None,
cloud_browser: bool | None = None, # Backward compatibility alias
cloud_browser_params: CloudBrowserParams | None = None,
# Common params that work with cloud
id: str | None = None,
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
) -> None: ...
# Overload 2: Local browser mode (use local browser params)
@overload
def __init__(
self,
*,
# Core configuration for local
id: str | None = None,
cdp_url: str | None = None,
browser_profile: BrowserProfile | None = None,
# Local browser launch params
executable_path: str | Path | None = None,
headless: bool | None = None,
user_data_dir: str | Path | None = None,
args: list[str] | None = None,
downloads_path: str | Path | None = None,
# Common params
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
# All other local params
env: dict[str, str | float | bool] | None = None,
ignore_default_args: list[str] | Literal[True] | None = None,
channel: str | None = None,
chromium_sandbox: bool | None = None,
devtools: bool | None = None,
traces_dir: str | Path | None = None,
accept_downloads: bool | None = None,
permissions: list[str] | None = None,
user_agent: str | None = None,
screen: dict | None = None,
viewport: dict | None = None,
no_viewport: bool | None = None,
device_scale_factor: float | None = None,
record_har_content: str | None = None,
record_har_mode: str | None = None,
record_har_path: str | Path | None = None,
record_video_dir: str | Path | None = None,
record_video_framerate: int | None = None,
record_video_size: dict | None = None,
storage_state: str | Path | dict[str, Any] | None = None,
disable_security: bool | None = None,
deterministic_rendering: bool | None = None,
proxy: ProxySettings | None = None,
enable_default_extensions: bool | None = None,
window_size: dict | None = None,
window_position: dict | None = None,
filter_highlight_ids: bool | None = None,
profile_directory: str | None = None,
) -> None: ...
def __init__(
    self,
    # Core configuration
    id: str | None = None,
    cdp_url: str | None = None,
    is_local: bool = False,
    browser_profile: BrowserProfile | None = None,
    # Cloud browser params (don't mix with local browser params)
    cloud_profile_id: UUID | str | None = None,
    cloud_proxy_country_code: ProxyCountryCode | None = None,
    cloud_timeout: int | None = None,
    # Backward compatibility aliases for cloud params
    profile_id: UUID | str | None = None,
    proxy_country_code: ProxyCountryCode | None = None,
    timeout: int | None = None,
    # BrowserProfile fields that can be passed directly
    # From BrowserConnectArgs
    headers: dict[str, str] | None = None,
    # From BrowserLaunchArgs
    env: dict[str, str | float | bool] | None = None,
    executable_path: str | Path | None = None,
    headless: bool | None = None,
    args: list[str] | None = None,
    ignore_default_args: list[str] | Literal[True] | None = None,
    channel: str | None = None,
    chromium_sandbox: bool | None = None,
    devtools: bool | None = None,
    downloads_path: str | Path | None = None,
    traces_dir: str | Path | None = None,
    # From BrowserContextArgs
    accept_downloads: bool | None = None,
    permissions: list[str] | None = None,
    user_agent: str | None = None,
    screen: dict | None = None,
    viewport: dict | None = None,
    no_viewport: bool | None = None,
    device_scale_factor: float | None = None,
    record_har_content: str | None = None,
    record_har_mode: str | None = None,
    record_har_path: str | Path | None = None,
    record_video_dir: str | Path | None = None,
    record_video_framerate: int | None = None,
    record_video_size: dict | None = None,
    # From BrowserLaunchPersistentContextArgs
    user_data_dir: str | Path | None = None,
    # From BrowserNewContextArgs
    storage_state: str | Path | dict[str, Any] | None = None,
    # BrowserProfile specific fields
    ## Cloud Browser Fields
    use_cloud: bool | None = None,
    cloud_browser: bool | None = None,  # Backward compatibility alias
    cloud_browser_params: CloudBrowserParams | None = None,
    ## Other params
    disable_security: bool | None = None,
    deterministic_rendering: bool | None = None,
    allowed_domains: list[str] | None = None,
    prohibited_domains: list[str] | None = None,
    keep_alive: bool | None = None,
    proxy: ProxySettings | None = None,
    enable_default_extensions: bool | None = None,
    window_size: dict | None = None,
    window_position: dict | None = None,
    minimum_wait_page_load_time: float | None = None,
    wait_for_network_idle_page_load_time: float | None = None,
    wait_between_actions: float | None = None,
    filter_highlight_ids: bool | None = None,
    auto_download_pdfs: bool | None = None,
    profile_directory: str | None = None,
    cookie_whitelist_domains: list[str] | None = None,
    # DOM extraction layer configuration
    cross_origin_iframes: bool | None = None,
    highlight_elements: bool | None = None,
    dom_highlight_elements: bool | None = None,
    paint_order_filtering: bool | None = None,
    # Iframe processing limits
    max_iframes: int | None = None,
    max_iframe_depth: int | None = None,
):
    """Construct the session's BrowserProfile from the keyword arguments.

    Every non-None parameter (except session identity and the cloud params /
    their legacy aliases listed in the exclusion list below) is collected via
    locals() and forwarded to BrowserProfile. If an explicit ``browser_profile``
    is passed too, the direct kwargs are merged on top of it (direct kwargs win).
    """
    # Following the same pattern as AgentSettings in service.py
    # Only pass non-None values to avoid validation errors
    # NOTE: this comprehension snapshots locals() while it still contains exactly
    # the call parameters — it must remain the first executable statement here.
    profile_kwargs = {
        k: v
        for k, v in locals().items()
        if k
        not in [
            'self',
            'browser_profile',
            'id',
            'cloud_profile_id',
            'cloud_proxy_country_code',
            'cloud_timeout',
            'profile_id',
            'proxy_country_code',
            'timeout',
        ]
        and v is not None
    }
    # Handle backward compatibility: prefer cloud_* params over old names
    final_profile_id = cloud_profile_id if cloud_profile_id is not None else profile_id
    final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code
    final_timeout = cloud_timeout if cloud_timeout is not None else timeout
    # If any cloud params are provided, create cloud_browser_params
    if final_profile_id is not None or final_proxy_country_code is not None or final_timeout is not None:
        cloud_params = CreateBrowserRequest(
            cloud_profile_id=final_profile_id,
            cloud_proxy_country_code=final_proxy_country_code,
            cloud_timeout=final_timeout,
        )
        profile_kwargs['cloud_browser_params'] = cloud_params
        profile_kwargs['use_cloud'] = True
    # Handle backward compatibility: map cloud_browser to use_cloud
    if 'cloud_browser' in profile_kwargs:
        profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser')
    # If cloud_browser_params is set, force use_cloud=True
    if cloud_browser_params is not None:
        profile_kwargs['use_cloud'] = True
    # if is_local is False but executable_path is provided, set is_local to True
    if is_local is False and executable_path is not None:
        profile_kwargs['is_local'] = True
    # Only set is_local=True when cdp_url is missing if we're not using cloud browser
    # (cloud browser will provide cdp_url later)
    use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser')
    if not cdp_url and not use_cloud:
        profile_kwargs['is_local'] = True
    # Create browser profile from direct parameters or use provided one
    if browser_profile is not None:
        # Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence)
        merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs}
        resolved_browser_profile = BrowserProfile(**merged_kwargs)
    else:
        resolved_browser_profile = BrowserProfile(**profile_kwargs)
    # Initialize the Pydantic model
    super().__init__(
        id=id or str(uuid7str()),
        browser_profile=resolved_browser_profile,
    )
# Session configuration (session identity only)
id: str = Field(default_factory=lambda: str(uuid7str()), description='Unique identifier for this browser session')
# Browser configuration (reusable profile)
browser_profile: BrowserProfile = Field(
    default_factory=lambda: DEFAULT_BROWSER_PROFILE,
    description='BrowserProfile() options to use for the session, otherwise a default profile will be used',
)
# LLM screenshot resizing configuration
llm_screenshot_size: tuple[int, int] | None = Field(
    default=None,
    description='Target size (width, height) to resize screenshots before sending to LLM. Coordinates from LLM will be scaled back to original viewport size.',
)
# Cache of original viewport size for coordinate conversion (set when browser state is captured)
# NOTE(review): presumably paired with llm_screenshot_size for scaling LLM coords back — confirm at the capture site
_original_viewport_size: tuple[int, int] | None = PrivateAttr(default=None)
# Convenience properties for common browser settings
@property
def cdp_url(self) -> str | None:
"""CDP URL from browser profile."""
return self.browser_profile.cdp_url
@property
def is_local(self) -> bool:
    """True when this session drives a locally-launched browser (delegates to the profile)."""
    profile = self.browser_profile
    return profile.is_local
@property
def cloud_browser(self) -> bool:
    """Whether the profile requests the cloud browser service (alias for profile.use_cloud)."""
    profile = self.browser_profile
    return profile.use_cloud
@property
def demo_mode(self) -> 'DemoMode | None':
    """Lazy init demo mode helper when enabled.

    Returns None unless the profile enables demo_mode; otherwise constructs a
    DemoMode bound to this session on first access and caches it on
    self._demo_mode for subsequent accesses.
    """
    if not self.browser_profile.demo_mode:
        return None
    if self._demo_mode is None:
        # Deferred import: demo_mode.py imports BrowserSession, so importing it
        # at module top level would create a circular import.
        from browser_use.browser.demo_mode import DemoMode

        self._demo_mode = DemoMode(self)
    return self._demo_mode
# Main shared event bus for all browser session + all watchdogs
event_bus: EventBus = Field(default_factory=EventBus)
# Mutable public state - which target has agent focus
agent_focus_target_id: TargetID | None = None
# Mutable private state shared between watchdogs
_cdp_client_root: CDPClient | None = PrivateAttr(default=None)
_connection_lock: Any = PrivateAttr(default=None)  # asyncio.Lock for preventing concurrent connections
# PUBLIC: SessionManager instance (OWNS all targets and sessions)
session_manager: Any = Field(default=None, exclude=True)  # SessionManager
# Caches cleared by reset(): last captured browser state and DOM selector map
_cached_browser_state_summary: Any = PrivateAttr(default=None)
_cached_selector_map: dict[int, EnhancedDOMTreeNode] = PrivateAttr(default_factory=dict)
_downloaded_files: list[str] = PrivateAttr(default_factory=list)  # Track files downloaded during this session
_closed_popup_messages: list[str] = PrivateAttr(default_factory=list)  # Store messages from auto-closed JavaScript dialogs
# Watchdogs
# NOTE(review): typed as Any, presumably to avoid importing every watchdog class here — confirm
_crash_watchdog: Any | None = PrivateAttr(default=None)
_downloads_watchdog: Any | None = PrivateAttr(default=None)
_aboutblank_watchdog: Any | None = PrivateAttr(default=None)
_security_watchdog: Any | None = PrivateAttr(default=None)
_storage_state_watchdog: Any | None = PrivateAttr(default=None)
_local_browser_watchdog: Any | None = PrivateAttr(default=None)
_default_action_watchdog: Any | None = PrivateAttr(default=None)
_dom_watchdog: Any | None = PrivateAttr(default=None)
_screenshot_watchdog: Any | None = PrivateAttr(default=None)
_permissions_watchdog: Any | None = PrivateAttr(default=None)
_recording_watchdog: Any | None = PrivateAttr(default=None)
# Client for the hosted cloud-browser API; lazily used when use_cloud is set
_cloud_browser_client: CloudBrowserClient = PrivateAttr(default_factory=lambda: CloudBrowserClient())
_demo_mode: 'DemoMode | None' = PrivateAttr(default=None)
_logger: Any = PrivateAttr(default=None)
@property
def logger(self) -> Any:
"""Get instance-specific logger with session ID in the name"""
# **regenerate it every time** because our id and str(self) can change as browser connection state changes
# if self._logger is None or not self._cdp_client_root:
# self._logger = logging.getLogger(f'browser_use.{self}')
return logging.getLogger(f'browser_use.{self}')
@cached_property
def _id_for_logs(self) -> str:
    """Get human-friendly semi-unique identifier for differentiating different BrowserSession instances in logs"""
    # Cached for the lifetime of this instance; also mutates the module-level
    # _LOGGED_UNIQUE_SESSION_IDS set so two sessions never claim the same port id.
    str_id = self.id[-4:]  # default to last 4 chars of truly random uuid, less helpful than cdp port but always unique enough
    # Extract the port segment from the CDP URL, e.g. 'ws://host:PORT/...' -> 'PORT'
    port_number = (self.cdp_url or 'no-cdp').rsplit(':', 1)[-1].split('/', 1)[0].strip()
    # NOTE(review): '922' presumably filters the conventional 9222-style default debug ports — confirm
    port_is_random = not port_number.startswith('922')
    port_is_unique_enough = port_number not in _LOGGED_UNIQUE_SESSION_IDS
    if port_number and port_number.isdigit() and port_is_random and port_is_unique_enough:
        # if cdp port is random/unique enough to identify this session, use it as our id in logs
        _LOGGED_UNIQUE_SESSION_IDS.add(port_number)
        str_id = port_number
    return str_id
@property
def _tab_id_for_logs(self) -> str:
return self.agent_focus_target_id[-2:] if self.agent_focus_target_id else f'{red}--{reset}'
def __repr__(self) -> str:
    # Verbose form: includes cdp_url and the full profile alongside the log ids.
    # NOTE(review): the emoji in this f-string are mojibake-corrupted in this copy
    # and the literal is split across physical lines — restore from upstream
    # before relying on this method; as written the string is not terminated.
    return f'BrowserSession๐
{self._id_for_logs} ๐
ฃ {self._tab_id_for_logs} (cdp_url={self.cdp_url}, profile={self.browser_profile})'
def __str__(self) -> str:
    # Compact form: just the session id and focused-tab id for log prefixes.
    # NOTE(review): same mojibake/line-split corruption as __repr__ in this copy —
    # the literal is not terminated as written; restore from upstream.
    return f'BrowserSession๐
{self._id_for_logs} ๐
ฃ {self._tab_id_for_logs}'
async def reset(self) -> None:
    """Clear all cached CDP sessions with proper cleanup.

    Teardown order matters: the session manager is cleared first (it owns the
    targets/sessions), then the root CDP websocket is closed before the caches
    are dropped, so no stale event handlers can fire against cleared state.
    """
    cdp_status = 'connected' if self._cdp_client_root else 'not connected'
    session_mgr_status = 'exists' if self.session_manager else 'None'
    self.logger.debug(
        f'๐ Resetting browser session (CDP: {cdp_status}, SessionManager: {session_mgr_status}, '
        f'focus: {self.agent_focus_target_id[-4:] if self.agent_focus_target_id else "None"})'
    )
    # Clear session manager (which owns _targets, _sessions, _target_sessions)
    if self.session_manager:
        await self.session_manager.clear()
        self.session_manager = None
    # Close CDP WebSocket before clearing to prevent stale event handlers
    if self._cdp_client_root:
        try:
            await self._cdp_client_root.stop()
            self.logger.debug('Closed CDP client WebSocket during reset')
        except Exception as e:
            # Best-effort close: a failed stop must not abort the reset
            self.logger.debug(f'Error closing CDP client during reset: {e}')
    self._cdp_client_root = None  # type: ignore
    # Drop per-connection caches and focus state
    self._cached_browser_state_summary = None
    self._cached_selector_map.clear()
    self._downloaded_files.clear()
    self.agent_focus_target_id = None
    # For local browsers the old cdp_url points at a dead process; forget it
    if self.is_local:
        self.browser_profile.cdp_url = None
    # Detach all watchdogs so a later start() attaches fresh ones
    self._crash_watchdog = None
    self._downloads_watchdog = None
    self._aboutblank_watchdog = None
    self._security_watchdog = None
    self._storage_state_watchdog = None
    self._local_browser_watchdog = None
    self._default_action_watchdog = None
    self._dom_watchdog = None
    self._screenshot_watchdog = None
    self._permissions_watchdog = None
    self._recording_watchdog = None
    if self._demo_mode:
        self._demo_mode.reset()
    self._demo_mode = None
    # NOTE(review): this log literal is mojibake-corrupted and split across lines
    # in this copy — restore the original emoji string from upstream.
    self.logger.info('โ
Browser session reset complete')
def model_post_init(self, __context) -> None:
    """Register event handlers after model initialization.

    Pydantic hook; ``__context`` is unused here. Creates the connection lock
    and wires this session's on_* methods onto the shared event bus, refusing
    to register twice against the same EventBus.
    """
    self._connection_lock = asyncio.Lock()
    # Check if handlers are already registered to prevent duplicates
    from browser_use.browser.watchdog_base import BaseWatchdog

    start_handlers = self.event_bus.handlers.get('BrowserStartEvent', [])
    start_handler_names = [getattr(h, '__name__', str(h)) for h in start_handlers]
    if any('on_BrowserStartEvent' in name for name in start_handler_names):
        raise RuntimeError(
            '[BrowserSession] Duplicate handler registration attempted! '
            'on_BrowserStartEvent is already registered. '
            'This likely means BrowserSession was initialized multiple times with the same EventBus.'
        )
    # One handler per event type; each on_* method below is the single owner of its event
    BaseWatchdog.attach_handler_to_session(self, BrowserStartEvent, self.on_BrowserStartEvent)
    BaseWatchdog.attach_handler_to_session(self, BrowserStopEvent, self.on_BrowserStopEvent)
    BaseWatchdog.attach_handler_to_session(self, NavigateToUrlEvent, self.on_NavigateToUrlEvent)
    BaseWatchdog.attach_handler_to_session(self, SwitchTabEvent, self.on_SwitchTabEvent)
    BaseWatchdog.attach_handler_to_session(self, TabCreatedEvent, self.on_TabCreatedEvent)
    BaseWatchdog.attach_handler_to_session(self, TabClosedEvent, self.on_TabClosedEvent)
    BaseWatchdog.attach_handler_to_session(self, AgentFocusChangedEvent, self.on_AgentFocusChangedEvent)
    BaseWatchdog.attach_handler_to_session(self, FileDownloadedEvent, self.on_FileDownloadedEvent)
    BaseWatchdog.attach_handler_to_session(self, CloseTabEvent, self.on_CloseTabEvent)
@observe_debug(ignore_input=True, ignore_output=True, name='browser_session_start')
async def start(self) -> None:
    """Start the browser session by dispatching BrowserStartEvent and surfacing handler errors."""
    event = self.event_bus.dispatch(BrowserStartEvent())
    await event
    # Ensure any exceptions from the event handler are propagated to the caller
    await event.event_result(raise_if_any=True, raise_if_none=False)
async def kill(self) -> None:
    """Kill the browser session and reset all state."""
    self.logger.debug('๐ kill() called - stopping browser with force=True and resetting state')
    # First save storage state while CDP is still connected
    # (after BrowserStopEvent the connection may be gone, so order matters)
    from browser_use.browser.events import SaveStorageStateEvent

    save_event = self.event_bus.dispatch(SaveStorageStateEvent())
    await save_event
    # Dispatch stop event to kill the browser
    await self.event_bus.dispatch(BrowserStopEvent(force=True))
    # Stop the event bus
    await self.event_bus.stop(clear=True, timeout=5)
    # Reset all state
    await self.reset()
    # Create fresh event bus so the session can be started again cleanly
    self.event_bus = EventBus()
async def stop(self) -> None:
    """Stop the browser session without killing the browser process.

    This clears event buses and cached state but keeps the browser alive.
    Useful when you want to clean up resources but plan to reconnect later.
    """
    self.logger.debug('โธ๏ธ stop() called - stopping browser gracefully (force=False) and resetting state')
    # First save storage state while CDP is still connected
    from browser_use.browser.events import SaveStorageStateEvent

    save_event = self.event_bus.dispatch(SaveStorageStateEvent())
    await save_event
    # Now dispatch BrowserStopEvent to notify watchdogs (force=False = graceful)
    await self.event_bus.dispatch(BrowserStopEvent(force=False))
    # Stop the event bus
    await self.event_bus.stop(clear=True, timeout=5)
    # Reset all state
    await self.reset()
    # Create fresh event bus for a potential later reconnect
    self.event_bus = EventBus()
@observe_debug(ignore_input=True, ignore_output=True, name='browser_start_event_handler')
async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]:
    """Handle browser start request.

    Returns:
        Dict with 'cdp_url' key containing the CDP URL

    Note: This method is idempotent - calling start() multiple times is safe.
    - If already connected, it skips reconnection
    - If you need to reset state, call stop() or kill() first
    """
    # Initialize and attach all watchdogs FIRST so LocalBrowserWatchdog can handle BrowserLaunchEvent
    await self.attach_all_watchdogs()
    try:
        # If no CDP URL, launch local browser or cloud browser
        if not self.cdp_url:
            if self.browser_profile.use_cloud or self.browser_profile.cloud_browser_params is not None:
                # Use cloud browser service
                try:
                    # Use cloud_browser_params if provided, otherwise create empty request
                    cloud_params = self.browser_profile.cloud_browser_params or CreateBrowserRequest()
                    cloud_browser_response = await self._cloud_browser_client.create_browser(cloud_params)
                    self.browser_profile.cdp_url = cloud_browser_response.cdpUrl
                    self.browser_profile.is_local = False
                    self.logger.info('๐ค๏ธ Successfully connected to cloud browser service')
                except CloudBrowserAuthError:
                    # Re-raise with actionable guidance instead of the bare auth error
                    raise CloudBrowserAuthError(
                        'Authentication failed for cloud browser service. Set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
                    )
                except CloudBrowserError as e:
                    raise CloudBrowserError(f'Failed to create cloud browser: {e}')
            elif self.is_local:
                # Launch local browser using event-driven approach
                launch_event = self.event_bus.dispatch(BrowserLaunchEvent())
                await launch_event
                # Get the CDP URL from LocalBrowserWatchdog handler result
                launch_result: BrowserLaunchResult = cast(
                    BrowserLaunchResult, await launch_event.event_result(raise_if_none=True, raise_if_any=True)
                )
                self.browser_profile.cdp_url = launch_result.cdp_url
            else:
                raise ValueError('Got BrowserSession(is_local=False) but no cdp_url was provided to connect to!')
        # By now some branch above must have produced a usable URL
        assert self.cdp_url and '://' in self.cdp_url
        # Use lock to prevent concurrent connection attempts (race condition protection)
        async with self._connection_lock:
            # Only connect if not already connected
            if self._cdp_client_root is None:
                # Setup browser via CDP (for both local and remote cases)
                await self.connect(cdp_url=self.cdp_url)
                assert self.cdp_client is not None
                # Notify that browser is connected (single place)
                self.event_bus.dispatch(BrowserConnectedEvent(cdp_url=self.cdp_url))
                if self.browser_profile.demo_mode:
                    # Demo overlay is best-effort: failure to inject must not fail startup
                    try:
                        demo = self.demo_mode
                        if demo:
                            await demo.ensure_ready()
                    except Exception as exc:
                        self.logger.warning(f'[DemoMode] Failed to inject demo overlay: {exc}')
            else:
                self.logger.debug('Already connected to CDP, skipping reconnection')
                # Even on the already-connected path, make sure the demo overlay is present
                if self.browser_profile.demo_mode:
                    try:
                        demo = self.demo_mode
                        if demo:
                            await demo.ensure_ready()
                    except Exception as exc:
                        self.logger.warning(f'[DemoMode] Failed to inject demo overlay: {exc}')
        # Return the CDP URL for other components
        return {'cdp_url': self.cdp_url}
    except Exception as e:
        # Broadcast the failure on the bus before propagating to the caller
        self.event_bus.dispatch(
            BrowserErrorEvent(
                error_type='BrowserStartEventError',
                message=f'Failed to start browser: {type(e).__name__} {e}',
                details={'cdp_url': self.cdp_url, 'is_local': self.is_local},
            )
        )
        raise
async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None:
    """Handle navigation requests - core browser functionality.

    Resolves which target (tab) to navigate: reuses the current blank tab,
    reuses an existing about:blank tab, or creates a new one when
    event.new_tab is set; then switches focus, navigates, and emits the
    Navigation*/AgentFocusChanged events. On failure a NavigationCompleteEvent
    carrying error_message is emitted before the exception is re-raised.
    """
    self.logger.debug(f'[on_NavigateToUrlEvent] Received NavigateToUrlEvent: url={event.url}, new_tab={event.new_tab}')
    if not self.agent_focus_target_id:
        self.logger.warning('Cannot navigate - browser not connected')
        return
    target_id = None
    current_target_id = self.agent_focus_target_id
    # If new_tab=True but we're already in a new tab, set new_tab=False
    current_target = self.session_manager.get_target(current_target_id)
    if event.new_tab and is_new_tab_page(current_target.url):
        self.logger.debug(f'[on_NavigateToUrlEvent] Already on blank tab ({current_target.url}), reusing')
        event.new_tab = False
    try:
        # Find or create target for navigation
        self.logger.debug(f'[on_NavigateToUrlEvent] Processing new_tab={event.new_tab}')
        if event.new_tab:
            page_targets = self.session_manager.get_all_page_targets()
            self.logger.debug(f'[on_NavigateToUrlEvent] Found {len(page_targets)} existing tabs')
            # Look for existing about:blank tab that's not the current one
            for idx, target in enumerate(page_targets):
                self.logger.debug(f'[on_NavigateToUrlEvent] Tab {idx}: url={target.url}, targetId={target.target_id}')
                if target.url == 'about:blank' and target.target_id != current_target_id:
                    target_id = target.target_id
                    self.logger.debug(f'Reusing existing about:blank tab #{target_id[-4:]}')
                    break
            # Create new tab if no reusable one found
            if not target_id:
                self.logger.debug('[on_NavigateToUrlEvent] No reusable about:blank tab found, creating new tab...')
                try:
                    target_id = await self._cdp_create_new_page('about:blank')
                    self.logger.debug(f'Created new tab #{target_id[-4:]}')
                    # Dispatch TabCreatedEvent for new tab
                    await self.event_bus.dispatch(TabCreatedEvent(target_id=target_id, url='about:blank'))
                except Exception as e:
                    self.logger.error(f'[on_NavigateToUrlEvent] Failed to create new tab: {type(e).__name__}: {e}')
                    # Fall back to using current tab
                    target_id = current_target_id
                    self.logger.warning(f'[on_NavigateToUrlEvent] Falling back to current tab #{target_id[-4:]}')
        else:
            # Use current tab
            target_id = target_id or current_target_id
        # Switch to target tab if needed (for both new_tab=True and new_tab=False)
        if self.agent_focus_target_id is None or self.agent_focus_target_id != target_id:
            self.logger.debug(
                f'[on_NavigateToUrlEvent] Switching to target tab {target_id[-4:]} (current: {self.agent_focus_target_id[-4:] if self.agent_focus_target_id else "none"})'
            )
            # Activate target (bring to foreground)
            await self.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
        else:
            self.logger.debug(f'[on_NavigateToUrlEvent] Already on target tab {target_id[-4:]}, skipping SwitchTabEvent')
        assert self.agent_focus_target_id is not None and self.agent_focus_target_id == target_id, (
            'Agent focus not updated to new target_id after SwitchTabEvent should have switched to it'
        )
        # Dispatch navigation started
        await self.event_bus.dispatch(NavigationStartedEvent(target_id=target_id, url=event.url))
        # Navigate to URL with proper lifecycle waiting
        await self._navigate_and_wait(event.url, target_id)
        # Close any extension options pages that might have opened
        await self._close_extension_options_pages()
        # Dispatch navigation complete
        self.logger.debug(f'Dispatching NavigationCompleteEvent for {event.url} (tab #{target_id[-4:]})')
        await self.event_bus.dispatch(
            NavigationCompleteEvent(
                target_id=target_id,
                url=event.url,
                status=None,  # CDP doesn't provide status directly
            )
        )
        await self.event_bus.dispatch(AgentFocusChangedEvent(target_id=target_id, url=event.url))
        # Note: These should be handled by dedicated watchdogs:
        # - Security checks (security_watchdog)
        # - Page health checks (crash_watchdog)
        # - Dialog handling (dialog_watchdog)
        # - Download handling (downloads_watchdog)
        # - DOM rebuilding (dom_watchdog)
    except Exception as e:
        self.logger.error(f'Navigation failed: {type(e).__name__}: {e}')
        # target_id might be unbound if exception happens early
        if 'target_id' in locals() and target_id:
            await self.event_bus.dispatch(
                NavigationCompleteEvent(
                    target_id=target_id,
                    url=event.url,
                    error_message=f'{type(e).__name__}: {e}',
                )
            )
            await self.event_bus.dispatch(AgentFocusChangedEvent(target_id=target_id, url=event.url))
        raise
async def _navigate_and_wait(self, url: str, target_id: str, timeout: float | None = None) -> None:
"""Navigate to URL and wait for page readiness using CDP lifecycle events.
Two-strategy approach optimized for speed with robust fallback:
1. networkIdle - Returns ASAP when no network activity (~50-200ms for cached pages)
2. load - Fallback when page has ongoing network activity (all resources loaded)
This gives us instant returns for cached content while being robust for dynamic pages.
NO handler registration here - handlers are registered ONCE per session in SessionManager.
We poll stored events instead to avoid handler accumulation.
"""
cdp_session = await self.get_or_create_cdp_session(target_id, focus=False)
if timeout is None:
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/demo_mode.py | browser_use/browser/demo_mode.py | """Demo mode helper for injecting and updating the in-browser log panel."""
from __future__ import annotations
import asyncio
import json
import logging
from datetime import datetime, timezone
from typing import Any
from browser_use.browser.session import BrowserSession
# Embedded JavaScript for demo panel (injected into browser pages)
_DEMO_PANEL_SCRIPT = r"""(function () {
// SESSION_ID_PLACEHOLDER will be replaced by DemoMode with actual session ID
const SESSION_ID = '__BROWSER_USE_SESSION_ID_PLACEHOLDER__';
const EXCLUDE_ATTR = 'data-browser-use-exclude-' + SESSION_ID;
const PANEL_ID = 'browser-use-demo-panel';
const STYLE_ID = 'browser-use-demo-panel-style';
const STORAGE_KEY = '__browserUseDemoLogs__';
const STORAGE_HTML_KEY = '__browserUseDemoLogsHTML__';
const PANEL_STATE_KEY = '__browserUseDemoPanelState__';
const TOGGLE_BUTTON_ID = 'browser-use-demo-toggle';
const MAX_MESSAGES = 100;
const EXPANDED_IDS_KEY = '__browserUseExpandedEntries__';
const LEVEL_ICONS = {
info: 'โน๏ธ',
action: 'โถ๏ธ',
thought: '๐ญ',
success: 'โ
',
warning: 'โ ๏ธ',
error: 'โ',
};
const LEVEL_LABELS = {
info: 'info',
action: 'action',
thought: 'thought',
success: 'success',
warning: 'warning',
error: 'error',
};
if (window.__browserUseDemoPanelLoaded) {
const existingPanel = document.getElementById(PANEL_ID);
if (!existingPanel) {
initializePanel();
}
return;
}
window.__browserUseDemoPanelLoaded = true;
const state = {
panel: null,
list: null,
messages: [],
isOpen: true,
toggleButton: null,
};
state.messages = restoreMessages();
function initializePanel() {
console.log('Browser-use demo panel initialized with session ID:', SESSION_ID);
addStyles();
state.isOpen = loadPanelState();
state.panel = buildPanel();
state.list = state.panel.querySelector('[data-role="log-list"]');
appendToHost(state.panel);
state.toggleButton = buildToggleButton();
appendToHost(state.toggleButton);
const savedWidth = loadPanelWidth();
if (savedWidth) {
document.documentElement.style.setProperty('--browser-use-demo-panel-width', `${savedWidth}px`);
}
if (!hydrateFromStoredMarkup()) {
state.messages.forEach((entry) => appendEntry(entry, false));
}
attachCloseHandler();
if (state.isOpen) {
openPanel(false);
} else {
closePanel(false);
}
adjustLayout();
window.addEventListener('resize', debounce(adjustLayout, 150));
}
function appendToHost(node) {
if (!node) {
return;
}
const host = document.body || document.documentElement;
if (!host.contains(node)) {
host.appendChild(node);
}
if (!document.body) {
document.addEventListener(
'DOMContentLoaded',
() => {
if (document.body && node.parentNode !== document.body) {
document.body.appendChild(node);
}
},
{ once: true }
);
}
}
function addStyles() {
if (document.getElementById(STYLE_ID)) {
return;
}
const style = document.createElement('style');
style.id = STYLE_ID;
style.setAttribute(EXCLUDE_ATTR, 'true');
style.textContent = `
#${PANEL_ID} {
position: fixed;
top: 0;
right: 0;
width: var(--browser-use-demo-panel-width, 340px);
max-width: calc(100vw - 64px);
height: 100vh;
display: flex;
flex-direction: column;
background: #05070d;
color: #f8f9ff;
font-family: 'JetBrains Mono', 'Fira Code', 'Monaco', 'Menlo', monospace;
font-size: 13px;
line-height: 1.4;
box-shadow: -6px 0 25px rgba(0, 0, 0, 0.35);
z-index: 2147480000;
border-left: 1px solid rgba(255, 255, 255, 0.14);
backdrop-filter: blur(10px);
pointer-events: auto;
transform: translateX(0);
opacity: 1;
transition: transform 0.25s ease, opacity 0.25s ease;
}
#${PANEL_ID}[data-open="false"] {
transform: translateX(110%);
opacity: 0;
pointer-events: none;
}
#${PANEL_ID} .browser-use-demo-header {
padding: 16px 18px 12px;
border-bottom: 1px solid rgba(255, 255, 255, 0.14);
display: flex;
align-items: baseline;
justify-content: space-between;
gap: 8px;
flex-wrap: wrap;
}
#${PANEL_ID} .browser-use-demo-header h1 {
font-size: 15px;
text-transform: uppercase;
letter-spacing: 0.12em;
margin: 0;
color: #f8f9ff;
}
#${PANEL_ID} .browser-use-badge {
font-size: 11px;
padding: 2px 10px;
border-radius: 999px;
border: 1px solid rgba(255, 255, 255, 0.4);
text-transform: uppercase;
letter-spacing: 0.08em;
color: #f8f9ff;
}
#${PANEL_ID} .browser-use-logo img {
height: 36px;
}
#${PANEL_ID} .browser-use-header-actions {
margin-left: auto;
display: flex;
align-items: center;
gap: 8px;
}
#${PANEL_ID} .browser-use-close-btn {
width: 28px;
height: 28px;
border-radius: 50%;
border: 1px solid rgba(255, 255, 255, 0.2);
background: rgba(255, 255, 255, 0.05);
color: #f8f9ff;
cursor: pointer;
font-size: 16px;
line-height: 1;
display: flex;
align-items: center;
justify-content: center;
transition: background 0.2s ease, border 0.2s ease;
}
#${PANEL_ID} .browser-use-close-btn:hover {
background: rgba(255, 255, 255, 0.15);
border-color: rgba(255, 255, 255, 0.35);
}
#${PANEL_ID} .browser-use-demo-body {
flex: 1;
overflow-y: auto;
scrollbar-width: thin;
scrollbar-color: rgba(255, 255, 255, 0.3) transparent;
padding: 8px 0 12px;
}
#${PANEL_ID} .browser-use-demo-body::-webkit-scrollbar {
width: 8px;
}
#${PANEL_ID} .browser-use-demo-body::-webkit-scrollbar-thumb {
background: rgba(255, 255, 255, 0.25);
border-radius: 999px;
}
.browser-use-demo-entry {
display: flex;
gap: 12px;
padding: 10px 18px;
border-left: 2px solid transparent;
border-bottom: 1px solid rgba(255, 255, 255, 0.04);
animation: browser-use-fade-in 0.25s ease;
background: #000000;
}
.browser-use-demo-entry:last-child {
border-bottom-color: transparent;
}
.browser-use-entry-icon {
font-size: 16px;
line-height: 1.2;
width: 20px;
}
.browser-use-entry-content {
flex: 1;
min-width: 0;
}
.browser-use-entry-meta {
font-size: 11px;
letter-spacing: 0.08em;
text-transform: uppercase;
color: white;
margin-bottom: 4px;
display: flex;
justify-content: space-between;
gap: 12px;
}
.browser-use-entry-message {
margin: 0;
word-break: break-word;
font-size: 12px;
color: #f8f9ff;
display: flex;
flex-direction: column;
gap: 6px;
}
.browser-use-markdown-content {
margin: 0;
line-height: 1.5;
}
.browser-use-markdown-content p {
margin: 0 0 8px 0;
}
.browser-use-markdown-content p:last-child {
margin-bottom: 0;
}
.browser-use-markdown-content h1,
.browser-use-markdown-content h2,
.browser-use-markdown-content h3 {
margin: 8px 0 4px 0;
font-weight: 600;
color: #f8f9ff;
}
.browser-use-markdown-content h1 {
font-size: 16px;
}
.browser-use-markdown-content h2 {
font-size: 14px;
}
.browser-use-markdown-content h3 {
font-size: 13px;
}
.browser-use-markdown-content code {
background: rgba(255, 255, 255, 0.1);
padding: 2px 6px;
border-radius: 3px;
font-family: 'JetBrains Mono', 'Fira Code', 'Monaco', 'Menlo', monospace;
font-size: 11px;
color: #60a5fa;
}
.browser-use-markdown-content pre {
background: rgba(0, 0, 0, 0.3);
padding: 8px 12px;
border-radius: 4px;
overflow-x: auto;
margin: 8px 0;
border: 1px solid rgba(255, 255, 255, 0.1);
}
.browser-use-markdown-content pre code {
background: transparent;
padding: 0;
color: #f8f9ff;
font-size: 11px;
white-space: pre;
}
.browser-use-markdown-content ul,
.browser-use-markdown-content ol {
margin: 4px 0 4px 16px;
padding: 0;
}
.browser-use-markdown-content li {
margin: 2px 0;
}
.browser-use-markdown-content a {
color: #60a5fa;
text-decoration: underline;
}
.browser-use-markdown-content a:hover {
color: #93c5fd;
}
.browser-use-markdown-content strong {
font-weight: 600;
color: #f8f9ff;
}
.browser-use-markdown-content em {
font-style: italic;
}
.browser-use-demo-entry:not(.expanded) .browser-use-markdown-content {
max-height: 120px;
overflow: hidden;
mask-image: linear-gradient(to bottom, rgba(0,0,0,1), rgba(0,0,0,0));
}
.browser-use-entry-toggle {
align-self: flex-start;
background: rgba(255, 255, 255, 0.1);
border: 1px solid rgba(255, 255, 255, 0.2);
color: #f8f9ff;
padding: 2px 10px;
font-size: 11px;
border-radius: 999px;
cursor: pointer;
}
.browser-use-demo-entry.level-info { border-left-color: #60a5fa; }
.browser-use-demo-entry.level-action { border-left-color: #34d399; }
.browser-use-demo-entry.level-thought { border-left-color: #f97316; }
.browser-use-demo-entry.level-warning { border-left-color: #fbbf24; }
.browser-use-demo-entry.level-success { border-left-color: #22c55e; }
.browser-use-demo-entry.level-error { border-left-color: #f87171; }
@keyframes browser-use-fade-in {
from { opacity: 0; transform: translateY(6px); }
to { opacity: 1; transform: translateY(0); }
}
@media (max-width: 1024px) {
#${PANEL_ID} {
font-size: 12px;
}
#${PANEL_ID} .browser-use-demo-header {
padding: 12px 16px 10px;
}
}
#${TOGGLE_BUTTON_ID} {
position: fixed;
top: 20px;
right: 20px;
width: 44px;
height: 44px;
border-radius: 50%;
border: 1px solid rgba(255, 255, 255, 0.2);
background: rgba(5, 7, 13, 0.92);
color: #f8f9ff;
font-size: 18px;
display: none;
align-items: center;
justify-content: center;
cursor: pointer;
z-index: 2147480001;
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.4);
transition: transform 0.2s ease, background 0.2s ease;
}
#${TOGGLE_BUTTON_ID}:hover {
transform: scale(1.05);
background: rgba(5, 7, 13, 0.98);
}
#${TOGGLE_BUTTON_ID} img {
display: block;
width: 24px;
height: auto;
max-width: 100%;
max-height: 100%;
object-fit: contain;
pointer-events: none;
user-select: none;
}
`;
document.head.appendChild(style);
}
function buildPanel() {
  // Root container for the demo overlay; marked with EXCLUDE_ATTR so the
  // agent's own DOM extraction skips it.
  const root = document.createElement('section');
  root.id = PANEL_ID;
  root.setAttribute('role', 'complementary');
  root.setAttribute('aria-label', 'Browser-use demo panel');
  root.setAttribute(EXCLUDE_ATTR, 'true');

  // Header: logo on the left, action buttons on the right.
  const headerEl = document.createElement('header');
  headerEl.className = 'browser-use-demo-header';

  const logoWrap = document.createElement('div');
  logoWrap.className = 'browser-use-logo';
  const logoImg = document.createElement('img');
  logoImg.src = 'https://raw.githubusercontent.com/browser-use/browser-use/main/static/browser-use-dark.png';
  logoImg.alt = 'Browser-use';
  logoImg.loading = 'lazy';
  logoWrap.appendChild(logoImg);

  const actionWrap = document.createElement('div');
  actionWrap.className = 'browser-use-header-actions';
  const hideBtn = document.createElement('button');
  hideBtn.type = 'button';
  hideBtn.className = 'browser-use-close-btn';
  hideBtn.setAttribute(EXCLUDE_ATTR, 'true');
  hideBtn.setAttribute('aria-label', 'Hide demo panel');
  hideBtn.dataset.role = 'close-toggle';
  hideBtn.innerHTML = '&times;';
  actionWrap.appendChild(hideBtn);

  headerEl.appendChild(logoWrap);
  headerEl.appendChild(actionWrap);

  // Scrollable area that log entries are appended into.
  const logBody = document.createElement('div');
  logBody.className = 'browser-use-demo-body';
  logBody.setAttribute('data-role', 'log-list');

  root.appendChild(headerEl);
  root.appendChild(logBody);
  root.setAttribute('data-open', 'true');
  return root;
}
function buildToggleButton() {
const button = document.createElement('button');
button.id = TOGGLE_BUTTON_ID;
button.type = 'button';
button.setAttribute(EXCLUDE_ATTR, 'true');
button.setAttribute('aria-label', 'Open demo panel');
const img = document.createElement('img');
img.alt = 'Browser-use';
img.loading = 'eager';
// Use embedded SVG as data URI to avoid CSP issues
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/python_highlights.py | browser_use/browser/python_highlights.py | """Python-based highlighting system for drawing bounding boxes on screenshots.
This module replaces JavaScript-based highlighting with fast Python image processing
to draw bounding boxes around interactive elements directly on screenshots.
"""
import asyncio
import base64
import io
import logging
import os
from PIL import Image, ImageDraw, ImageFont
from browser_use.dom.views import DOMSelectorMap, EnhancedDOMTreeNode
from browser_use.observability import observe_debug
from browser_use.utils import time_execution_async
logger = logging.getLogger(__name__)

# Font cache to prevent repeated font loading and reduce memory usage.
# Keyed by (cache_tag, font_size); the value is None when no system font
# could be loaded at that size (negative results are cached too).
_FONT_CACHE: dict[tuple[str, int], ImageFont.FreeTypeFont | None] = {}

# Cross-platform font paths, probed in order; the first file that opens wins.
_FONT_PATHS = [
    '/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf',  # Linux (Debian/Ubuntu)
    '/usr/share/fonts/TTF/DejaVuSans-Bold.ttf',  # Linux (Arch/Fedora)
    '/System/Library/Fonts/Arial.ttf',  # macOS
    'C:\\Windows\\Fonts\\arial.ttf',  # Windows
    'arial.ttf',  # Windows (system path)
    'Arial Bold.ttf',  # macOS alternative
    '/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf',  # Linux alternative
]
def get_cross_platform_font(font_size: int) -> ImageFont.FreeTypeFont | None:
    """Load and memoize a system font at the requested size.

    Args:
        font_size: Point size of the font to load.

    Returns:
        A truetype font object, or ``None`` when none of the known system
        font paths could be opened.
    """
    key = ('system_font', font_size)

    # Serve from the cache when possible; this also caches misses (None),
    # so unavailable fonts are only probed once per size.
    if key not in _FONT_CACHE:
        loaded = None
        for candidate in _FONT_PATHS:
            try:
                loaded = ImageFont.truetype(candidate, font_size)
            except OSError:
                continue
            else:
                break
        _FONT_CACHE[key] = loaded

    return _FONT_CACHE[key]
def cleanup_font_cache() -> None:
    """Release all cached fonts (for long-running applications).

    ``dict.clear()`` mutates the module-level cache in place, so no
    ``global`` declaration is required.
    """
    _FONT_CACHE.clear()
# Color scheme for different element types
ELEMENT_COLORS = {
'button': '#FF6B6B', # Red for buttons
'input': '#4ECDC4', # Teal for inputs
'select': '#45B7D1', # Blue for dropdowns
'a': '#96CEB4', # Green for links
'textarea': '#FF8C42', # Orange for text areas (was yellow, now more visible)
'default': '#DDA0DD', # Light purple for other interactive elements
}
# Element type mappings
ELEMENT_TYPE_MAP = {
'button': 'button',
'input': 'input',
'select': 'select',
'a': 'a',
'textarea': 'textarea',
}
def get_element_color(tag_name: str, element_type: str | None = None) -> str:
"""Get color for element based on tag name and type."""
# Check input type first
if tag_name == 'input' and element_type:
if element_type in ['button', 'submit']:
return ELEMENT_COLORS['button']
# Use tag-based color
return ELEMENT_COLORS.get(tag_name.lower(), ELEMENT_COLORS['default'])
def should_show_index_overlay(backend_node_id: int | None) -> bool:
"""Determine if index overlay should be shown."""
return backend_node_id is not None
def draw_enhanced_bounding_box_with_text(
    draw,  # ImageDraw.Draw - avoiding type annotation due to PIL typing issues
    bbox: tuple[int, int, int, int],
    color: str,
    text: str | None = None,
    font: ImageFont.FreeTypeFont | None = None,
    element_type: str = 'div',
    image_size: tuple[int, int] = (2000, 1500),
    device_pixel_ratio: float = 1.0,
) -> None:
    """Draw a dashed bounding box with an optional index badge.

    Args:
        draw: PIL ``ImageDraw.Draw`` context to render into.
        bbox: Element bounds as (x1, y1, x2, y2) in image pixels.
        color: Outline colour and badge fill (hex string).
        text: Optional index label; no badge is drawn when falsy.
        font: Fallback font used when no system font can be loaded.
        element_type: Tag name of the element (currently unused for drawing).
        image_size: (width, height) of the target image, used for clamping.
        device_pixel_ratio: Currently unused; sizes scale off image width.
    """
    x1, y1, x2, y2 = bbox

    # Dash pattern for the outline.
    dash_length = 4
    gap_length = 8
    line_width = 2

    def draw_dashed_line(start_x, start_y, end_x, end_y):
        """Draw a dashed axis-aligned segment; endpoints may come in either order."""
        # BUG FIX: endpoints are normalized with sorted(). Previously the
        # Bottom edge (x2 -> x1) and Left edge (y2 -> y1) were passed with
        # start > end, so their while-loops never executed and those two
        # edges were silently never drawn.
        if start_x == end_x:  # Vertical line
            y, y_end = sorted((start_y, end_y))
            while y < y_end:
                dash_end = min(y + dash_length, y_end)
                draw.line([(start_x, y), (start_x, dash_end)], fill=color, width=line_width)
                y += dash_length + gap_length
        else:  # Horizontal line
            x, x_end = sorted((start_x, end_x))
            while x < x_end:
                dash_end = min(x + dash_length, x_end)
                draw.line([(x, start_y), (dash_end, start_y)], fill=color, width=line_width)
                x += dash_length + gap_length

    # Draw all four edges of the dashed rectangle.
    draw_dashed_line(x1, y1, x2, y1)  # Top
    draw_dashed_line(x2, y1, x2, y2)  # Right
    draw_dashed_line(x2, y2, x1, y2)  # Bottom
    draw_dashed_line(x1, y2, x1, y1)  # Left

    # Draw the index badge if we have label text.
    if not text:
        return
    try:
        img_width, img_height = image_size
        css_width = img_width  # / device_pixel_ratio

        # Scale the badge font with image width: 1% of width, clamped to
        # 10-20px so badges stay readable without becoming huge.
        base_font_size = max(10, min(20, int(css_width * 0.01)))

        # Shared, cached font loader; fall back to the caller's font.
        big_font = get_cross_platform_font(base_font_size)
        if big_font is None:
            big_font = font

        # Measure the label with whichever font is available (textbbox
        # falls back to PIL's built-in default font when font is None).
        if big_font:
            bbox_text = draw.textbbox((0, 0), text, font=big_font)
        else:
            bbox_text = draw.textbbox((0, 0), text)
        text_width = bbox_text[2] - bbox_text[0]
        text_height = bbox_text[3] - bbox_text[1]

        # Badge padding: 0.5% of image width, clamped to 4-10px.
        padding = max(4, min(10, int(css_width * 0.005)))
        element_width = x2 - x1
        element_height = y2 - y1

        container_width = text_width + padding * 2
        container_height = text_height + padding * 2

        # Center the badge horizontally over the element.
        bg_x1 = x1 + (element_width - container_width) // 2

        if element_width < 60 or element_height < 30:
            # Small element: place the badge above it so icons/content
            # underneath stay visible.
            bg_y1 = max(0, y1 - container_height - 5)
        else:
            # Regular element: place inside with a small offset.
            bg_y1 = y1 + 2

        bg_x2 = bg_x1 + container_width
        bg_y2 = bg_y1 + container_height

        # Center the label in the badge; subtracting the text bbox top
        # offset keeps glyphs vertically centred instead of clipped.
        text_x = bg_x1 + (container_width - text_width) // 2
        text_y = bg_y1 + (container_height - text_height) // 2 - bbox_text[1]

        # Shift badge (and label with it) back inside the image bounds.
        if bg_x1 < 0:
            offset = -bg_x1
            bg_x1 += offset
            bg_x2 += offset
            text_x += offset
        if bg_y1 < 0:
            offset = -bg_y1
            bg_y1 += offset
            bg_y2 += offset
            text_y += offset
        if bg_x2 > img_width:
            offset = bg_x2 - img_width
            bg_x1 -= offset
            bg_x2 -= offset
            text_x -= offset
        if bg_y2 > img_height:
            offset = bg_y2 - img_height
            bg_y1 -= offset
            bg_y2 -= offset
            text_y -= offset

        # Filled badge with a white border for contrast, white label text.
        draw.rectangle([bg_x1, bg_y1, bg_x2, bg_y2], fill=color, outline='white', width=2)
        draw.text((text_x, text_y), text, fill='white', font=big_font or font)
    except Exception as e:
        logger.debug(f'Failed to draw enhanced text overlay: {e}')
def draw_bounding_box_with_text(
    draw,  # ImageDraw.Draw - avoiding type annotation due to PIL typing issues
    bbox: tuple[int, int, int, int],
    color: str,
    text: str | None = None,
    font: ImageFont.FreeTypeFont | None = None,
) -> None:
    """Draw a dashed bounding box with an optional high-contrast text overlay.

    Args:
        draw: PIL ``ImageDraw.Draw`` context to render into.
        bbox: Element bounds as (x1, y1, x2, y2) in image pixels.
        color: Outline colour for the dashes.
        text: Optional index label; skipped when falsy.
        font: Font for the label; PIL's default font is used when ``None``.

    NOTE(review): the label clamping below hard-codes a 1200x800 canvas;
    the enhanced variant takes ``image_size`` explicitly — confirm this
    function is only used on screenshots no larger than that.
    """
    x1, y1, x2, y2 = bbox

    # Dash pattern; each edge is drawn twice (offset by 1px) for a thicker look.
    dash_length = 2
    gap_length = 6

    # Top edge
    x = x1
    while x < x2:
        end_x = min(x + dash_length, x2)
        draw.line([(x, y1), (end_x, y1)], fill=color, width=2)
        draw.line([(x, y1 + 1), (end_x, y1 + 1)], fill=color, width=2)
        x += dash_length + gap_length

    # Bottom edge
    x = x1
    while x < x2:
        end_x = min(x + dash_length, x2)
        draw.line([(x, y2), (end_x, y2)], fill=color, width=2)
        draw.line([(x, y2 - 1), (end_x, y2 - 1)], fill=color, width=2)
        x += dash_length + gap_length

    # Left edge
    y = y1
    while y < y2:
        end_y = min(y + dash_length, y2)
        draw.line([(x1, y), (x1, end_y)], fill=color, width=2)
        draw.line([(x1 + 1, y), (x1 + 1, end_y)], fill=color, width=2)
        y += dash_length + gap_length

    # Right edge
    y = y1
    while y < y2:
        end_y = min(y + dash_length, y2)
        draw.line([(x2, y), (x2, end_y)], fill=color, width=2)
        draw.line([(x2 - 1, y), (x2 - 1, end_y)], fill=color, width=2)
        y += dash_length + gap_length

    # Draw index overlay if we have index text
    if text:
        try:
            # Measure the label with the given font (or PIL's default).
            if font:
                bbox_text = draw.textbbox((0, 0), text, font=font)
                text_width = bbox_text[2] - bbox_text[0]
                text_height = bbox_text[3] - bbox_text[1]
            else:
                # Fallback for default font
                bbox_text = draw.textbbox((0, 0), text)
                text_width = bbox_text[2] - bbox_text[0]
                text_height = bbox_text[3] - bbox_text[1]

            # Positioning strategy depends on how large the element is
            # relative to the label box.
            padding = 5
            element_width = x2 - x1
            element_height = y2 - y1
            element_area = element_width * element_height
            index_box_area = (text_width + padding * 2) * (text_height + padding * 2)

            # max(..., 1) guards against division by zero for empty labels.
            size_ratio = element_area / max(index_box_area, 1)

            if size_ratio < 4:
                # Very small elements: place outside, to the right of the box.
                text_x = x2 + padding
                text_y = y2 - text_height
                # Clamp to an assumed 1200px-wide canvas (see docstring note).
                text_x = min(text_x, 1200 - text_width - padding)
                text_y = max(text_y, 0)
            elif size_ratio < 16:
                # Medium elements: place in bottom-right corner inside.
                text_x = x2 - text_width - padding
                text_y = y2 - text_height - padding
            else:
                # Large elements: place in center.
                text_x = x1 + (element_width - text_width) // 2
                text_y = y1 + (element_height - text_height) // 2

            # Final clamp to the assumed 1200x800 canvas bounds.
            text_x = max(0, min(text_x, 1200 - text_width))
            text_y = max(0, min(text_y, 800 - text_height))

            # Background rectangle for maximum contrast.
            bg_x1 = text_x - padding
            bg_y1 = text_y - padding
            bg_x2 = text_x + text_width + padding
            bg_y2 = text_y + text_height + padding

            # White background with thick black border for maximum visibility.
            draw.rectangle([bg_x1, bg_y1, bg_x2, bg_y2], fill='white', outline='black', width=2)

            # Dark text on the light background for best contrast.
            draw.text((text_x, text_y), text, fill='black', font=font)
        except Exception as e:
            logger.debug(f'Failed to draw text overlay: {e}')
def process_element_highlight(
    element_id: int,
    element: EnhancedDOMTreeNode,
    draw,
    device_pixel_ratio: float,
    font,
    filter_highlight_ids: bool,
    image_size: tuple[int, int],
) -> None:
    """Draw the highlight (dashed box plus optional index badge) for one element.

    Failures are logged at debug level and swallowed so a single bad element
    cannot break the whole screenshot.

    Args:
        element_id: Key of the element in the selector map (used only in logging).
        element: DOM node; its ``absolute_position`` supplies CSS-pixel bounds.
        draw: PIL ``ImageDraw.Draw`` context for the screenshot.
        device_pixel_ratio: Scale factor from CSS pixels to screenshot pixels.
        font: Fallback font for the index badge.
        filter_highlight_ids: When True, show the numeric id only for elements
            with very short meaningful text (the LLM already sees longer text).
        image_size: (width, height) of the screenshot in device pixels.
    """
    try:
        # Elements without layout coordinates cannot be highlighted.
        if not element.absolute_position:
            return

        bounds = element.absolute_position

        # Scale coordinates from CSS pixels to device pixels: the screenshot
        # is captured at device-pixel resolution, but bounds are CSS pixels.
        x1 = int(bounds.x * device_pixel_ratio)
        y1 = int(bounds.y * device_pixel_ratio)
        x2 = int((bounds.x + bounds.width) * device_pixel_ratio)
        y2 = int((bounds.y + bounds.height) * device_pixel_ratio)

        # Clamp coordinates to the image bounds.
        img_width, img_height = image_size
        x1 = max(0, min(x1, img_width))
        y1 = max(0, min(y1, img_height))
        x2 = max(x1, min(x2, img_width))
        y2 = max(y1, min(y2, img_height))

        # Skip degenerate boxes (fully clipped or sub-2px).
        if x2 - x1 < 2 or y2 - y1 < 2:
            return

        # Pick a colour from the tag name and (for inputs) the type attribute.
        tag_name = element.tag_name if hasattr(element, 'tag_name') else 'div'
        element_type = None
        if hasattr(element, 'attributes') and element.attributes:
            element_type = element.attributes.get('type')

        color = get_element_color(tag_name, element_type)

        # Decide whether to render the numeric id badge.
        backend_node_id = getattr(element, 'backend_node_id', None)
        index_text = None
        if backend_node_id is not None:
            if filter_highlight_ids:
                # Use the meaningful text that matches what the LLM sees.
                meaningful_text = element.get_meaningful_text_for_llm()
                # Show the id only when that text is shorter than 3 characters.
                if len(meaningful_text) < 3:
                    index_text = str(backend_node_id)
            else:
                # Always show the id when filtering is disabled.
                index_text = str(backend_node_id)

        # Draw the dashed box and (optionally) the index badge.
        draw_enhanced_bounding_box_with_text(
            draw, (x1, y1, x2, y2), color, index_text, font, tag_name, image_size, device_pixel_ratio
        )
    except Exception as e:
        logger.debug(f'Failed to draw highlight for element {element_id}: {e}')
@observe_debug(ignore_input=True, ignore_output=True, name='create_highlighted_screenshot')
@time_execution_async('create_highlighted_screenshot')
async def create_highlighted_screenshot(
    screenshot_b64: str,
    selector_map: DOMSelectorMap,
    device_pixel_ratio: float = 1.0,
    viewport_offset_x: int = 0,
    viewport_offset_y: int = 0,
    filter_highlight_ids: bool = True,
) -> str:
    """Create a highlighted screenshot with bounding boxes around interactive elements.

    Args:
        screenshot_b64: Base64 encoded screenshot
        selector_map: Map of interactive elements with their positions
        device_pixel_ratio: Device pixel ratio for scaling coordinates
        viewport_offset_x: X offset for viewport positioning (currently unused here)
        viewport_offset_y: Y offset for viewport positioning (currently unused here)
        filter_highlight_ids: When True, draw numeric ids only for elements with
            little meaningful text (matches what the LLM is shown)

    Returns:
        Base64 encoded highlighted screenshot, or the original screenshot
        unchanged if highlighting fails for any reason.
    """
    try:
        # Decode the screenshot into an RGBA image we can draw on.
        screenshot_data = base64.b64decode(screenshot_b64)
        image = Image.open(io.BytesIO(screenshot_data)).convert('RGBA')

        draw = ImageDraw.Draw(image)

        # Cached system font; None falls back to PIL's default font downstream.
        font = get_cross_platform_font(12)

        # Process elements sequentially: PIL's ImageDraw is not thread-safe,
        # so elements must be drawn one by one.
        for element_id, element in selector_map.items():
            process_element_highlight(element_id, element, draw, device_pixel_ratio, font, filter_highlight_ids, image.size)

        # Re-encode the annotated image back to base64 PNG.
        output_buffer = io.BytesIO()
        try:
            image.save(output_buffer, format='PNG')
            output_buffer.seek(0)
            highlighted_b64 = base64.b64encode(output_buffer.getvalue()).decode('utf-8')
            logger.debug(f'Successfully created highlighted screenshot with {len(selector_map)} elements')
            return highlighted_b64
        finally:
            # Explicit cleanup to prevent memory leaks.
            output_buffer.close()
            if 'image' in locals():
                image.close()
    except Exception as e:
        logger.error(f'Failed to create highlighted screenshot: {e}')
        # Clean up on error as well.
        if 'image' in locals():
            image.close()
        # Fall back to the unannotated screenshot rather than failing.
        return screenshot_b64
async def get_viewport_info_from_cdp(cdp_session) -> tuple[float, int, int]:
    """Fetch viewport metrics from a CDP session.

    Args:
        cdp_session: Object exposing ``cdp_client`` and ``session_id`` for
            issuing Chrome DevTools Protocol commands.

    Returns:
        Tuple of (device_pixel_ratio, scroll_x, scroll_y); falls back to
        (1.0, 0, 0) when the CDP call fails.
    """
    try:
        # Page.getLayoutMetrics returns viewport geometry in both device
        # pixels (visualViewport) and CSS pixels (cssVisualViewport/cssLayoutViewport).
        metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)

        visual_viewport = metrics.get('visualViewport', {})
        css_visual_viewport = metrics.get('cssVisualViewport', {})
        css_layout_viewport = metrics.get('cssLayoutViewport', {})

        # Device pixel ratio = device-pixel width / CSS-pixel width.
        css_width = css_visual_viewport.get('clientWidth', css_layout_viewport.get('clientWidth', 1280.0))
        device_width = visual_viewport.get('clientWidth', css_width)
        device_pixel_ratio = device_width / css_width if css_width > 0 else 1.0

        # Scroll position in CSS pixels.
        scroll_x = int(css_visual_viewport.get('pageX', 0))
        scroll_y = int(css_visual_viewport.get('pageY', 0))

        return float(device_pixel_ratio), scroll_x, scroll_y
    except Exception as e:
        logger.debug(f'Failed to get viewport info from CDP: {e}')
        return 1.0, 0, 0
@time_execution_async('create_highlighted_screenshot_async')
async def create_highlighted_screenshot_async(
    screenshot_b64: str, selector_map: DOMSelectorMap, cdp_session=None, filter_highlight_ids: bool = True
) -> str:
    """Async wrapper for creating highlighted screenshots.

    Queries the CDP session (when given) for the device pixel ratio and
    scroll offsets, renders the highlights, and optionally mirrors the
    result to disk when BROWSER_USE_SCREENSHOT_FILE is set.

    Args:
        screenshot_b64: Base64 encoded screenshot
        selector_map: Map of interactive elements
        cdp_session: CDP session for getting viewport info
        filter_highlight_ids: Whether to filter element IDs based on meaningful text

    Returns:
        Base64 encoded highlighted screenshot
    """
    # Default viewport info used when no CDP session is available.
    device_pixel_ratio = 1.0
    viewport_offset_x = 0
    viewport_offset_y = 0

    if cdp_session:
        try:
            device_pixel_ratio, viewport_offset_x, viewport_offset_y = await get_viewport_info_from_cdp(cdp_session)
        except Exception as e:
            logger.debug(f'Failed to get viewport info from CDP: {e}')

    final_screenshot = await create_highlighted_screenshot(
        screenshot_b64, selector_map, device_pixel_ratio, viewport_offset_x, viewport_offset_y, filter_highlight_ids
    )

    # Optionally mirror the screenshot to a file for debugging.
    filename = os.getenv('BROWSER_USE_SCREENSHOT_FILE')
    if filename:

        def _write_screenshot():
            """Blocking file write; executed in a worker thread."""
            try:
                with open(filename, 'wb') as f:
                    f.write(base64.b64decode(final_screenshot))
                logger.debug(f'Saved screenshot to {filename}')
            except Exception as e:
                # BUG FIX: this message previously logged the literal
                # '(unknown)' instead of the actual target path.
                logger.warning(f'Failed to save screenshot to {filename}: {e}')

        await asyncio.to_thread(_write_screenshot)

    return final_screenshot
# Public API. cleanup_font_cache is exported for long-running applications
# that need to release the module-level font cache.
__all__ = ['create_highlighted_screenshot', 'create_highlighted_screenshot_async', 'cleanup_font_cache']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.