sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
browser-use/browser-use:browser_use/browser/watchdogs/default_action_watchdog.py | """Default browser action handlers using CDP."""
import asyncio
import json
import os
from cdp_use.cdp.input.commands import DispatchKeyEventParameters
from browser_use.actor.utils import get_key_info
from browser_use.browser.events import (
ClickCoordinateEvent,
ClickElementEvent,
GetDropdownOptionsEvent,
GoBackEvent,
GoForwardEvent,
RefreshEvent,
ScrollEvent,
ScrollToTextEvent,
SelectDropdownOptionEvent,
SendKeysEvent,
TypeTextEvent,
UploadFileEvent,
WaitEvent,
)
from browser_use.browser.views import BrowserError, URLNotAllowedError
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.dom.service import EnhancedDOMTreeNode
from browser_use.observability import observe_debug
# Import EnhancedDOMTreeNode and rebuild event models that have forward references to it
# This must be done after all imports are complete
# (pydantic requires model_rebuild() to resolve string/forward annotations once
# the referenced type — EnhancedDOMTreeNode — is actually importable)
ClickCoordinateEvent.model_rebuild()
ClickElementEvent.model_rebuild()
GetDropdownOptionsEvent.model_rebuild()
SelectDropdownOptionEvent.model_rebuild()
TypeTextEvent.model_rebuild()
ScrollEvent.model_rebuild()
UploadFileEvent.model_rebuild()
class DefaultActionWatchdog(BaseWatchdog):
"""Handles default browser actions like click, type, and scroll using CDP."""
async def _execute_click_with_download_detection(
    self,
    click_coro,
    download_start_timeout: float = 0.5,
    download_complete_timeout: float = 30.0,
) -> dict | None:
    """Execute a click operation and automatically wait for any triggered download.

    Temporarily registers start/progress/complete callbacks on the downloads
    watchdog, awaits the click coroutine, then waits up to
    ``download_start_timeout`` for a download to begin and (if one began) up to
    ``download_complete_timeout`` for it to finish. Callbacks are always
    unregistered in the ``finally`` block.

    Args:
        click_coro: Coroutine that performs the click (should return click_metadata dict or None)
        download_start_timeout: Time to wait for download to start after click (seconds)
        download_complete_timeout: Time to wait for download to complete once started (seconds)

    Returns:
        Click metadata dict, potentially with 'download' key containing download info.
        If a download times out but is still in progress, includes 'download_in_progress' with status.
    """
    import time

    download_started = asyncio.Event()
    download_completed = asyncio.Event()
    download_info: dict = {}
    progress_info: dict = {'last_update': 0.0, 'received_bytes': 0, 'total_bytes': 0, 'state': ''}

    def on_download_start(info: dict) -> None:
        """Direct callback when download starts (called from CDP handler)."""
        if info.get('auto_download'):
            return  # ignore auto-downloads
        download_info['guid'] = info.get('guid', '')
        download_info['url'] = info.get('url', '')
        download_info['suggested_filename'] = info.get('suggested_filename', 'download')
        download_started.set()
        self.logger.debug(f'[ClickWithDownload] Download started: {download_info["suggested_filename"]}')

    def on_download_progress(info: dict) -> None:
        """Direct callback when download progress updates (called from CDP handler)."""
        # Match by guid if available
        if download_info.get('guid') and info.get('guid') != download_info['guid']:
            return  # different download
        progress_info['last_update'] = time.time()
        progress_info['received_bytes'] = info.get('received_bytes', 0)
        progress_info['total_bytes'] = info.get('total_bytes', 0)
        progress_info['state'] = info.get('state', '')
        self.logger.debug(
            f'[ClickWithDownload] Progress: {progress_info["received_bytes"]}/{progress_info["total_bytes"]} bytes ({progress_info["state"]})'
        )

    def on_download_complete(info: dict) -> None:
        """Direct callback when download completes (called from CDP handler)."""
        if info.get('auto_download'):
            return  # ignore auto-downloads
        # Match by guid if available, otherwise accept any non-auto download
        if download_info.get('guid') and info.get('guid') and info.get('guid') != download_info['guid']:
            return  # different download
        download_info['path'] = info.get('path', '')
        download_info['file_name'] = info.get('file_name', '')
        download_info['file_size'] = info.get('file_size', 0)
        download_info['file_type'] = info.get('file_type')
        download_info['mime_type'] = info.get('mime_type')
        download_completed.set()
        self.logger.debug(f'[ClickWithDownload] Download completed: {download_info["file_name"]}')

    # Get the downloads watchdog and register direct callbacks
    downloads_watchdog = self.browser_session._downloads_watchdog
    self.logger.debug(f'[ClickWithDownload] downloads_watchdog={downloads_watchdog is not None}')
    if downloads_watchdog:
        self.logger.debug('[ClickWithDownload] Registering download callbacks...')
        downloads_watchdog.register_download_callbacks(
            on_start=on_download_start,
            on_progress=on_download_progress,
            on_complete=on_download_complete,
        )
    else:
        self.logger.warning('[ClickWithDownload] No downloads_watchdog available!')
    try:
        # Perform the click
        click_metadata = await click_coro
        # Check for validation errors - return them immediately without waiting for downloads
        if isinstance(click_metadata, dict) and 'validation_error' in click_metadata:
            return click_metadata
        # Wait briefly to see if a download starts
        try:
            await asyncio.wait_for(download_started.wait(), timeout=download_start_timeout)
            # Download started!
            self.logger.info(f'📥 Download started: {download_info.get("suggested_filename", "unknown")}')
            # Now wait for it to complete with longer timeout
            try:
                await asyncio.wait_for(download_completed.wait(), timeout=download_complete_timeout)
                # Download completed successfully
                msg = f'Downloaded file: {download_info["file_name"]} ({download_info["file_size"]} bytes) saved to {download_info["path"]}'
                self.logger.info(f'💾 {msg}')
                # Merge download info into click_metadata
                if click_metadata is None:
                    click_metadata = {}
                click_metadata['download'] = {
                    'path': download_info['path'],
                    'file_name': download_info['file_name'],
                    'file_size': download_info['file_size'],
                    'file_type': download_info.get('file_type'),
                    'mime_type': download_info.get('mime_type'),
                }
            except TimeoutError:
                # Download timed out - check if it's still in progress
                if click_metadata is None:
                    click_metadata = {}
                filename = download_info.get('suggested_filename', 'unknown')
                received = progress_info.get('received_bytes', 0)
                total = progress_info.get('total_bytes', 0)
                state = progress_info.get('state', 'unknown')
                last_update = progress_info.get('last_update', 0.0)
                time_since_update = time.time() - last_update if last_update > 0 else float('inf')
                # Check if download is still actively progressing (received update in last 5 seconds)
                is_still_active = time_since_update < 5.0 and state == 'inProgress'
                if is_still_active:
                    # Download is still progressing - suggest waiting
                    if total > 0:
                        percent = (received / total) * 100
                        progress_str = f'{percent:.1f}% ({received:,}/{total:,} bytes)'
                    else:
                        progress_str = f'{received:,} bytes downloaded (total size unknown)'
                    # BUGFIX: interpolate the actual filename; the message previously
                    # contained the literal text '(unknown)' while `filename` went unused.
                    msg = (
                        f'Download timed out after {download_complete_timeout}s but is still in progress: '
                        f'{filename} - {progress_str}. '
                        f'The download appears to be progressing normally. Consider using the wait action '
                        f'to allow more time for the download to complete.'
                    )
                    self.logger.warning(f'⏱️ {msg}')
                    click_metadata['download_in_progress'] = {
                        'file_name': filename,
                        'received_bytes': received,
                        'total_bytes': total,
                        'state': state,
                        'message': msg,
                    }
                else:
                    # Download may be stalled or completed
                    if received > 0:
                        msg = (
                            f'Download timed out after {download_complete_timeout}s: {filename}. '
                            f'Last progress: {received:,} bytes received. '
                            f'The download may have stalled or completed - check the downloads folder.'
                        )
                    else:
                        msg = (
                            f'Download timed out after {download_complete_timeout}s: {filename}. '
                            f'No progress data received - the download may have failed to start properly.'
                        )
                    self.logger.warning(f'⏱️ {msg}')
                    click_metadata['download_timeout'] = {
                        'file_name': filename,
                        'received_bytes': received,
                        'total_bytes': total,
                        'message': msg,
                    }
        except TimeoutError:
            # No download started within grace period
            pass
        return click_metadata if isinstance(click_metadata, dict) else None
    finally:
        # Unregister download callbacks
        if downloads_watchdog:
            downloads_watchdog.unregister_download_callbacks(
                on_start=on_download_start,
                on_progress=on_download_progress,
                on_complete=on_download_complete,
            )
def _is_print_related_element(self, element_node: EnhancedDOMTreeNode) -> bool:
    """Return True when the element's onclick handler looks print-related.

    The onclick attribute is the only signal inspected here: handlers such
    as window.print(), PrintElem() or print() all contain the substring
    'print', which is matched case-insensitively.
    """
    attributes = element_node.attributes or {}
    handler = attributes.get('onclick', '').lower()
    # An empty handler can never contain 'print', so the membership test
    # alone reproduces the original truthy-and-substring check.
    return 'print' in handler
async def _handle_print_button_click(self, element_node: EnhancedDOMTreeNode) -> dict | None:
    """Handle print button by directly generating PDF via CDP instead of opening dialog.

    Generates the PDF with Page.printToPDF, writes it to the configured
    downloads directory (deduplicating the filename), and dispatches a
    FileDownloadedEvent so the rest of the system sees it as a download.

    Returns:
        Metadata dict with download path if successful, None otherwise
    """
    try:
        import base64
        from pathlib import Path

        # NOTE: `os` is already imported at module level; the previous local
        # `import os` here was redundant shadowing and has been removed.
        # Get CDP session
        cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
        # Generate PDF using CDP Page.printToPDF
        result = await asyncio.wait_for(
            cdp_session.cdp_client.send.Page.printToPDF(
                params={
                    'printBackground': True,
                    'preferCSSPageSize': True,
                },
                session_id=cdp_session.session_id,
            ),
            timeout=15.0,  # 15 second timeout for PDF generation
        )
        pdf_data = result.get('data')
        if not pdf_data:
            self.logger.warning('⚠️ PDF generation returned no data')
            return None
        # Decode base64 PDF data
        pdf_bytes = base64.b64decode(pdf_data)
        # Get downloads path
        downloads_path = self.browser_session.browser_profile.downloads_path
        if not downloads_path:
            self.logger.warning('⚠️ No downloads path configured, cannot save PDF')
            return None
        # Generate filename from page title or URL
        try:
            page_title = await asyncio.wait_for(self.browser_session.get_current_page_title(), timeout=2.0)
            # Sanitize title for filename
            import re

            safe_title = re.sub(r'[^\w\s-]', '', page_title)[:50]  # Max 50 chars
            filename = f'{safe_title}.pdf' if safe_title else 'print.pdf'
        except Exception:
            filename = 'print.pdf'
        # Ensure downloads directory exists
        downloads_dir = Path(downloads_path).expanduser().resolve()
        downloads_dir.mkdir(parents=True, exist_ok=True)
        # Generate unique filename if file exists
        final_path = downloads_dir / filename
        if final_path.exists():
            base, ext = os.path.splitext(filename)
            counter = 1
            while (downloads_dir / f'{base} ({counter}){ext}').exists():
                counter += 1
            final_path = downloads_dir / f'{base} ({counter}){ext}'
        # Write PDF to file (async to avoid blocking the event loop)
        import anyio

        async with await anyio.open_file(final_path, 'wb') as f:
            await f.write(pdf_bytes)
        file_size = final_path.stat().st_size
        self.logger.info(f'✅ Generated PDF via CDP: {final_path} ({file_size:,} bytes)')
        # Dispatch FileDownloadedEvent
        from browser_use.browser.events import FileDownloadedEvent

        page_url = await self.browser_session.get_current_page_url()
        self.browser_session.event_bus.dispatch(
            FileDownloadedEvent(
                url=page_url,
                path=str(final_path),
                file_name=final_path.name,
                file_size=file_size,
                file_type='pdf',
                mime_type='application/pdf',
                auto_download=False,  # This was intentional (user clicked print)
            )
        )
        return {'pdf_generated': True, 'path': str(final_path)}
    except TimeoutError:
        self.logger.warning('⏱️ PDF generation timed out')
        return None
    except Exception as e:
        self.logger.warning(f'⚠️ Failed to generate PDF via CDP: {type(e).__name__}: {e}')
        return None
@observe_debug(ignore_input=True, ignore_output=True, name='click_element_event')
async def on_ClickElementEvent(self, event: ClickElementEvent) -> dict | None:
    """Handle click request with CDP. Automatically waits for file downloads if triggered.

    Rejects file inputs (use upload actions instead), short-circuits print
    buttons into direct PDF generation, and otherwise clicks the node with
    download detection.

    Returns:
        Click metadata dict (possibly containing 'validation_error', 'download'
        or 'pdf_generated' keys) or None.

    Raises:
        BrowserError: If the browser session has no focused target.
    """
    # The previous `try: ... except Exception: raise` wrapper was a no-op and
    # has been removed; exceptions propagate unchanged.
    # Check if session is alive before attempting any operations
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'Cannot execute click: browser session is corrupted (target_id=None). Session may have crashed.'
        self.logger.error(f'{error_msg}')
        raise BrowserError(error_msg)
    # Use the provided node
    element_node = event.node
    index_for_logging = element_node.backend_node_id or 'unknown'
    # Check if element is a file input (should not be clicked)
    if self.browser_session.is_file_input(element_node):
        msg = f'Index {index_for_logging} - has an element which opens file upload dialog. To upload files please use a specific function to upload files'
        self.logger.info(f'{msg}')
        return {'validation_error': msg}
    # Detect print-related elements and handle them specially
    is_print_element = self._is_print_related_element(element_node)
    if is_print_element:
        self.logger.info(
            f'🖨️ Detected print button (index {index_for_logging}), generating PDF directly instead of opening dialog...'
        )
        click_metadata = await self._handle_print_button_click(element_node)
        if click_metadata and click_metadata.get('pdf_generated'):
            msg = f'Generated PDF: {click_metadata.get("path")}'
            self.logger.info(f'💾 {msg}')
            return click_metadata
        else:
            self.logger.warning('⚠️ PDF generation failed, falling back to regular click')
    # Execute click with automatic download detection
    click_metadata = await self._execute_click_with_download_detection(self._click_element_node_impl(element_node))
    # Check for validation errors
    if isinstance(click_metadata, dict) and 'validation_error' in click_metadata:
        self.logger.info(f'{click_metadata["validation_error"]}')
        return click_metadata
    # Build success message for non-download clicks
    if 'download' not in (click_metadata or {}):
        msg = f'Clicked button {element_node.node_name}: {element_node.get_all_children_text(max_depth=2)}'
        self.logger.debug(f'🖱️ {msg}')
        self.logger.debug(f'Element xpath: {element_node.xpath}')
    return click_metadata
async def on_ClickCoordinateEvent(self, event: ClickCoordinateEvent) -> dict | None:
    """Handle click at coordinates with CDP. Automatically waits for file downloads if triggered.

    With ``event.force`` set, clicks immediately; otherwise resolves the element
    under the coordinates and applies safety checks (file input, <select>,
    print button) before clicking.

    Returns:
        Click metadata dict (possibly with 'validation_error') or None.

    Raises:
        BrowserError: If the browser session has no focused target.
    """
    # The previous `try: ... except Exception: raise` wrapper was a no-op and
    # has been removed; exceptions propagate unchanged.
    # Check if session is alive before attempting any operations
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'Cannot execute click: browser session is corrupted (target_id=None). Session may have crashed.'
        self.logger.error(f'{error_msg}')
        raise BrowserError(error_msg)
    # If force=True, skip safety checks and click directly (with download detection)
    if event.force:
        self.logger.debug(f'Force clicking at coordinates ({event.coordinate_x}, {event.coordinate_y})')
        return await self._execute_click_with_download_detection(
            self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=True)
        )
    # Get element at coordinates for safety checks
    element_node = await self.browser_session.get_dom_element_at_coordinates(event.coordinate_x, event.coordinate_y)
    if element_node is None:
        # No element found, click directly (with download detection)
        self.logger.debug(
            f'No element found at coordinates ({event.coordinate_x}, {event.coordinate_y}), proceeding with click anyway'
        )
        return await self._execute_click_with_download_detection(
            self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=False)
        )
    # Safety check: file input
    if self.browser_session.is_file_input(element_node):
        msg = f'Cannot click at ({event.coordinate_x}, {event.coordinate_y}) - element is a file input. To upload files please use upload_file action'
        self.logger.info(f'{msg}')
        return {'validation_error': msg}
    # Safety check: select element
    tag_name = element_node.tag_name.lower() if element_node.tag_name else ''
    if tag_name == 'select':
        msg = f'Cannot click at ({event.coordinate_x}, {event.coordinate_y}) - element is a <select>. Use dropdown_options action instead.'
        self.logger.info(f'{msg}')
        return {'validation_error': msg}
    # Safety check: print-related elements
    is_print_element = self._is_print_related_element(element_node)
    if is_print_element:
        self.logger.info(
            f'🖨️ Detected print button at ({event.coordinate_x}, {event.coordinate_y}), generating PDF directly instead of opening dialog...'
        )
        click_metadata = await self._handle_print_button_click(element_node)
        if click_metadata and click_metadata.get('pdf_generated'):
            msg = f'Generated PDF: {click_metadata.get("path")}'
            self.logger.info(f'💾 {msg}')
            return click_metadata
        else:
            self.logger.warning('⚠️ PDF generation failed, falling back to regular click')
    # All safety checks passed, click at coordinates (with download detection)
    return await self._execute_click_with_download_detection(
        self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=False)
    )
async def on_TypeTextEvent(self, event: TypeTextEvent) -> dict | None:
    """Handle text input request with CDP.

    Types into the provided element when it has a backend_node_id; otherwise
    (or on element failure) falls back to typing into the page so whatever
    currently has focus receives the keystrokes. Sensitive values are never
    echoed to the log.

    Returns:
        Metadata dict with input coordinates when element typing succeeded,
        otherwise None (page-level typing has no coordinates).
    """
    # The previous `try: ... except Exception as e: raise` wrapper was a no-op
    # and has been removed; exceptions propagate unchanged.
    # Use the provided node
    element_node = event.node
    index_for_logging = element_node.backend_node_id or 'unknown'
    # Check if this is index 0 or a falsy index - type to the page (whatever has focus)
    if not element_node.backend_node_id or element_node.backend_node_id == 0:
        # Type to the page without focusing any specific element
        await self._type_to_page(event.text)
        # Log with sensitive data protection
        if event.is_sensitive:
            if event.sensitive_key_name:
                self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> to the page (current focus)')
            else:
                self.logger.info('⌨️ Typed <sensitive> to the page (current focus)')
        else:
            self.logger.info(f'⌨️ Typed "{event.text}" to the page (current focus)')
        return None  # No coordinates available for page typing
    try:
        # Try to type to the specific element
        input_metadata = await self._input_text_element_node_impl(
            element_node,
            event.text,
            clear=event.clear or (not event.text),
            is_sensitive=event.is_sensitive,
        )
        # Log with sensitive data protection
        if event.is_sensitive:
            if event.sensitive_key_name:
                self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> into element with index {index_for_logging}')
            else:
                self.logger.info(f'⌨️ Typed <sensitive> into element with index {index_for_logging}')
        else:
            self.logger.info(f'⌨️ Typed "{event.text}" into element with index {index_for_logging}')
        self.logger.debug(f'Element xpath: {element_node.xpath}')
        # Note: We don't clear cached state here - let multi_act handle DOM change detection
        # by explicitly rebuilding and comparing when needed
        return input_metadata  # Return coordinates if available
    except Exception as e:
        # Element not found or error - fall back to typing to the page
        self.logger.warning(f'Failed to type to element {index_for_logging}: {e}. Falling back to page typing.')
        try:
            # Best-effort click to focus the element first; failures are ignored.
            # (The previous `except Exception as e: pass` bound an unused name
            # that shadowed the outer exception variable.)
            await asyncio.wait_for(self._click_element_node_impl(element_node), timeout=10.0)
        except Exception:
            pass
        await self._type_to_page(event.text)
        # Log with sensitive data protection
        if event.is_sensitive:
            if event.sensitive_key_name:
                self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> to the page as fallback')
            else:
                self.logger.info('⌨️ Typed <sensitive> to the page as fallback')
        else:
            self.logger.info(f'⌨️ Typed "{event.text}" to the page as fallback')
        return None  # No coordinates available for fallback typing
async def on_ScrollEvent(self, event: ScrollEvent) -> None:
    """Handle scroll request with CDP.

    Scrolls a specific element's container when ``event.node`` is provided
    (falling back to a target-level scroll if that fails), otherwise performs
    a target-level scroll gesture directly.

    Raises:
        BrowserError: If there is no active target to scroll.
    """
    # The previous `try: ... except Exception as e: raise` wrapper was a no-op
    # and has been removed; exceptions propagate unchanged.
    # Check if we have a current target for scrolling
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'No active target for scrolling'
        raise BrowserError(error_msg)
    # Convert direction and amount to pixels
    # Positive pixels = scroll down, negative = scroll up
    pixels = event.amount if event.direction == 'down' else -event.amount
    # Element-specific scrolling if node is provided
    if event.node is not None:
        element_node = event.node
        index_for_logging = element_node.backend_node_id or 'unknown'
        # Check if the element is an iframe
        is_iframe = element_node.tag_name and element_node.tag_name.upper() == 'IFRAME'
        # Try to scroll the element's container
        success = await self._scroll_element_container(element_node, pixels)
        if success:
            self.logger.debug(
                f'📜 Scrolled element {index_for_logging} container {event.direction} by {event.amount} pixels'
            )
            # For iframe scrolling, we need to force a full DOM refresh
            # because the iframe's content has changed position
            if is_iframe:
                self.logger.debug('🔄 Forcing DOM refresh after iframe scroll')
                # Note: We don't clear cached state here - let multi_act handle DOM change detection
                # by explicitly rebuilding and comparing when needed
                # Wait a bit for the scroll to settle and DOM to update
                await asyncio.sleep(0.2)
            return None
    # Perform target-level scroll
    await self._scroll_with_cdp_gesture(pixels)
    # Note: We don't clear cached state here - let multi_act handle DOM change detection
    # by explicitly rebuilding and comparing when needed
    # Log success
    self.logger.debug(f'📜 Scrolled {event.direction} by {event.amount} pixels')
    return None
# ========== Implementation Methods ==========
async def _check_element_occlusion(self, backend_node_id: int, x: float, y: float, cdp_session) -> bool:
    """Check if an element is occluded by other elements at the given coordinates.

    Resolves the backend node to a JS object and runs an in-page function that
    compares it to document.elementFromPoint(x, y): the element counts as
    clickable if it is the point element, contains it, is contained by it, or
    is semantically related to it via label/input association.

    Args:
        backend_node_id: The backend node ID of the target element
        x: X coordinate to check
        y: Y coordinate to check
        cdp_session: CDP session to use

    Returns:
        True if element is occluded, False if clickable
    """
    try:
        session_id = cdp_session.session_id
        # Get target element info for comparison
        target_result = await cdp_session.cdp_client.send.DOM.resolveNode(
            params={'backendNodeId': backend_node_id}, session_id=session_id
        )
        # Fail closed: if the node can't be resolved, report "occluded" so the
        # caller falls back to a JavaScript click rather than a blind CDP click.
        if 'object' not in target_result:
            self.logger.debug('Could not resolve target element, assuming occluded')
            return True
        object_id = target_result['object']['objectId']
        # Get target element info
        target_info_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'objectId': object_id,
                'functionDeclaration': """
                function() {
                    const getElementInfo = (el) => {
                        return {
                            tagName: el.tagName,
                            id: el.id || '',
                            className: el.className || '',
                            textContent: (el.textContent || '').substring(0, 100)
                        };
                    };
                    const elementAtPoint = document.elementFromPoint(arguments[0], arguments[1]);
                    if (!elementAtPoint) {
                        return { targetInfo: getElementInfo(this), isClickable: false };
                    }
                    // Simple containment-based clickability logic
                    let isClickable = this === elementAtPoint ||
                        this.contains(elementAtPoint) ||
                        elementAtPoint.contains(this);
                    // Check label-input associations when containment check fails
                    if (!isClickable) {
                        const target = this;
                        const atPoint = elementAtPoint;
                        // Case 1: target is <input>, atPoint is its associated <label> (or child of that label)
                        if (target.tagName === 'INPUT' && target.id) {
                            const escapedId = CSS.escape(target.id);
                            const assocLabel = document.querySelector('label[for="' + escapedId + '"]');
                            if (assocLabel && (assocLabel === atPoint || assocLabel.contains(atPoint))) {
                                isClickable = true;
                            }
                        }
                        // Case 2: target is <input>, atPoint is inside a <label> ancestor that wraps the target
                        if (!isClickable && target.tagName === 'INPUT') {
                            let ancestor = atPoint;
                            for (let i = 0; i < 3 && ancestor; i++) {
                                if (ancestor.tagName === 'LABEL' && ancestor.contains(target)) {
                                    isClickable = true;
                                    break;
                                }
                                ancestor = ancestor.parentElement;
                            }
                        }
                        // Case 3: target is <label>, atPoint is the associated <input>
                        if (!isClickable && target.tagName === 'LABEL') {
                            if (target.htmlFor && atPoint.tagName === 'INPUT' && atPoint.id === target.htmlFor) {
                                isClickable = true;
                            }
                            // Also check if atPoint is an input inside the label
                            if (!isClickable && atPoint.tagName === 'INPUT' && target.contains(atPoint)) {
                                isClickable = true;
                            }
                        }
                    }
                    return {
                        targetInfo: getElementInfo(this),
                        elementAtPointInfo: getElementInfo(elementAtPoint),
                        isClickable: isClickable
                    };
                }
                """,
                'arguments': [{'value': x}, {'value': y}],
                'returnByValue': True,
            },
            session_id=session_id,
        )
        # Same fail-closed policy as above when the evaluation yields no value.
        if 'result' not in target_info_result or 'value' not in target_info_result['result']:
            self.logger.debug('Could not get target element info, assuming occluded')
            return True
        target_data = target_info_result['result']['value']
        is_clickable = target_data.get('isClickable', False)
        if is_clickable:
            self.logger.debug('Element is clickable (target, contained, or semantically related)')
            return False
        else:
            target_info = target_data.get('targetInfo', {})
            element_at_point_info = target_data.get('elementAtPointInfo', {})
            self.logger.debug(
                f'Element is occluded. Target: {target_info.get("tagName", "unknown")} '
                f'(id={target_info.get("id", "none")}), '
                f'ElementAtPoint: {element_at_point_info.get("tagName", "unknown")} '
                f'(id={element_at_point_info.get("id", "none")})'
            )
            return True
    except Exception as e:
        # If the check itself errors out, assume NOT occluded so the normal
        # CDP click path is still attempted (best-effort heuristic).
        self.logger.debug(f'Occlusion check failed: {e}, assuming not occluded')
        return False
async def _click_element_node_impl(self, element_node: EnhancedDOMTreeNode) -> dict | None:
    """
    Click an element using pure CDP with multiple fallback methods for getting element geometry.

    Flow: validate element kind -> resolve frame-local CDP session -> scroll
    into view -> compute clickable quad -> occlusion check -> dispatch real
    mouse events. Falls back to a JavaScript `this.click()` whenever geometry
    is missing, the element is occluded, or the CDP mouse events fail.

    Args:
        element_node: The DOM element to click

    Returns:
        {'click_x', 'click_y'} when a coordinate-based click was dispatched,
        {'validation_error': ...} for non-clickable element kinds,
        or None when a JavaScript-click fallback was used.

    Raises:
        BrowserError: With a long_term_memory hint when all click strategies fail.
    """
    try:
        # Check if element is a file input or select dropdown - these should not be clicked
        tag_name = element_node.tag_name.lower() if element_node.tag_name else ''
        element_type = element_node.attributes.get('type', '').lower() if element_node.attributes else ''
        if tag_name == 'select':
            msg = f'Cannot click on <select> elements. Use dropdown_options(index={element_node.backend_node_id}) action instead.'
            # Return error dict instead of raising to avoid ERROR logs
            return {'validation_error': msg}
        if tag_name == 'input' and element_type == 'file':
            msg = f'Cannot click on file input element (index={element_node.backend_node_id}). File uploads must be handled using upload_file_to_element action.'
            # Return error dict instead of raising to avoid ERROR logs
            return {'validation_error': msg}
        # Get CDP client
        cdp_session = await self.browser_session.cdp_client_for_node(element_node)
        # Get the correct session ID for the element's frame
        session_id = cdp_session.session_id
        # Get element bounds
        backend_node_id = element_node.backend_node_id
        # Get viewport dimensions for visibility checks
        layout_metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=session_id)
        viewport_width = layout_metrics['layoutViewport']['clientWidth']
        viewport_height = layout_metrics['layoutViewport']['clientHeight']
        # Scroll element into view FIRST before getting coordinates
        try:
            await cdp_session.cdp_client.send.DOM.scrollIntoViewIfNeeded(
                params={'backendNodeId': backend_node_id}, session_id=session_id
            )
            await asyncio.sleep(0.05)  # Wait for scroll to complete
            self.logger.debug('Scrolled element into view before getting coordinates')
        except Exception as e:
            # Non-fatal: coordinates may still be valid without the scroll
            self.logger.debug(f'Failed to scroll element into view: {e}')
        # Get element coordinates using the unified method AFTER scrolling
        element_rect = await self.browser_session.get_element_coordinates(backend_node_id, cdp_session)
        # Convert rect to quads format if we got coordinates
        quads = []
        if element_rect:
            # Convert DOMRect to quad format (clockwise corner points: x1,y1,...,x4,y4)
            x, y, w, h = element_rect.x, element_rect.y, element_rect.width, element_rect.height
            quads = [
                [
                    x,
                    y,  # top-left
                    x + w,
                    y,  # top-right
                    x + w,
                    y + h,  # bottom-right
                    x,
                    y + h,  # bottom-left
                ]
            ]
            self.logger.debug(
                f'Got coordinates from unified method: {element_rect.x}, {element_rect.y}, {element_rect.width}x{element_rect.height}'
            )
        # If we still don't have quads, fall back to JS click
        if not quads:
            self.logger.warning('Could not get element geometry from any method, falling back to JavaScript click')
            try:
                result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': backend_node_id},
                    session_id=session_id,
                )
                assert 'object' in result and 'objectId' in result['object'], (
                    'Failed to find DOM element based on backendNodeId, maybe page content changed?'
                )
                object_id = result['object']['objectId']
                await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'functionDeclaration': 'function() { this.click(); }',
                        'objectId': object_id,
                    },
                    session_id=session_id,
                )
                await asyncio.sleep(0.05)
                # Navigation is handled by BrowserSession via events
                return None
            except Exception as js_e:
                self.logger.warning(f'CDP JavaScript click also failed: {js_e}')
                if 'No node with given id found' in str(js_e):
                    raise Exception('Element with given id not found')
                else:
                    raise Exception(f'Failed to click element: {js_e}')
        # Find the largest visible quad within the viewport
        best_quad = None
        best_area = 0
        for quad in quads:
            if len(quad) < 8:
                continue
            # Calculate quad bounds (even indices are x coords, odd are y coords)
            xs = [quad[i] for i in range(0, 8, 2)]
            ys = [quad[i] for i in range(1, 8, 2)]
            min_x, max_x = min(xs), max(xs)
            min_y, max_y = min(ys), max(ys)
            # Check if quad intersects with viewport
            if max_x < 0 or max_y < 0 or min_x > viewport_width or min_y > viewport_height:
                continue  # Quad is completely outside viewport
            # Calculate visible area (intersection with viewport)
            visible_min_x = max(0, min_x)
            visible_max_x = min(viewport_width, max_x)
            visible_min_y = max(0, min_y)
            visible_max_y = min(viewport_height, max_y)
            visible_width = visible_max_x - visible_min_x
            visible_height = visible_max_y - visible_min_y
            visible_area = visible_width * visible_height
            if visible_area > best_area:
                best_area = visible_area
                best_quad = quad
        if not best_quad:
            # No visible quad found, use the first quad anyway
            best_quad = quads[0]
            self.logger.warning('No visible quad found, using first quad')
        # Calculate center point of the best quad
        center_x = sum(best_quad[i] for i in range(0, 8, 2)) / 4
        center_y = sum(best_quad[i] for i in range(1, 8, 2)) / 4
        # Ensure click point is within viewport bounds
        center_x = max(0, min(viewport_width - 1, center_x))
        center_y = max(0, min(viewport_height - 1, center_y))
        # Check for occlusion before attempting CDP click
        is_occluded = await self._check_element_occlusion(backend_node_id, center_x, center_y, cdp_session)
        if is_occluded:
            self.logger.debug('🚫 Element is occluded, falling back to JavaScript click')
            try:
                result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': backend_node_id},
                    session_id=session_id,
                )
                assert 'object' in result and 'objectId' in result['object'], (
                    'Failed to find DOM element based on backendNodeId'
                )
                object_id = result['object']['objectId']
                await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'functionDeclaration': 'function() { this.click(); }',
                        'objectId': object_id,
                    },
                    session_id=session_id,
                )
                await asyncio.sleep(0.05)
                return None
            except Exception as js_e:
                self.logger.error(f'JavaScript click fallback failed: {js_e}')
                raise Exception(f'Failed to click occluded element: {js_e}')
        # Perform the click using CDP (element is not occluded)
        try:
            self.logger.debug(f'👆 Dragging mouse over element before clicking x: {center_x}px y: {center_y}px ...')
            # Move mouse to element
            await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mouseMoved',
                    'x': center_x,
                    'y': center_y,
                },
                session_id=session_id,
            )
            await asyncio.sleep(0.05)
            # Mouse down
            self.logger.debug(f'👆🏾 Clicking x: {center_x}px y: {center_y}px ...')
            try:
                await asyncio.wait_for(
                    cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                        params={
                            'type': 'mousePressed',
                            'x': center_x,
                            'y': center_y,
                            'button': 'left',
                            'clickCount': 1,
                        },
                        session_id=session_id,
                    ),
                    timeout=3.0,  # 3 second timeout for mousePressed
                )
                await asyncio.sleep(0.08)
            except TimeoutError:
                self.logger.debug('⏱️ Mouse down timed out (likely due to dialog), continuing...')
                # Don't sleep if we timed out
            # Mouse up
            try:
                await asyncio.wait_for(
                    cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                        params={
                            'type': 'mouseReleased',
                            'x': center_x,
                            'y': center_y,
                            'button': 'left',
                            'clickCount': 1,
                        },
                        session_id=session_id,
                    ),
                    timeout=5.0,  # 5 second timeout for mouseReleased
                )
            except TimeoutError:
                self.logger.debug('⏱️ Mouse up timed out (possibly due to lag or dialog popup), continuing...')
            self.logger.debug('🖱️ Clicked successfully using x,y coordinates')
            # Return coordinates as dict for metadata
            return {'click_x': center_x, 'click_y': center_y}
        except Exception as e:
            self.logger.warning(f'CDP click failed: {type(e).__name__}: {e}')
            # Fall back to JavaScript click via CDP
            try:
                result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': backend_node_id},
                    session_id=session_id,
                )
                assert 'object' in result and 'objectId' in result['object'], (
                    'Failed to find DOM element based on backendNodeId, maybe page content changed?'
                )
                object_id = result['object']['objectId']
                await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'functionDeclaration': 'function() { this.click(); }',
                        'objectId': object_id,
                    },
                    session_id=session_id,
                )
                # Small delay for dialog dismissal
                await asyncio.sleep(0.1)
                return None
            except Exception as js_e:
                self.logger.warning(f'CDP JavaScript click also failed: {js_e}')
                # NOTE: reports the original CDP error `e`, not `js_e` — presumably
                # intentional so the first failure is surfaced; confirm before changing.
                raise Exception(f'Failed to click element: {e}')
        finally:
            # Always re-focus back to original top-level page session context in case click opened a new tab/popup/window/dialog/etc.
            # Use timeout to prevent hanging if dialog is blocking
            try:
                cdp_session = await asyncio.wait_for(self.browser_session.get_or_create_cdp_session(focus=True), timeout=3.0)
                await asyncio.wait_for(
                    cdp_session.cdp_client.send.Runtime.runIfWaitingForDebugger(session_id=cdp_session.session_id),
                    timeout=2.0,
                )
            except TimeoutError:
                self.logger.debug('⏱️ Refocus after click timed out (page may be blocked by dialog). Continuing...')
            except Exception as e:
                self.logger.debug(f'⚠️ Refocus error (non-critical): {type(e).__name__}: {e}')
    except URLNotAllowedError as e:
        # URL policy violations propagate unchanged
        raise e
    except BrowserError as e:
        # Already a structured browser error - propagate unchanged
        raise e
    except Exception as e:
        # Extract key element info for error message
        element_info = f'<{element_node.tag_name or "unknown"}'
        if element_node.backend_node_id:
            element_info += f' index={element_node.backend_node_id}'
        element_info += '>'
        # Create helpful error message based on context
        error_detail = f'Failed to click element {element_info}. The element may not be interactable or visible.'
        # Add hint if element has index (common in code-use mode)
        if element_node.backend_node_id:
            error_detail += f' If the page changed after navigation/interaction, the index [{element_node.backend_node_id}] may be stale. Get fresh browser state before retrying.'
        raise BrowserError(
            message=f'Failed to click element: {str(e)}',
            long_term_memory=error_detail,
        )
async def _click_on_coordinate(self, coordinate_x: int, coordinate_y: int, force: bool = False) -> dict | None:
    """
    Click directly at a viewport position using CDP Input.dispatchMouseEvent.

    The pointer is first moved to the target, then a left-button press/release
    pair is dispatched. Press and release are individually wrapped in timeouts
    because a click can open a blocking dialog that stalls the CDP call.

    Args:
        coordinate_x: X coordinate in viewport
        coordinate_y: Y coordinate in viewport
        force: If True, skip all safety checks (used when force=True in event)

    Returns:
        Dict with click coordinates or None

    Raises:
        BrowserError: If any CDP dispatch fails outright.
    """
    try:
        session = await self.browser_session.get_or_create_cdp_session()
        dispatch_mouse = session.cdp_client.send.Input.dispatchMouseEvent

        def _button_params(event_type: str) -> dict:
            # Shared payload for the press/release pair of a single left click.
            return {
                'type': event_type,
                'x': coordinate_x,
                'y': coordinate_y,
                'button': 'left',
                'clickCount': 1,
            }

        self.logger.debug(f'👆 Moving mouse to ({coordinate_x}, {coordinate_y})...')
        await dispatch_mouse(
            params={'type': 'mouseMoved', 'x': coordinate_x, 'y': coordinate_y},
            session_id=session.session_id,
        )
        await asyncio.sleep(0.05)

        self.logger.debug(f'👆🏾 Clicking at ({coordinate_x}, {coordinate_y})...')
        try:
            await asyncio.wait_for(
                dispatch_mouse(params=_button_params('mousePressed'), session_id=session.session_id),
                timeout=3.0,
            )
            await asyncio.sleep(0.05)
        except TimeoutError:
            self.logger.debug('⏱️ Mouse down timed out (likely due to dialog), continuing...')

        try:
            await asyncio.wait_for(
                dispatch_mouse(params=_button_params('mouseReleased'), session_id=session.session_id),
                timeout=5.0,
            )
        except TimeoutError:
            self.logger.debug('⏱️ Mouse up timed out (possibly due to lag or dialog popup), continuing...')

        self.logger.debug(f'🖱️ Clicked successfully at ({coordinate_x}, {coordinate_y})')
        # Report where the click landed so callers can record it as metadata.
        return {'click_x': coordinate_x, 'click_y': coordinate_y}
    except Exception as e:
        self.logger.error(f'Failed to click at coordinates ({coordinate_x}, {coordinate_y}): {type(e).__name__}: {e}')
        raise BrowserError(
            message=f'Failed to click at coordinates: {e}',
            long_term_memory=f'Failed to click at coordinates ({coordinate_x}, {coordinate_y}). The coordinates may be outside viewport or the page may have changed.',
        )
async def _type_to_page(self, text: str):
    """
    Type text to the page (whatever element currently has focus).
    This is used when index is 0 or when an element can't be found.
    """
    try:
        session = await self.browser_session.get_or_create_cdp_session(target_id=None, focus=True)
        send_key = session.cdp_client.send.Input.dispatchKeyEvent
        # Common payload for the Enter keyDown/keyUp pair.
        enter_key = {'key': 'Enter', 'code': 'Enter', 'windowsVirtualKeyCode': 13}

        for char in text:
            if char == '\n':
                # Newlines are sent as a full Enter press: keyDown -> char('\r') -> keyUp.
                await send_key(params={'type': 'keyDown', **enter_key}, session_id=session.session_id)
                await send_key(params={'type': 'char', 'text': '\r'}, session_id=session.session_id)
                await send_key(params={'type': 'keyUp', **enter_key}, session_id=session.session_id)
            else:
                # Regular characters: keyDown, then 'char' (which performs the
                # actual text insertion), then keyUp.
                await send_key(params={'type': 'keyDown', 'key': char}, session_id=session.session_id)
                await send_key(params={'type': 'char', 'text': char}, session_id=session.session_id)
                await send_key(params={'type': 'keyUp', 'key': char}, session_id=session.session_id)
            # 10ms pause between keystrokes to mimic human typing speed.
            await asyncio.sleep(0.010)
    except Exception as e:
        raise Exception(f'Failed to type to page: {str(e)}')
def _get_char_modifiers_and_vk(self, char: str) -> tuple[int, int, str]:
"""Get modifiers, virtual key code, and base key for a character.
Returns:
(modifiers, windowsVirtualKeyCode, base_key)
"""
# Characters that require Shift modifier
shift_chars = {
'!': ('1', 49),
'@': ('2', 50),
'#': ('3', 51),
'$': ('4', 52),
'%': ('5', 53),
'^': ('6', 54),
'&': ('7', 55),
'*': ('8', 56),
'(': ('9', 57),
')': ('0', 48),
'_': ('-', 189),
'+': ('=', 187),
'{': ('[', 219),
'}': (']', 221),
'|': ('\\', 220),
':': (';', 186),
'"': ("'", 222),
'<': (',', 188),
'>': ('.', 190),
'?': ('/', 191),
'~': ('`', 192),
}
# Check if character requires Shift
if char in shift_chars:
base_key, vk_code = shift_chars[char]
return (8, vk_code, base_key) # Shift=8
# Uppercase letters require Shift
if char.isupper():
return (8, ord(char), char.lower()) # Shift=8
# Lowercase letters
if char.islower():
return (0, ord(char.upper()), char)
# Numbers
if char.isdigit():
return (0, ord(char), char)
# Special characters without Shift
no_shift_chars = {
' ': 32,
'-': 189,
'=': 187,
'[': 219,
']': 221,
'\\': 220,
';': 186,
"'": 222,
',': 188,
'.': 190,
'/': 191,
'`': 192,
}
if char in no_shift_chars:
return (0, no_shift_chars[char], char)
# Fallback
return (0, ord(char.upper()) if char.isalpha() else ord(char), char)
def _get_key_code_for_char(self, char: str) -> str:
"""Get the proper key code for a character (like Playwright does)."""
# Key code mapping for common characters (using proper base keys + modifiers)
key_codes = {
' ': 'Space',
'.': 'Period',
',': 'Comma',
'-': 'Minus',
'_': 'Minus', # Underscore uses Minus with Shift
'@': 'Digit2', # @ uses Digit2 with Shift
'!': 'Digit1', # ! uses Digit1 with Shift (not 'Exclamation')
'?': 'Slash', # ? uses Slash with Shift
':': 'Semicolon', # : uses Semicolon with Shift
';': 'Semicolon',
'(': 'Digit9', # ( uses Digit9 with Shift
')': 'Digit0', # ) uses Digit0 with Shift
'[': 'BracketLeft',
']': 'BracketRight',
'{': 'BracketLeft', # { uses BracketLeft with Shift
'}': 'BracketRight', # } uses BracketRight with Shift
'/': 'Slash',
'\\': 'Backslash',
'=': 'Equal',
'+': 'Equal', # + uses Equal with Shift
'*': 'Digit8', # * uses Digit8 with Shift
'&': 'Digit7', # & uses Digit7 with Shift
'%': 'Digit5', # % uses Digit5 with Shift
'$': 'Digit4', # $ uses Digit4 with Shift
'#': 'Digit3', # # uses Digit3 with Shift
'^': 'Digit6', # ^ uses Digit6 with Shift
'~': 'Backquote', # ~ uses Backquote with Shift
'`': 'Backquote',
"'": 'Quote',
'"': 'Quote', # " uses Quote with Shift
}
# Numbers
if char.isdigit():
return f'Digit{char}'
# Letters
if char.isalpha():
return f'Key{char.upper()}'
# Special characters
if char in key_codes:
return key_codes[char]
# Fallback for unknown characters
return f'Key{char.upper()}'
async def _clear_text_field(self, object_id: str, cdp_session) -> bool:
    """Clear text field using multiple strategies, starting with the most reliable.

    Strategies are tried in order, falling through to the next one on failure.
    (Previously every Strategy-1 code path returned immediately, which made
    Strategies 2 and 3 unreachable dead code; they are now real fallbacks.)
      1. Direct JavaScript value/content clearing (inputs and contenteditable).
      2. Triple-click to select all text, then Delete.
      3. Ctrl/Cmd+A select-all, then Backspace.

    Args:
        object_id: CDP remote object id of the element to clear.
        cdp_session: CDP session bound to the element's frame.

    Returns:
        True if a strategy completed (field believed cleared), False otherwise.
    """
    # Strategy 1: Direct JavaScript value/content setting (handles both inputs and contenteditable)
    try:
        self.logger.debug('🧹 Clearing text field using JavaScript value setting')
        clear_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': """
                    function() {
                        // Check if it's a contenteditable element
                        const hasContentEditable = this.getAttribute('contenteditable') === 'true' ||
                                                   this.getAttribute('contenteditable') === '' ||
                                                   this.isContentEditable === true;
                        if (hasContentEditable) {
                            // For contenteditable elements, clear all content
                            while (this.firstChild) {
                                this.removeChild(this.firstChild);
                            }
                            this.textContent = "";
                            this.innerHTML = "";
                            // Focus and position cursor at the beginning
                            this.focus();
                            const selection = window.getSelection();
                            const range = document.createRange();
                            range.setStart(this, 0);
                            range.setEnd(this, 0);
                            selection.removeAllRanges();
                            selection.addRange(range);
                            // Dispatch events
                            this.dispatchEvent(new Event("input", { bubbles: true }));
                            this.dispatchEvent(new Event("change", { bubbles: true }));
                            return {cleared: true, method: 'contenteditable', finalText: this.textContent};
                        } else if (this.value !== undefined) {
                            // For regular inputs with value property
                            try {
                                this.select();
                            } catch (e) {
                                // ignore
                            }
                            this.value = "";
                            this.dispatchEvent(new Event("input", { bubbles: true }));
                            this.dispatchEvent(new Event("change", { bubbles: true }));
                            return {cleared: true, method: 'value', finalText: this.value};
                        } else {
                            return {cleared: false, method: 'none', error: 'Not a supported input type'};
                        }
                    }
                """,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        # Check the clear result
        clear_info = clear_result.get('result', {}).get('value', {})
        self.logger.debug(f'Clear result: {clear_info}')
        if clear_info.get('cleared'):
            final_text = clear_info.get('finalText', '')
            if not final_text or not final_text.strip():
                self.logger.debug(f'✅ Text field cleared successfully using {clear_info.get("method")}')
                return True
            # Field still holds content — fall through to the click-based fallback.
            self.logger.debug(f'⚠️ JavaScript clear partially failed, field still contains: "{final_text}"')
        else:
            self.logger.debug(f'❌ JavaScript clear failed: {clear_info.get("error", "Unknown error")}')
    except Exception as e:
        self.logger.debug(f'JavaScript clear failed with exception: {e}')

    # Strategy 2: Triple-click + Delete (fallback for stubborn fields)
    try:
        self.logger.debug('🧹 Fallback: Clearing using triple-click + Delete')
        # Get element center coordinates for triple-click
        bounds_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': 'function() { return this.getBoundingClientRect(); }',
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        if bounds_result.get('result', {}).get('value'):
            bounds = bounds_result['result']['value']
            center_x = bounds['x'] + bounds['width'] / 2
            center_y = bounds['y'] + bounds['height'] / 2
            # Triple-click to select all text
            await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mousePressed',
                    'x': center_x,
                    'y': center_y,
                    'button': 'left',
                    'clickCount': 3,
                },
                session_id=cdp_session.session_id,
            )
            await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mouseReleased',
                    'x': center_x,
                    'y': center_y,
                    'button': 'left',
                    'clickCount': 3,
                },
                session_id=cdp_session.session_id,
            )
            # Delete selected text
            await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                params={
                    'type': 'keyDown',
                    'key': 'Delete',
                    'code': 'Delete',
                },
                session_id=cdp_session.session_id,
            )
            await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                params={
                    'type': 'keyUp',
                    'key': 'Delete',
                    'code': 'Delete',
                },
                session_id=cdp_session.session_id,
            )
            self.logger.debug('✅ Text field cleared using triple-click + Delete')
            return True
        # No bounding rect available — fall through to keyboard shortcuts.
    except Exception as e:
        self.logger.debug(f'Triple-click clear failed: {e}')

    # Strategy 3: Keyboard shortcuts (last resort)
    try:
        import platform

        is_macos = platform.system() == 'Darwin'
        select_all_modifier = 4 if is_macos else 2  # Meta=4 (Cmd), Ctrl=2
        modifier_name = 'Cmd' if is_macos else 'Ctrl'
        self.logger.debug(f'🧹 Last resort: Clearing using {modifier_name}+A + Backspace')
        # Select all text (Ctrl/Cmd+A)
        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
            params={
                'type': 'keyDown',
                'key': 'a',
                'code': 'KeyA',
                'modifiers': select_all_modifier,
            },
            session_id=cdp_session.session_id,
        )
        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
            params={
                'type': 'keyUp',
                'key': 'a',
                'code': 'KeyA',
                'modifiers': select_all_modifier,
            },
            session_id=cdp_session.session_id,
        )
        # Delete selected text (Backspace)
        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
            params={
                'type': 'keyDown',
                'key': 'Backspace',
                'code': 'Backspace',
            },
            session_id=cdp_session.session_id,
        )
        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
            params={
                'type': 'keyUp',
                'key': 'Backspace',
                'code': 'Backspace',
            },
            session_id=cdp_session.session_id,
        )
        self.logger.debug('✅ Text field cleared using keyboard shortcuts')
        return True
    except Exception as e:
        self.logger.debug(f'All clearing strategies failed: {e}')
        return False
async def _focus_element_simple(
    self, backend_node_id: int, object_id: str, cdp_session, input_coordinates: dict | None = None
) -> bool:
    """Simple focus strategy: CDP first, then click if failed."""
    # Strategy 1: native CDP DOM.focus.
    try:
        result = await cdp_session.cdp_client.send.DOM.focus(
            params={'backendNodeId': backend_node_id},
            session_id=cdp_session.session_id,
        )
        self.logger.debug(f'Element focused using CDP DOM.focus (result: {result})')
        return True
    except Exception as e:
        self.logger.debug(f'❌ CDP DOM.focus threw exception: {type(e).__name__}: {e}')

    # Strategy 2: synthesize a left click at the element's coordinates.
    has_coords = bool(input_coordinates and 'input_x' in input_coordinates and 'input_y' in input_coordinates)
    if has_coords:
        click_x = input_coordinates['input_x']
        click_y = input_coordinates['input_y']
        try:
            self.logger.debug(f'🎯 Attempting click-to-focus at ({click_x:.1f}, {click_y:.1f})')
            # Press/release pair — identical payload except the event type.
            for event_type in ('mousePressed', 'mouseReleased'):
                await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
                    params={
                        'type': event_type,
                        'x': click_x,
                        'y': click_y,
                        'button': 'left',
                        'clickCount': 1,
                    },
                    session_id=cdp_session.session_id,
                )
            self.logger.debug('✅ Element focused using click method')
            return True
        except Exception as e:
            self.logger.debug(f'Click focus failed: {e}')

    # Both strategies failed; caller will attempt typing regardless.
    self.logger.debug('Focus strategies failed, will attempt typing anyway')
    return False
def _requires_direct_value_assignment(self, element_node: EnhancedDOMTreeNode) -> bool:
"""
Check if an element requires direct value assignment instead of character-by-character typing.
Certain input types have compound components, custom plugins, or special requirements
that make character-by-character typing unreliable. These need direct .value assignment:
Native HTML5:
- date, time, datetime-local: Have spinbutton components (ISO format required)
- month, week: Similar compound structure
- color: Expects hex format #RRGGBB
- range: Needs numeric value within min/max
jQuery/Bootstrap Datepickers:
- Detected by class names or data attributes
- Often expect specific date formats (MM/DD/YYYY, DD/MM/YYYY, etc.)
Note: We use direct assignment because:
1. Typing triggers intermediate validation that might reject partial values
2. Compound components (like date spinbuttons) don't work with sequential typing
3. It's much faster and more reliable
4. We dispatch proper input/change events afterward to trigger listeners
"""
if not element_node.tag_name or not element_node.attributes:
return False
tag_name = element_node.tag_name.lower()
# Check for native HTML5 inputs that need direct assignment
if tag_name == 'input':
input_type = element_node.attributes.get('type', '').lower()
# Native HTML5 inputs with compound components or strict formats
if input_type in {'date', 'time', 'datetime-local', 'month', 'week', 'color', 'range'}:
return True
# Detect jQuery/Bootstrap datepickers (text inputs with datepicker plugins)
if input_type in {'text', ''}:
# Check for common datepicker indicators
class_attr = element_node.attributes.get('class', '').lower()
if any(
indicator in class_attr
for indicator in ['datepicker', 'daterangepicker', 'datetimepicker', 'bootstrap-datepicker']
):
return True
# Check for data attributes indicating datepickers
if any(attr in element_node.attributes for attr in ['data-datepicker', 'data-date-format', 'data-provide']):
return True
return False
async def _set_value_directly(self, element_node: EnhancedDOMTreeNode, text: str, object_id: str, cdp_session) -> None:
    """
    Set element value directly using JavaScript for inputs that don't support typing.
    This is used for:
    - Date/time inputs where character-by-character typing doesn't work
    - jQuery datepickers that need direct value assignment
    - Color/range inputs that need specific formats
    - Any input with custom plugins that intercept typing
    After setting the value, we dispatch comprehensive events to ensure all frameworks
    and plugins recognize the change (React, Vue, Angular, jQuery, etc.)

    Args:
        element_node: Target element node (for context; the JS runs against object_id).
        text: Value to assign; embedded via json.dumps so quoting/escaping is safe.
        object_id: CDP remote object id — becomes 'this' inside the JS function.
        cdp_session: CDP session bound to the element's frame.

    Raises:
        Exception: Re-raises any CDP/JS failure after logging it.
    """
    try:
        # Set the value using JavaScript with comprehensive event dispatching
        # callFunctionOn expects a function body (not a self-invoking function)
        set_value_js = f"""
            function() {{
                // Store old value for comparison
                const oldValue = this.value;
                // REACT-COMPATIBLE VALUE SETTING:
                // React uses Object.getOwnPropertyDescriptor to track input changes
                // We need to use the native setter to bypass React's tracking and then trigger events
                const nativeInputValueSetter = Object.getOwnPropertyDescriptor(
                    window.HTMLInputElement.prototype,
                    'value'
                ).set;
                // Set the value using the native setter (bypasses React's control)
                nativeInputValueSetter.call(this, {json.dumps(text)});
                // Dispatch comprehensive events to ensure all frameworks detect the change
                // Order matters: focus -> input -> change -> blur (mimics user interaction)
                // 1. Focus event (in case element isn't focused)
                this.dispatchEvent(new FocusEvent('focus', {{ bubbles: true }}));
                // 2. Input event (CRITICAL for React onChange)
                // React listens to 'input' events on the document and checks for value changes
                const inputEvent = new Event('input', {{ bubbles: true, cancelable: true }});
                this.dispatchEvent(inputEvent);
                // 3. Change event (for form handling, traditional listeners)
                const changeEvent = new Event('change', {{ bubbles: true, cancelable: true }});
                this.dispatchEvent(changeEvent);
                // 4. Blur event (triggers final validation in some libraries)
                this.dispatchEvent(new FocusEvent('blur', {{ bubbles: true }}));
                // 5. jQuery-specific events (if jQuery is present)
                if (typeof jQuery !== 'undefined' && jQuery.fn) {{
                    try {{
                        jQuery(this).trigger('change');
                        // Trigger datepicker-specific events if it's a datepicker
                        if (jQuery(this).data('datepicker')) {{
                            jQuery(this).datepicker('update');
                        }}
                    }} catch (e) {{
                        // jQuery not available or error, continue anyway
                    }}
                }}
                return this.value;
            }}
        """
        result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'objectId': object_id,
                'functionDeclaration': set_value_js,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        # Verify the value was set correctly (the JS returns the element's final value)
        if 'result' in result and 'value' in result['result']:
            actual_value = result['result']['value']
            self.logger.debug(f'✅ Value set directly to: "{actual_value}"')
        else:
            self.logger.warning('⚠️ Could not verify value was set correctly')
    except Exception as e:
        self.logger.error(f'❌ Failed to set value directly: {e}')
        raise
async def _input_text_element_node_impl(
    self, element_node: EnhancedDOMTreeNode, text: str, clear: bool = True, is_sensitive: bool = False
) -> dict | None:
    """
    Input text into an element using pure CDP with improved focus fallbacks.
    For date/time inputs, uses direct value assignment instead of typing.

    Args:
        element_node: Target element; its backend_node_id is resolved via CDP.
        text: Text to type; newline characters are dispatched as Enter key presses.
        clear: When True, attempt to empty the field before typing.
        is_sensitive: When True, never log the text and skip the value readback step.

    Returns:
        Metadata dict which may contain 'input_x'/'input_y' (focus coordinates) and
        'actual_value' (post-typing readback), or None if no coordinates were found.

    Raises:
        BrowserError: If any CDP step fails.
    """
    try:
        # Get CDP client
        cdp_client = self.browser_session.cdp_client
        # Get the correct session ID for the element's iframe
        # session_id = await self._get_session_id_for_element(element_node)
        # cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=element_node.target_id, focus=True)
        cdp_session = await self.browser_session.cdp_client_for_node(element_node)
        # Get element info
        backend_node_id = element_node.backend_node_id
        # Track coordinates for metadata
        input_coordinates = None
        # Scroll element into view (best-effort; failures are logged, not fatal)
        try:
            await cdp_session.cdp_client.send.DOM.scrollIntoViewIfNeeded(
                params={'backendNodeId': backend_node_id}, session_id=cdp_session.session_id
            )
            await asyncio.sleep(0.01)
        except Exception as e:
            # Node detached errors are common with shadow DOM and dynamic content
            # The element can still be interacted with even if scrolling fails
            error_str = str(e)
            if 'Node is detached from document' in error_str or 'detached from document' in error_str:
                self.logger.debug(
                    f'Element node temporarily detached during scroll (common with shadow DOM), continuing: {element_node}'
                )
            else:
                self.logger.debug(f'Failed to scroll element {element_node} into view before typing: {type(e).__name__}: {e}')
        # Get object ID for the element
        result = await cdp_client.send.DOM.resolveNode(
            params={'backendNodeId': backend_node_id},
            session_id=cdp_session.session_id,
        )
        assert 'object' in result and 'objectId' in result['object'], (
            'Failed to find DOM element based on backendNodeId, maybe page content changed?'
        )
        object_id = result['object']['objectId']
        # Get current coordinates using unified method
        coords = await self.browser_session.get_element_coordinates(backend_node_id, cdp_session)
        if coords:
            center_x = coords.x + coords.width / 2
            center_y = coords.y + coords.height / 2
            # Check for occlusion before using coordinates for focus
            is_occluded = await self._check_element_occlusion(backend_node_id, center_x, center_y, cdp_session)
            if is_occluded:
                self.logger.debug('🚫 Input element is occluded, skipping coordinate-based focus')
                input_coordinates = None  # Force fallback to CDP-only focus
            else:
                input_coordinates = {'input_x': center_x, 'input_y': center_y}
                self.logger.debug(f'Using unified coordinates: x={center_x:.1f}, y={center_y:.1f}')
        else:
            input_coordinates = None
            self.logger.debug('No coordinates found for element')
        # Ensure we have a valid object_id before proceeding
        if not object_id:
            raise ValueError('Could not get object_id for element')
        # Step 1: Focus the element using simple strategy
        focused_successfully = await self._focus_element_simple(
            backend_node_id=backend_node_id, object_id=object_id, cdp_session=cdp_session, input_coordinates=input_coordinates
        )
        # Step 2: Check if this element requires direct value assignment (date/time inputs)
        requires_direct_assignment = self._requires_direct_value_assignment(element_node)
        if requires_direct_assignment:
            # Date/time inputs: use direct value assignment instead of typing
            self.logger.debug(
                f'🎯 Element type={element_node.attributes.get("type")} requires direct value assignment, setting value directly'
            )
            await self._set_value_directly(element_node, text, object_id, cdp_session)
            # Return input coordinates for metadata
            return input_coordinates
        # Step 3: Clear existing text if requested (only for regular inputs that support typing)
        if clear:
            cleared_successfully = await self._clear_text_field(object_id=object_id, cdp_session=cdp_session)
            if not cleared_successfully:
                self.logger.warning('⚠️ Text field clearing failed, typing may append to existing text')
        # Step 4: Type the text character by character using proper human-like key events
        # This emulates exactly how a human would type, which modern websites expect
        if is_sensitive:
            # Note: sensitive_key_name is not passed to this low-level method,
            # but we could extend the signature if needed for more granular logging
            self.logger.debug('🎯 Typing <sensitive> character by character')
        else:
            self.logger.debug(f'🎯 Typing text character by character: "{text}"')
        # Detect contenteditable elements (may have leaf-start bug where first char is dropped)
        _attrs = element_node.attributes or {}
        _is_contenteditable = _attrs.get('contenteditable') in ('true', '') or (
            _attrs.get('role') == 'textbox' and element_node.tag_name not in ('input', 'textarea')
        )
        # For contenteditable: after typing first char, check if dropped and retype if needed
        _check_first_char = _is_contenteditable and len(text) > 0 and clear
        _first_char = text[0] if _check_first_char else None
        for i, char in enumerate(text):
            # Handle newline characters as Enter key
            if char == '\n':
                # Send proper Enter key sequence
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'keyDown',
                        'key': 'Enter',
                        'code': 'Enter',
                        'windowsVirtualKeyCode': 13,
                    },
                    session_id=cdp_session.session_id,
                )
                # Small delay to emulate human typing speed
                await asyncio.sleep(0.001)
                # Send char event with carriage return
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'char',
                        'text': '\r',
                        'key': 'Enter',
                    },
                    session_id=cdp_session.session_id,
                )
                # Send keyUp event
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'keyUp',
                        'key': 'Enter',
                        'code': 'Enter',
                        'windowsVirtualKeyCode': 13,
                    },
                    session_id=cdp_session.session_id,
                )
            else:
                # Handle regular characters
                # Get proper modifiers, VK code, and base key for the character
                modifiers, vk_code, base_key = self._get_char_modifiers_and_vk(char)
                key_code = self._get_key_code_for_char(base_key)
                # self.logger.debug(f'🎯 Typing character {i + 1}/{len(text)}: "{char}" (base_key: {base_key}, code: {key_code}, modifiers: {modifiers}, vk: {vk_code})')
                # Step 1: Send keyDown event (NO text parameter)
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'keyDown',
                        'key': base_key,
                        'code': key_code,
                        'modifiers': modifiers,
                        'windowsVirtualKeyCode': vk_code,
                    },
                    session_id=cdp_session.session_id,
                )
                # Small delay to emulate human typing speed
                await asyncio.sleep(0.005)
                # Step 2: Send char event (WITH text parameter) - this is crucial for text input
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'char',
                        'text': char,
                        'key': char,
                    },
                    session_id=cdp_session.session_id,
                )
                # Step 3: Send keyUp event (NO text parameter)
                await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                    params={
                        'type': 'keyUp',
                        'key': base_key,
                        'code': key_code,
                        'modifiers': modifiers,
                        'windowsVirtualKeyCode': vk_code,
                    },
                    session_id=cdp_session.session_id,
                )
            # After first char on contenteditable: check if dropped and retype if needed
            if i == 0 and _check_first_char and _first_char:
                check_result = await cdp_session.cdp_client.send.Runtime.evaluate(
                    params={'expression': 'document.activeElement.textContent'},
                    session_id=cdp_session.session_id,
                )
                content = check_result.get('result', {}).get('value', '')
                if _first_char not in content:
                    self.logger.debug(f'🎯 First char "{_first_char}" was dropped (leaf-start bug), retyping')
                    # Retype the first character - cursor now past leaf-start
                    modifiers, vk_code, base_key = self._get_char_modifiers_and_vk(_first_char)
                    key_code = self._get_key_code_for_char(base_key)
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyDown',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=cdp_session.session_id,
                    )
                    await asyncio.sleep(0.005)
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={'type': 'char', 'text': _first_char, 'key': _first_char},
                        session_id=cdp_session.session_id,
                    )
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyUp',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=cdp_session.session_id,
                    )
            # Small delay between characters to look human (realistic typing speed)
            await asyncio.sleep(0.001)
        # Step 4: Trigger framework-aware DOM events after typing completion
        # Modern JavaScript frameworks (React, Vue, Angular) rely on these events
        # to update their internal state and trigger re-renders
        await self._trigger_framework_events(object_id=object_id, cdp_session=cdp_session)
        # Step 5: Read back actual value for verification (skip for sensitive data)
        if not is_sensitive:
            try:
                await asyncio.sleep(0.05)  # let autocomplete/formatter JS settle
                readback_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'objectId': object_id,
                        'functionDeclaration': 'function() { return this.value !== undefined ? this.value : this.textContent; }',
                        'returnByValue': True,
                    },
                    session_id=cdp_session.session_id,
                )
                actual_value = readback_result.get('result', {}).get('value')
                if actual_value is not None:
                    if input_coordinates is None:
                        input_coordinates = {}
                    input_coordinates['actual_value'] = actual_value
            except Exception as e:
                self.logger.debug(f'Value readback failed (non-critical): {e}')
        # Step 6: Auto-retry on concatenation mismatch (only when clear was requested)
        # If we asked to clear but the readback value contains the typed text as a substring
        # yet is longer, the field had pre-existing text that wasn't cleared. Set directly.
        if clear and not is_sensitive and input_coordinates and 'actual_value' in input_coordinates:
            actual_value = input_coordinates['actual_value']
            if (
                isinstance(actual_value, str)
                and actual_value != text
                and len(actual_value) > len(text)
                and (actual_value.endswith(text) or actual_value.startswith(text))
            ):
                self.logger.info(f'🔄 Concatenation detected: got "{actual_value}", expected "{text}" — auto-retrying')
                try:
                    # Clear + set value via native setter in one JS call (works with React/Vue)
                    retry_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                        params={
                            'objectId': object_id,
                            'functionDeclaration': """
                                function(newValue) {
                                    if (this.value !== undefined) {
                                        var desc = Object.getOwnPropertyDescriptor(
                                            HTMLInputElement.prototype, 'value'
                                        ) || Object.getOwnPropertyDescriptor(
                                            HTMLTextAreaElement.prototype, 'value'
                                        );
                                        if (desc && desc.set) {
                                            desc.set.call(this, newValue);
                                        } else {
                                            this.value = newValue;
                                        }
                                    } else if (this.isContentEditable) {
                                        this.textContent = newValue;
                                    }
                                    this.dispatchEvent(new Event('input', { bubbles: true }));
                                    this.dispatchEvent(new Event('change', { bubbles: true }));
                                    return this.value !== undefined ? this.value : this.textContent;
                                }
                            """,
                            'arguments': [{'value': text}],
                            'returnByValue': True,
                        },
                        session_id=cdp_session.session_id,
                    )
                    retry_value = retry_result.get('result', {}).get('value')
                    if retry_value is not None:
                        input_coordinates['actual_value'] = retry_value
                        if retry_value == text:
                            self.logger.info('✅ Auto-retry fixed concatenation')
                        else:
                            self.logger.warning(f'⚠️ Auto-retry value still differs: "{retry_value}"')
                except Exception as e:
                    self.logger.debug(f'Auto-retry failed (non-critical): {e}')
        # Return coordinates metadata if available
        return input_coordinates
    except Exception as e:
        self.logger.error(f'Failed to input text via CDP: {type(e).__name__}: {e}')
        raise BrowserError(f'Failed to input text into element: {repr(element_node)}')
async def _trigger_framework_events(self, object_id: str, cdp_session) -> None:
    """
    Trigger framework-aware DOM events after text input completion.
    This is critical for modern JavaScript frameworks (React, Vue, Angular, etc.)
    that rely on DOM events to update their internal state and trigger re-renders.

    Best-effort only: failures are logged and never raised, since a missed
    synthetic event should not abort the overall typing operation.

    Args:
        object_id: CDP object ID of the input element
        cdp_session: CDP session for the element's context
    """
    try:
        # Execute JavaScript to trigger comprehensive event sequence
        framework_events_script = """
            function() {
                // Find the target element (available as 'this' when using objectId)
                const element = this;
                if (!element) return false;
                // Ensure element is focused
                element.focus();
                // Comprehensive event sequence for maximum framework compatibility
                const events = [
                    // Input event - primary event for React controlled components
                    { type: 'input', bubbles: true, cancelable: true },
                    // Change event - important for form validation and Vue v-model
                    { type: 'change', bubbles: true, cancelable: true },
                    // Blur event - triggers validation in many frameworks
                    { type: 'blur', bubbles: true, cancelable: true }
                ];
                let success = true;
                events.forEach(eventConfig => {
                    try {
                        const event = new Event(eventConfig.type, {
                            bubbles: eventConfig.bubbles,
                            cancelable: eventConfig.cancelable
                        });
                        // Special handling for InputEvent (more specific than Event)
                        if (eventConfig.type === 'input') {
                            const inputEvent = new InputEvent('input', {
                                bubbles: true,
                                cancelable: true,
                                data: element.value,
                                inputType: 'insertText'
                            });
                            element.dispatchEvent(inputEvent);
                        } else {
                            element.dispatchEvent(event);
                        }
                    } catch (e) {
                        success = false;
                        console.warn('Framework event dispatch failed:', eventConfig.type, e);
                    }
                });
                // Special React synthetic event handling
                // React uses internal fiber properties for event system
                if (element._reactInternalFiber || element._reactInternalInstance || element.__reactInternalInstance) {
                    try {
                        // Trigger React's synthetic event system
                        const syntheticInputEvent = new InputEvent('input', {
                            bubbles: true,
                            cancelable: true,
                            data: element.value
                        });
                        // Force React to process this as a synthetic event
                        Object.defineProperty(syntheticInputEvent, 'isTrusted', { value: true });
                        element.dispatchEvent(syntheticInputEvent);
                    } catch (e) {
                        console.warn('React synthetic event failed:', e);
                    }
                }
                // Special Vue reactivity trigger
                // Vue uses __vueParentComponent or __vue__ for component access
                if (element.__vue__ || element._vnode || element.__vueParentComponent) {
                    try {
                        // Vue often needs explicit input event with proper timing
                        const vueEvent = new Event('input', { bubbles: true });
                        setTimeout(() => element.dispatchEvent(vueEvent), 0);
                    } catch (e) {
                        console.warn('Vue reactivity trigger failed:', e);
                    }
                }
                return success;
            }
        """
        # Execute the framework events script against the element ('this' = object_id)
        result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'objectId': object_id,
                'functionDeclaration': framework_events_script,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        # The script returns false if any individual dispatch threw
        success = result.get('result', {}).get('value', False)
        if success:
            self.logger.debug('✅ Framework events triggered successfully')
        else:
            self.logger.warning('⚠️ Failed to trigger framework events')
    except Exception as e:
        self.logger.warning(f'⚠️ Failed to trigger framework events: {type(e).__name__}: {e}')
        # Don't raise - framework events are a best-effort enhancement
async def _scroll_with_cdp_gesture(self, pixels: int) -> bool:
    """Scroll the page using CDP Input.synthesizeScrollGesture (realistic gesture).

    Args:
        pixels: Scroll distance in pixels; positive scrolls down, negative up.

    Returns:
        True on success, False on failure (caller falls back to JS scrolling).
    """
    try:
        # Get focused CDP session using public API (validates and waits for recovery if needed)
        session = await self.browser_session.get_or_create_cdp_session()
        # Prefer the cached viewport size; otherwise query layout metrics
        cached_size = self.browser_session._original_viewport_size
        if cached_size:
            width, height = cached_size
        else:
            metrics = await session.cdp_client.send.Page.getLayoutMetrics(session_id=session.session_id)
            width = metrics['layoutViewport']['clientWidth']
            height = metrics['layoutViewport']['clientHeight']
        # Gesture origin is the viewport center. synthesizeScrollGesture's
        # yDistance sign is inverted relative to mouseWheel deltaY (positive
        # yDistance scrolls up), hence the negation.
        await session.cdp_client.send.Input.synthesizeScrollGesture(
            params={
                'x': width / 2,
                'y': height / 2,
                'xDistance': 0,
                'yDistance': -pixels,
                'speed': 50000,  # pixels per second (high = near-instant scroll)
            },
            session_id=session.session_id,
        )
        self.logger.debug(f'📄 Scrolled via CDP gesture: {pixels}px')
        return True
    except Exception as e:
        # Not critical - JavaScript fallback will handle scrolling
        self.logger.debug(f'CDP gesture scroll failed ({type(e).__name__}: {e}), falling back to JS')
        return False
async def _scroll_element_container(self, element_node, pixels: int) -> bool:
    """Try to scroll an element's container using CDP.

    Iframes get special handling: their content document is scrolled directly
    via injected JavaScript. All other elements receive a synthetic mouseWheel
    event at their center point.

    Args:
        element_node: DOM node whose container should be scrolled.
        pixels: Scroll distance (positive = down, per mouseWheel deltaY convention).

    Returns:
        True if a scroll was dispatched, False on any CDP failure.
    """
    try:
        cdp_session = await self.browser_session.cdp_client_for_node(element_node)
        # Check if this is an iframe - if so, scroll its content directly
        if element_node.tag_name and element_node.tag_name.upper() == 'IFRAME':
            # For iframes, we need to scroll the content document, not the iframe element itself
            # Use JavaScript to directly scroll the iframe's content
            backend_node_id = element_node.backend_node_id
            # Resolve the node to get an object ID
            result = await cdp_session.cdp_client.send.DOM.resolveNode(
                params={'backendNodeId': backend_node_id},
                session_id=cdp_session.session_id,
            )
            if 'object' in result and 'objectId' in result['object']:
                object_id = result['object']['objectId']
                # Scroll the iframe's content directly.
                # NOTE: contentDocument access only works for same-origin iframes;
                # cross-origin access raises and is reported via the error field.
                scroll_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'functionDeclaration': f"""
                            function() {{
                                try {{
                                    const doc = this.contentDocument || this.contentWindow.document;
                                    if (doc) {{
                                        const scrollElement = doc.documentElement || doc.body;
                                        if (scrollElement) {{
                                            const oldScrollTop = scrollElement.scrollTop;
                                            scrollElement.scrollTop += {pixels};
                                            const newScrollTop = scrollElement.scrollTop;
                                            return {{
                                                success: true,
                                                oldScrollTop: oldScrollTop,
                                                newScrollTop: newScrollTop,
                                                scrolled: newScrollTop - oldScrollTop
                                            }};
                                        }}
                                    }}
                                    return {{success: false, error: 'Could not access iframe content'}};
                                }} catch (e) {{
                                    return {{success: false, error: e.toString()}};
                                }}
                            }}
                        """,
                        'objectId': object_id,
                        'returnByValue': True,
                    },
                    session_id=cdp_session.session_id,
                )
                if scroll_result and 'result' in scroll_result and 'value' in scroll_result['result']:
                    result_value = scroll_result['result']['value']
                    if result_value.get('success'):
                        self.logger.debug(f'Successfully scrolled iframe content by {result_value.get("scrolled", 0)}px')
                        return True
                    else:
                        self.logger.debug(f'Failed to scroll iframe: {result_value.get("error", "Unknown error")}')
                        # Falls through to the mouse-wheel approach below as a last resort
        # For non-iframe elements, use the standard mouse wheel approach
        # Get element bounds to know where to scroll
        backend_node_id = element_node.backend_node_id
        box_model = await cdp_session.cdp_client.send.DOM.getBoxModel(
            params={'backendNodeId': backend_node_id}, session_id=cdp_session.session_id
        )
        content_quad = box_model['model']['content']
        # Calculate center point (quad is 4 corner x/y pairs: average them)
        center_x = (content_quad[0] + content_quad[2] + content_quad[4] + content_quad[6]) / 4
        center_y = (content_quad[1] + content_quad[3] + content_quad[5] + content_quad[7]) / 4
        # Dispatch mouse wheel event at element location
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mouseWheel',
                'x': center_x,
                'y': center_y,
                'deltaX': 0,
                'deltaY': pixels,
            },
            session_id=cdp_session.session_id,
        )
        return True
    except Exception as e:
        self.logger.debug(f'Failed to scroll element container via CDP: {e}')
        return False
async def _get_session_id_for_element(self, element_node: EnhancedDOMTreeNode) -> str | None:
    """Resolve the CDP session id appropriate for an element's frame.

    Elements inside an iframe get a session bound to that iframe's target
    (without stealing focus); anything else falls back to the main target.
    """
    frame_id = element_node.frame_id
    if frame_id:
        # Element is in an iframe, need to get session for that frame
        try:
            targets = self.browser_session.session_manager.get_all_targets()
            # Locate the iframe target whose id embeds this frame id
            match = next(
                (tid for tid, tgt in targets.items() if tgt.target_type == 'iframe' and frame_id in str(tid)),
                None,
            )
            if match is not None:
                # Create temporary session for iframe target without switching focus
                iframe_session = await self.browser_session.get_or_create_cdp_session(match, focus=False)
                return iframe_session.session_id
            self.logger.debug(f'Frame {frame_id} not found in targets, using main session')
        except Exception as e:
            self.logger.debug(f'Error getting frame session: {e}, using main session')
    # Use main target session - get_or_create_cdp_session validates focus automatically
    main_session = await self.browser_session.get_or_create_cdp_session()
    return main_session.session_id
async def on_GoBackEvent(self, event: GoBackEvent) -> None:
    """Handle navigate back request with CDP.

    Reads Page.getNavigationHistory and jumps to the previous entry via
    Page.navigateToHistoryEntry. No-op (with a warning) when the current
    entry is already the oldest one in history.
    """
    # Removed the original `try: ... except Exception as e: raise` wrapper —
    # it was a no-op that only re-raised and left `e` unused.
    cdp_session = await self.browser_session.get_or_create_cdp_session()
    # Get navigation history
    history = await cdp_session.cdp_client.send.Page.getNavigationHistory(session_id=cdp_session.session_id)
    current_index = history['currentIndex']
    entries = history['entries']
    # Check if we can go back
    if current_index <= 0:
        self.logger.warning('⚠️ Cannot go back - no previous entry in history')
        return
    # Navigate to the previous entry
    previous_entry_id = entries[current_index - 1]['id']
    await cdp_session.cdp_client.send.Page.navigateToHistoryEntry(
        params={'entryId': previous_entry_id}, session_id=cdp_session.session_id
    )
    # Brief pause to let navigation start; BrowserSession handles navigation via events
    await asyncio.sleep(0.5)
    self.logger.info(f'🔙 Navigated back to {entries[current_index - 1]["url"]}')
async def on_GoForwardEvent(self, event: GoForwardEvent) -> None:
    """Handle navigate forward request with CDP.

    Reads Page.getNavigationHistory and jumps to the next entry via
    Page.navigateToHistoryEntry. No-op (with a warning) when the current
    entry is already the newest one in history.
    """
    # Removed the original `try: ... except Exception as e: raise` wrapper —
    # it was a no-op that only re-raised and left `e` unused.
    cdp_session = await self.browser_session.get_or_create_cdp_session()
    # Get navigation history
    history = await cdp_session.cdp_client.send.Page.getNavigationHistory(session_id=cdp_session.session_id)
    current_index = history['currentIndex']
    entries = history['entries']
    # Check if we can go forward
    if current_index >= len(entries) - 1:
        self.logger.warning('⚠️ Cannot go forward - no next entry in history')
        return
    # Navigate to the next entry
    next_entry_id = entries[current_index + 1]['id']
    await cdp_session.cdp_client.send.Page.navigateToHistoryEntry(
        params={'entryId': next_entry_id}, session_id=cdp_session.session_id
    )
    # Brief pause to let navigation start; BrowserSession handles navigation via events
    await asyncio.sleep(0.5)
    self.logger.info(f'🔜 Navigated forward to {entries[current_index + 1]["url"]}')
async def on_RefreshEvent(self, event: RefreshEvent) -> None:
    """Handle target refresh request with CDP."""
    # Removed the original `try: ... except Exception as e: raise` wrapper —
    # it was a no-op that only re-raised and left `e` unused.
    cdp_session = await self.browser_session.get_or_create_cdp_session()
    # Reload the target
    await cdp_session.cdp_client.send.Page.reload(session_id=cdp_session.session_id)
    # Wait for reload; cached state is not cleared here — the next state fetch rebuilds as needed
    await asyncio.sleep(1.0)
    # Navigation is handled by BrowserSession via events
    self.logger.info('🔄 Target refreshed')
@observe_debug(ignore_input=True, ignore_output=True, name='wait_event_handler')
async def on_WaitEvent(self, event: WaitEvent) -> None:
    """Handle wait request.

    Clamps the requested duration to [0, event.max_seconds] before sleeping so
    a single wait action can never stall the agent indefinitely.
    """
    # Removed the original `try: ... except Exception as e: raise` wrapper —
    # it was a no-op that only re-raised and left `e` unused.
    # Cap wait time at the configured maximum (and floor negative values at 0)
    actual_seconds = min(max(event.seconds, 0), event.max_seconds)
    if actual_seconds != event.seconds:
        self.logger.info(f'🕒 Waiting for {actual_seconds} seconds (capped from {event.seconds}s)')
    else:
        self.logger.info(f'🕒 Waiting for {actual_seconds} seconds')
    await asyncio.sleep(actual_seconds)
async def _dispatch_key_event(self, cdp_session, event_type: str, key: str, modifiers: int = 0) -> None:
    """Dispatch one keyboard event over CDP with the correct key/code/VK metadata.

    Args:
        cdp_session: Session whose client receives the Input.dispatchKeyEvent call.
        event_type: CDP event type, e.g. 'keyDown' or 'keyUp'.
        key: DOM key value to send.
        modifiers: Optional CDP modifier bitmask; omitted from params when zero.
    """
    code, vk_code = get_key_info(key)
    event_params: DispatchKeyEventParameters = {
        'type': event_type,
        'key': key,
        'code': code,
    }
    # Only attach the optional fields when they carry information
    if modifiers:
        event_params['modifiers'] = modifiers
    if vk_code is not None:
        event_params['windowsVirtualKeyCode'] = vk_code
    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(params=event_params, session_id=cdp_session.session_id)
async def on_SendKeysEvent(self, event: SendKeysEvent) -> None:
    """Handle send keys request with CDP.

    Accepts either a key combination ("ctrl+a"), a single special key
    ("Enter", "escape"), or a plain text string. Combinations dispatch
    modifier keyDown/keyUp pairs around the main key; text is typed one
    character at a time as keyDown/char/keyUp triples so the characters
    actually appear in focused input fields.
    """
    cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
    try:
        # Normalize key names from common aliases (lowercase user input -> DOM key values)
        key_aliases = {
            'ctrl': 'Control',
            'control': 'Control',
            'alt': 'Alt',
            'option': 'Alt',
            'meta': 'Meta',
            'cmd': 'Meta',
            'command': 'Meta',
            'shift': 'Shift',
            'enter': 'Enter',
            'return': 'Enter',
            'tab': 'Tab',
            'delete': 'Delete',
            'backspace': 'Backspace',
            'escape': 'Escape',
            'esc': 'Escape',
            'space': ' ',
            'up': 'ArrowUp',
            'down': 'ArrowDown',
            'left': 'ArrowLeft',
            'right': 'ArrowRight',
            'pageup': 'PageUp',
            'pagedown': 'PageDown',
            'home': 'Home',
            'end': 'End',
        }
        # Parse and normalize the key string
        keys = event.keys
        if '+' in keys:
            # Handle key combinations like "ctrl+a" — normalize each part separately
            parts = keys.split('+')
            normalized_parts = []
            for part in parts:
                part_lower = part.strip().lower()
                normalized = key_aliases.get(part_lower, part)
                normalized_parts.append(normalized)
            normalized_keys = '+'.join(normalized_parts)
        else:
            # Single key
            keys_lower = keys.strip().lower()
            normalized_keys = key_aliases.get(keys_lower, keys)
        # Handle key combinations like "Control+A"
        if '+' in normalized_keys:
            parts = normalized_keys.split('+')
            modifiers = parts[:-1]
            main_key = parts[-1]
            # Calculate modifier bitmask (CDP: Alt=1, Ctrl=2, Meta=4, Shift=8)
            modifier_value = 0
            modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
            for mod in modifiers:
                modifier_value |= modifier_map.get(mod, 0)
            # Press modifier keys first so the main key sees them held down
            for mod in modifiers:
                await self._dispatch_key_event(cdp_session, 'keyDown', mod)
            # Press main key with modifiers bitmask
            await self._dispatch_key_event(cdp_session, 'keyDown', main_key, modifier_value)
            await self._dispatch_key_event(cdp_session, 'keyUp', main_key, modifier_value)
            # Release modifier keys in reverse order of pressing
            for mod in reversed(modifiers):
                await self._dispatch_key_event(cdp_session, 'keyUp', mod)
        else:
            # Check if this is a text string or special key
            special_keys = {
                'Enter',
                'Tab',
                'Delete',
                'Backspace',
                'Escape',
                'ArrowUp',
                'ArrowDown',
                'ArrowLeft',
                'ArrowRight',
                'PageUp',
                'PageDown',
                'Home',
                'End',
                'Control',
                'Alt',
                'Meta',
                'Shift',
                'F1',
                'F2',
                'F3',
                'F4',
                'F5',
                'F6',
                'F7',
                'F8',
                'F9',
                'F10',
                'F11',
                'F12',
            }
            # If it's a special key, use original logic
            if normalized_keys in special_keys:
                await self._dispatch_key_event(cdp_session, 'keyDown', normalized_keys)
                # For Enter key, also dispatch a char event to trigger keypress listeners
                if normalized_keys == 'Enter':
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'char',
                            'text': '\r',
                            'key': 'Enter',
                        },
                        session_id=cdp_session.session_id,
                    )
                await self._dispatch_key_event(cdp_session, 'keyUp', normalized_keys)
            else:
                # It's text (single character or string) - send each character as text input
                # This is crucial for text to appear in focused input fields
                for char in normalized_keys:
                    # Special-case newline characters to dispatch as Enter
                    # (VK 13 = carriage return; rawKeyDown/char/keyUp mimics a real Enter press)
                    if char in ('\n', '\r'):
                        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                            params={
                                'type': 'rawKeyDown',
                                'windowsVirtualKeyCode': 13,
                                'unmodifiedText': '\r',
                                'text': '\r',
                            },
                            session_id=cdp_session.session_id,
                        )
                        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                            params={
                                'type': 'char',
                                'windowsVirtualKeyCode': 13,
                                'unmodifiedText': '\r',
                                'text': '\r',
                            },
                            session_id=cdp_session.session_id,
                        )
                        await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                            params={
                                'type': 'keyUp',
                                'windowsVirtualKeyCode': 13,
                                'unmodifiedText': '\r',
                                'text': '\r',
                            },
                            session_id=cdp_session.session_id,
                        )
                        continue
                    # Get proper modifiers and key info for the character
                    modifiers, vk_code, base_key = self._get_char_modifiers_and_vk(char)
                    key_code = self._get_key_code_for_char(base_key)
                    # Send keyDown
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyDown',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=cdp_session.session_id,
                    )
                    # Send char event with text - this is what makes text appear in input fields
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'char',
                            'text': char,
                            'key': char,
                        },
                        session_id=cdp_session.session_id,
                    )
                    # Send keyUp
                    await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyUp',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=cdp_session.session_id,
                    )
                    # Small delay between characters (10ms)
                    await asyncio.sleep(0.010)
        self.logger.info(f'⌨️ Sent keys: {event.keys}')
        # Note: We don't clear cached state on Enter; multi_act will detect DOM changes
        # and rebuild explicitly. We still wait briefly for potential navigation.
        if 'enter' in event.keys.lower() or 'return' in event.keys.lower():
            await asyncio.sleep(0.1)
    except Exception as e:
        # Pass-through: errors propagate unchanged to the event dispatcher
        raise
async def on_UploadFileEvent(self, event: UploadFileEvent) -> None:
    """Handle file upload request with CDP.

    Validates that the target node is a file input and that a locally-existing
    file is non-empty, then attaches it via DOM.setFileInputFiles.

    Raises:
        BrowserError: if the node is not a file input, or the file exists but is empty.
    """
    # Removed the original `try: ... except Exception as e: raise` wrapper —
    # it was a no-op that only re-raised and left `e` unused.
    element_node = event.node
    index_for_logging = element_node.backend_node_id or 'unknown'
    # Only genuine file inputs can receive files via CDP
    if not self.browser_session.is_file_input(element_node):
        msg = f'Upload failed - element {index_for_logging} is not a file input.'
        raise BrowserError(message=msg, long_term_memory=msg)
    # Get CDP client and session
    cdp_client = self.browser_session.cdp_client
    session_id = await self._get_session_id_for_element(element_node)
    # Validate file before upload.
    # NOTE(review): a non-existent path is passed through to CDP without error here —
    # presumably validated upstream or intentionally deferred; confirm before tightening.
    if os.path.exists(event.file_path):
        file_size = os.path.getsize(event.file_path)
        if file_size == 0:
            msg = f'Upload failed - file {event.file_path} is empty (0 bytes).'
            raise BrowserError(message=msg, long_term_memory=msg)
        self.logger.debug(f'📎 File {event.file_path} validated ({file_size} bytes)')
    # Set file(s) to upload
    backend_node_id = element_node.backend_node_id
    await cdp_client.send.DOM.setFileInputFiles(
        params={
            'files': [event.file_path],
            'backendNodeId': backend_node_id,
        },
        session_id=session_id,
    )
    self.logger.info(f'📎 Uploaded file {event.file_path} to element {index_for_logging}')
async def on_ScrollToTextEvent(self, event: ScrollToTextEvent) -> None:
    """Handle scroll to text request with CDP. Raises exception if text not found.

    Tries XPath-based DOM searches first, then falls back to a JavaScript
    TreeWalker scan of all text nodes.

    Fixes vs. original: the search text is now JSON-escaped before being
    embedded in the JS expression (a quote in the text previously broke the
    script), search sessions are always discarded (they leaked on a hit),
    and the unused root-node variable / unreachable tail branch were removed.

    Raises:
        BrowserError: when the text cannot be found by any strategy.
    """
    # TODO: handle looking for text inside cross-origin iframes as well
    # Get focused CDP session using public API (validates and waits for recovery if needed)
    cdp_session = await self.browser_session.get_or_create_cdp_session()
    cdp_client = cdp_session.cdp_client
    session_id = cdp_session.session_id
    # Enable DOM and fetch the document so the DOM agent is primed for searches
    await cdp_client.send.DOM.enable(session_id=session_id)
    await cdp_client.send.DOM.getDocument(params={'depth': -1}, session_id=session_id)
    # Search for text using XPath.
    # NOTE: a double quote inside event.text breaks these XPath literals; the
    # resulting query error is caught below and the JS fallback handles it safely.
    search_queries = [
        f'//*[contains(text(), "{event.text}")]',
        f'//*[contains(., "{event.text}")]',
        f'//*[@*[contains(., "{event.text}")]]',
    ]
    found = False
    for query in search_queries:
        try:
            # Perform search
            search_result = await cdp_client.send.DOM.performSearch(params={'query': query}, session_id=session_id)
            search_id = search_result['searchId']
            result_count = search_result['resultCount']
            node_id = None
            if result_count > 0:
                # Get the first match
                node_ids = await cdp_client.send.DOM.getSearchResults(
                    params={'searchId': search_id, 'fromIndex': 0, 'toIndex': 1},
                    session_id=session_id,
                )
                if node_ids['nodeIds']:
                    node_id = node_ids['nodeIds'][0]
            # Always clean up the search session (the original skipped this on a hit)
            await cdp_client.send.DOM.discardSearchResults(params={'searchId': search_id}, session_id=session_id)
            if node_id is not None:
                # Scroll the element into view
                await cdp_client.send.DOM.scrollIntoViewIfNeeded(params={'nodeId': node_id}, session_id=session_id)
                found = True
                self.logger.debug(f'📜 Scrolled to text: "{event.text}"')
                break
        except Exception as e:
            self.logger.debug(f'Search query failed: {query}, error: {e}')
            continue
    if found:
        return None
    # Fallback: JavaScript text-node walk. json.dumps produces a valid JS string
    # literal, so arbitrary quotes/backslashes in event.text cannot break the script.
    needle_js = json.dumps(event.text)
    js_result = await cdp_client.send.Runtime.evaluate(
        params={
            'expression': f'''
                (() => {{
                    const needle = {needle_js};
                    const walker = document.createTreeWalker(
                        document.body,
                        NodeFilter.SHOW_TEXT,
                        null,
                        false
                    );
                    let node;
                    while (node = walker.nextNode()) {{
                        if (node.textContent.includes(needle)) {{
                            node.parentElement.scrollIntoView({{behavior: 'smooth', block: 'center'}});
                            return true;
                        }}
                    }}
                    return false;
                }})()
            '''
        },
        session_id=session_id,
    )
    if js_result.get('result', {}).get('value'):
        self.logger.debug(f'📜 Scrolled to text: "{event.text}" (via JS)')
        return None
    self.logger.warning(f'⚠️ Text not found: "{event.text}"')
    raise BrowserError(f'Text not found: "{event.text}"', details={'text': event.text})
async def on_GetDropdownOptionsEvent(self, event: GetDropdownOptionsEvent) -> dict[str, str]:
    """Handle get dropdown options request with CDP.

    Resolves the event's node to a CDP object, detects whether it is an ARIA
    combobox (delegated to _handle_aria_combobox_options), then extracts options
    from native <select>, ARIA menu/listbox, or class-based custom dropdowns —
    searching the element itself and its children up to depth 4.

    Returns:
        Dict of string values: dropdown type, JSON-encoded options, element info,
        formatted display text, and short/long-term memory strings.

    Raises:
        BrowserError: on timeout, unrecognized element type, or extraction failure.
    """
    try:
        # Use the provided node
        element_node = event.node
        index_for_logging = element_node.backend_node_id or 'unknown'
        # Get CDP session for this node
        cdp_session = await self.browser_session.cdp_client_for_node(element_node)
        # Convert node to object ID for CDP operations
        try:
            object_result = await cdp_session.cdp_client.send.DOM.resolveNode(
                params={'backendNodeId': element_node.backend_node_id}, session_id=cdp_session.session_id
            )
            remote_object = object_result.get('object', {})
            object_id = remote_object.get('objectId')
            if not object_id:
                raise ValueError('Could not get object ID from resolved node')
        except Exception as e:
            raise ValueError(f'Failed to resolve node to object: {e}') from e
        # Check if this is an ARIA combobox that needs expansion
        # ARIA comboboxes have options in a separate element referenced by aria-controls
        check_combobox_script = """
        function() {
            const element = this;
            const role = element.getAttribute('role');
            const ariaControls = element.getAttribute('aria-controls');
            const ariaExpanded = element.getAttribute('aria-expanded');
            if (role === 'combobox' && ariaControls) {
                return {
                    isCombobox: true,
                    ariaControls: ariaControls,
                    isExpanded: ariaExpanded === 'true',
                    tagName: element.tagName.toLowerCase()
                };
            }
            return { isCombobox: false };
        }
        """
        combobox_check = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': check_combobox_script,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        combobox_info = combobox_check.get('result', {}).get('value', {})
        # If it's an ARIA combobox with aria-controls, handle it specially
        if combobox_info.get('isCombobox'):
            return await self._handle_aria_combobox_options(cdp_session, object_id, combobox_info, index_for_logging)
        # Use JavaScript to extract dropdown options (existing logic for non-combobox elements)
        options_script = """
        function() {
            const startElement = this;
            // Function to check if an element is a dropdown and extract options
            function checkDropdownElement(element) {
                // Check if it's a native select element
                if (element.tagName.toLowerCase() === 'select') {
                    return {
                        type: 'select',
                        options: Array.from(element.options).map((opt, idx) => ({
                            text: opt.text.trim(),
                            value: opt.value,
                            index: idx,
                            selected: opt.selected
                        })),
                        id: element.id || '',
                        name: element.name || '',
                        source: 'target'
                    };
                }
                // Check if it's an ARIA dropdown/menu (not combobox - handled separately)
                const role = element.getAttribute('role');
                if (role === 'menu' || role === 'listbox') {
                    // Find all menu items/options
                    const menuItems = element.querySelectorAll('[role="menuitem"], [role="option"]');
                    const options = [];
                    menuItems.forEach((item, idx) => {
                        const text = item.textContent ? item.textContent.trim() : '';
                        if (text) {
                            options.push({
                                text: text,
                                value: item.getAttribute('data-value') || text,
                                index: idx,
                                selected: item.getAttribute('aria-selected') === 'true' || item.classList.contains('selected')
                            });
                        }
                    });
                    return {
                        type: 'aria',
                        options: options,
                        id: element.id || '',
                        name: element.getAttribute('aria-label') || '',
                        source: 'target'
                    };
                }
                // Check if it's a Semantic UI dropdown or similar
                if (element.classList.contains('dropdown') || element.classList.contains('ui')) {
                    const menuItems = element.querySelectorAll('.item, .option, [data-value]');
                    const options = [];
                    menuItems.forEach((item, idx) => {
                        const text = item.textContent ? item.textContent.trim() : '';
                        if (text) {
                            options.push({
                                text: text,
                                value: item.getAttribute('data-value') || text,
                                index: idx,
                                selected: item.classList.contains('selected') || item.classList.contains('active')
                            });
                        }
                    });
                    if (options.length > 0) {
                        return {
                            type: 'custom',
                            options: options,
                            id: element.id || '',
                            name: element.getAttribute('aria-label') || '',
                            source: 'target'
                        };
                    }
                }
                return null;
            }
            // Function to recursively search children up to specified depth
            function searchChildrenForDropdowns(element, maxDepth, currentDepth = 0) {
                if (currentDepth >= maxDepth) return null;
                // Check all direct children
                for (let child of element.children) {
                    // Check if this child is a dropdown
                    const result = checkDropdownElement(child);
                    if (result) {
                        result.source = `child-depth-${currentDepth + 1}`;
                        return result;
                    }
                    // Recursively check this child's children
                    const childResult = searchChildrenForDropdowns(child, maxDepth, currentDepth + 1);
                    if (childResult) {
                        return childResult;
                    }
                }
                return null;
            }
            // First check the target element itself
            let dropdownResult = checkDropdownElement(startElement);
            if (dropdownResult) {
                return dropdownResult;
            }
            // If target element is not a dropdown, search children up to depth 4
            dropdownResult = searchChildrenForDropdowns(startElement, 4);
            if (dropdownResult) {
                return dropdownResult;
            }
            return {
                error: `Element and its children (depth 4) are not recognizable dropdown types (tag: ${startElement.tagName}, role: ${startElement.getAttribute('role')}, classes: ${startElement.className})`
            };
        }
        """
        result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': options_script,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        dropdown_data = result.get('result', {}).get('value', {})
        if dropdown_data.get('error'):
            raise BrowserError(message=dropdown_data['error'], long_term_memory=dropdown_data['error'])
        if not dropdown_data.get('options'):
            msg = f'No options found in dropdown at index {index_for_logging}'
            return {
                'error': msg,
                'short_term_memory': msg,
                'long_term_memory': msg,
                'backend_node_id': str(index_for_logging),
            }
        # Format options for display
        formatted_options = []
        for opt in dropdown_data['options']:
            # Use JSON encoding to ensure exact string matching
            encoded_text = json.dumps(opt['text'])
            status = ' (selected)' if opt.get('selected') else ''
            formatted_options.append(f'{opt["index"]}: text={encoded_text}, value={json.dumps(opt["value"])}{status}')
        dropdown_type = dropdown_data.get('type', 'select')
        element_info = f'Index: {index_for_logging}, Type: {dropdown_type}, ID: {dropdown_data.get("id", "none")}, Name: {dropdown_data.get("name", "none")}'
        source_info = dropdown_data.get('source', 'unknown')
        if source_info == 'target':
            msg = f'Found {dropdown_type} dropdown ({element_info}):\n' + '\n'.join(formatted_options)
        else:
            msg = f'Found {dropdown_type} dropdown in {source_info} ({element_info}):\n' + '\n'.join(formatted_options)
        msg += (
            f'\n\nUse the exact text or value string (without quotes) in select_dropdown(index={index_for_logging}, text=...)'
        )
        if source_info == 'target':
            self.logger.info(f'📋 Found {len(dropdown_data["options"])} dropdown options for index {index_for_logging}')
        else:
            self.logger.info(
                f'📋 Found {len(dropdown_data["options"])} dropdown options for index {index_for_logging} in {source_info}'
            )
        # Create structured memory for the response
        short_term_memory = msg
        long_term_memory = f'Got dropdown options for index {index_for_logging}'
        # Return the dropdown data as a dict with structured memory
        return {
            'type': dropdown_type,
            'options': json.dumps(dropdown_data['options']),  # Convert list to JSON string for dict[str, str] type
            'element_info': element_info,
            'source': source_info,
            'formatted_options': '\n'.join(formatted_options),
            'message': msg,
            'short_term_memory': short_term_memory,
            'long_term_memory': long_term_memory,
            'backend_node_id': str(index_for_logging),
        }
    except BrowserError:
        # Re-raise BrowserError as-is to preserve structured memory
        raise
    except TimeoutError:
        msg = f'Failed to get dropdown options for index {index_for_logging} due to timeout.'
        self.logger.error(msg)
        raise BrowserError(message=msg, long_term_memory=msg)
    except Exception as e:
        msg = 'Failed to get dropdown options'
        error_msg = f'{msg}: {str(e)}'
        self.logger.error(error_msg)
        raise BrowserError(
            message=error_msg, long_term_memory=f'Failed to get dropdown options for index {index_for_logging}.'
        )
async def _handle_aria_combobox_options(
    self,
    cdp_session,
    object_id: str,
    combobox_info: dict,
    index_for_logging: int | str,
) -> dict[str, str]:
    """Handle ARIA combobox elements with options in a separate listbox element.

    ARIA comboboxes (role="combobox") have options in a separate element referenced
    by aria-controls. Options may only be rendered when the combobox is expanded.
    This method:
    1. Expands the combobox if collapsed (by clicking/focusing it)
    2. Waits for options to render
    3. Finds options in the aria-controls referenced element
    4. Collapses the combobox after extracting options

    Args:
        cdp_session: CDP session for the combobox's context.
        object_id: CDP object ID of the combobox element.
        combobox_info: Dict with 'ariaControls' and 'isExpanded' from the caller's probe.
        index_for_logging: Backend node id (or 'unknown') used in messages.

    Returns:
        Dict of string values mirroring on_GetDropdownOptionsEvent's shape.

    Raises:
        BrowserError: when the referenced listbox cannot be found.
    """
    aria_controls_id = combobox_info.get('ariaControls')
    was_expanded = combobox_info.get('isExpanded', False)
    # If combobox is collapsed, expand it first to trigger option rendering
    if not was_expanded:
        # Use more robust expansion: dispatch proper DOM events that trigger event listeners
        expand_script = """
        function() {
            const element = this;
            // Dispatch focus event properly
            const focusEvent = new FocusEvent('focus', { bubbles: true, cancelable: true });
            element.dispatchEvent(focusEvent);
            // Also call native focus
            element.focus();
            // Dispatch focusin event (bubbles, unlike focus)
            const focusInEvent = new FocusEvent('focusin', { bubbles: true, cancelable: true });
            element.dispatchEvent(focusInEvent);
            // For some comboboxes, a click is needed
            const clickEvent = new MouseEvent('click', {
                bubbles: true,
                cancelable: true,
                view: window
            });
            element.dispatchEvent(clickEvent);
            // Some comboboxes respond to mousedown
            const mousedownEvent = new MouseEvent('mousedown', {
                bubbles: true,
                cancelable: true,
                view: window
            });
            element.dispatchEvent(mousedownEvent);
            return {
                success: true,
                ariaExpanded: element.getAttribute('aria-expanded')
            };
        }
        """
        await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': expand_script,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        # Give the framework time to render the options after expansion
        await asyncio.sleep(0.5)
    # Now extract options from the aria-controls referenced element
    extract_options_script = """
    function(ariaControlsId) {
        const combobox = this;
        // Find the listbox element referenced by aria-controls
        const listbox = document.getElementById(ariaControlsId);
        if (!listbox) {
            return {
                error: `Could not find listbox element with id "${ariaControlsId}" referenced by aria-controls`,
                ariaControlsId: ariaControlsId
            };
        }
        // Find all option elements in the listbox
        const optionElements = listbox.querySelectorAll('[role="option"]');
        const options = [];
        optionElements.forEach((item, idx) => {
            const text = item.textContent ? item.textContent.trim() : '';
            if (text) {
                options.push({
                    text: text,
                    value: item.getAttribute('data-value') || item.getAttribute('value') || text,
                    index: idx,
                    selected: item.getAttribute('aria-selected') === 'true' || item.classList.contains('selected')
                });
            }
        });
        // If no options with role="option", try other common patterns
        if (options.length === 0) {
            // Try li elements inside
            const liElements = listbox.querySelectorAll('li');
            liElements.forEach((item, idx) => {
                const text = item.textContent ? item.textContent.trim() : '';
                if (text) {
                    options.push({
                        text: text,
                        value: item.getAttribute('data-value') || item.getAttribute('value') || text,
                        index: idx,
                        selected: item.getAttribute('aria-selected') === 'true' || item.classList.contains('selected')
                    });
                }
            });
        }
        return {
            type: 'aria-combobox',
            options: options,
            id: combobox.id || '',
            name: combobox.getAttribute('aria-label') || combobox.getAttribute('name') || '',
            listboxId: ariaControlsId,
            source: 'aria-controls'
        };
    }
    """
    result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
        params={
            'functionDeclaration': extract_options_script,
            'objectId': object_id,
            'arguments': [{'value': aria_controls_id}],
            'returnByValue': True,
        },
        session_id=cdp_session.session_id,
    )
    dropdown_data = result.get('result', {}).get('value', {})
    # Collapse the combobox if we expanded it (blur to close)
    if not was_expanded:
        collapse_script = """
        function() {
            this.blur();
            // Also dispatch escape key to close dropdowns
            const escEvent = new KeyboardEvent('keydown', { key: 'Escape', bubbles: true });
            this.dispatchEvent(escEvent);
            return true;
        }
        """
        await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': collapse_script,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
    # Handle errors
    if dropdown_data.get('error'):
        raise BrowserError(message=dropdown_data['error'], long_term_memory=dropdown_data['error'])
    if not dropdown_data.get('options'):
        msg = f'No options found in ARIA combobox at index {index_for_logging} (listbox: {aria_controls_id})'
        return {
            'error': msg,
            'short_term_memory': msg,
            'long_term_memory': msg,
            'backend_node_id': str(index_for_logging),
        }
    # Format options for display (JSON-encoded text for exact string matching downstream)
    formatted_options = []
    for opt in dropdown_data['options']:
        encoded_text = json.dumps(opt['text'])
        status = ' (selected)' if opt.get('selected') else ''
        formatted_options.append(f'{opt["index"]}: text={encoded_text}, value={json.dumps(opt["value"])}{status}')
    dropdown_type = dropdown_data.get('type', 'aria-combobox')
    element_info = f'Index: {index_for_logging}, Type: {dropdown_type}, ID: {dropdown_data.get("id", "none")}, Name: {dropdown_data.get("name", "none")}'
    source_info = f'aria-controls → {aria_controls_id}'
    msg = f'Found {dropdown_type} dropdown ({element_info}):\n' + '\n'.join(formatted_options)
    msg += f'\n\nUse the exact text or value string (without quotes) in select_dropdown(index={index_for_logging}, text=...)'
    self.logger.info(f'📋 Found {len(dropdown_data["options"])} options in ARIA combobox at index {index_for_logging}')
    return {
        'type': dropdown_type,
        'options': json.dumps(dropdown_data['options']),
        'element_info': element_info,
        'source': source_info,
        'formatted_options': '\n'.join(formatted_options),
        'message': msg,
        'short_term_memory': msg,
        'long_term_memory': f'Got dropdown options for ARIA combobox at index {index_for_logging}',
        'backend_node_id': str(index_for_logging),
    }
	async def on_SelectDropdownOptionEvent(self, event: SelectDropdownOptionEvent) -> dict[str, str]:
		"""Select a dropdown option by visible text or value using CDP.

		Resolves the event's node to a JavaScript object, then runs an in-page
		script that handles native <select> elements, ARIA menu/listbox/combobox
		widgets, and Semantic-UI-style custom dropdowns (searching children up to
		depth 4 when the target itself is not a dropdown). Two recovery paths are
		attempted:
		  1. focus-and-retry once, when every reported option is empty (dropdowns
		     whose options are lazily populated on focus);
		  2. a synthetic mousedown/click/mouseup fallback, when a reactive
		     framework reverts the programmatic value assignment.

		Returns:
			dict of string values: 'success' ('true'/'false') plus 'message'/'value'
			on success, or 'error' (and optional short/long-term memory hints) on
			failure; always includes 'backend_node_id'.

		Raises:
			ValueError: if the node cannot be resolved to a JS object or the CDP
				interaction itself fails.
		"""
		try:
			# Use the provided node
			element_node = event.node
			index_for_logging = element_node.backend_node_id or 'unknown'
			target_text = event.text
			# Get CDP session for this node
			cdp_session = await self.browser_session.cdp_client_for_node(element_node)
			# Convert node to object ID for CDP operations
			try:
				object_result = await cdp_session.cdp_client.send.DOM.resolveNode(
					params={'backendNodeId': element_node.backend_node_id}, session_id=cdp_session.session_id
				)
				remote_object = object_result.get('object', {})
				object_id = remote_object.get('objectId')
				if not object_id:
					raise ValueError('Could not get object ID from resolved node')
			except Exception as e:
				raise ValueError(f'Failed to resolve node to object: {e}') from e
			try:
				# Use JavaScript to select the option
				# NOTE: this script is executed verbatim in the page; it returns a
				# plain object ({success, message/error, ...}) consumed below.
				selection_script = """
				function(targetText) {
					const startElement = this;
					// Function to attempt selection on a dropdown element
					function attemptSelection(element) {
						// Handle native select elements
						if (element.tagName.toLowerCase() === 'select') {
							const options = Array.from(element.options);
							const targetTextLower = targetText.toLowerCase();
							for (const option of options) {
								const optionTextLower = option.text.trim().toLowerCase();
								const optionValueLower = option.value.toLowerCase();
								// Match against both text and value (case-insensitive)
								if (optionTextLower === targetTextLower || optionValueLower === targetTextLower) {
									const expectedValue = option.value;
									// Focus the element FIRST (important for Svelte/Vue/React and other reactive frameworks)
									// This simulates the user focusing on the dropdown before changing it
									element.focus();
									// Then set the value using multiple methods for maximum compatibility
									element.value = expectedValue;
									option.selected = true;
									element.selectedIndex = option.index;
									// Trigger all necessary events for reactive frameworks
									// 1. input event - critical for Vue's v-model and Svelte's bind:value
									const inputEvent = new Event('input', { bubbles: true, cancelable: true });
									element.dispatchEvent(inputEvent);
									// 2. change event - traditional form validation and framework reactivity
									const changeEvent = new Event('change', { bubbles: true, cancelable: true });
									element.dispatchEvent(changeEvent);
									// 3. blur event - completes the interaction, triggers validation
									element.blur();
									// Verification: Check if the selection actually stuck (avoid intercepting and resetting the value)
									if (element.value !== expectedValue) {
										// Selection was reverted - need to try clicking instead
										return {
											success: false,
											error: `Selection was set but reverted by page framework. The dropdown may require clicking.`,
											selectionReverted: true,
											targetOption: {
												text: option.text.trim(),
												value: expectedValue,
												index: option.index
											},
											availableOptions: Array.from(element.options).map(opt => ({
												text: opt.text.trim(),
												value: opt.value
											}))
										};
									}
									return {
										success: true,
										message: `Selected option: ${option.text.trim()} (value: ${option.value})`,
										value: option.value
									};
								}
							}
							// Return available options as separate field
							const availableOptions = options.map(opt => ({
								text: opt.text.trim(),
								value: opt.value
							}));
							return {
								success: false,
								error: `Option with text or value '${targetText}' not found in select element`,
								availableOptions: availableOptions
							};
						}
						// Handle ARIA dropdowns/menus
						const role = element.getAttribute('role');
						if (role === 'menu' || role === 'listbox' || role === 'combobox') {
							const menuItems = element.querySelectorAll('[role="menuitem"], [role="option"]');
							const targetTextLower = targetText.toLowerCase();
							for (const item of menuItems) {
								if (item.textContent) {
									const itemTextLower = item.textContent.trim().toLowerCase();
									const itemValueLower = (item.getAttribute('data-value') || '').toLowerCase();
									// Match against both text and data-value (case-insensitive)
									if (itemTextLower === targetTextLower || itemValueLower === targetTextLower) {
										// Clear previous selections
										menuItems.forEach(mi => {
											mi.setAttribute('aria-selected', 'false');
											mi.classList.remove('selected');
										});
										// Select this item
										item.setAttribute('aria-selected', 'true');
										item.classList.add('selected');
										// Trigger click and change events
										item.click();
										const clickEvent = new MouseEvent('click', { view: window, bubbles: true, cancelable: true });
										item.dispatchEvent(clickEvent);
										return {
											success: true,
											message: `Selected ARIA menu item: ${item.textContent.trim()}`
										};
									}
								}
							}
							// Return available options as separate field
							const availableOptions = Array.from(menuItems).map(item => ({
								text: item.textContent ? item.textContent.trim() : '',
								value: item.getAttribute('data-value') || ''
							})).filter(opt => opt.text || opt.value);
							return {
								success: false,
								error: `Menu item with text or value '${targetText}' not found`,
								availableOptions: availableOptions
							};
						}
						// Handle Semantic UI or custom dropdowns
						if (element.classList.contains('dropdown') || element.classList.contains('ui')) {
							const menuItems = element.querySelectorAll('.item, .option, [data-value]');
							const targetTextLower = targetText.toLowerCase();
							for (const item of menuItems) {
								if (item.textContent) {
									const itemTextLower = item.textContent.trim().toLowerCase();
									const itemValueLower = (item.getAttribute('data-value') || '').toLowerCase();
									// Match against both text and data-value (case-insensitive)
									if (itemTextLower === targetTextLower || itemValueLower === targetTextLower) {
										// Clear previous selections
										menuItems.forEach(mi => {
											mi.classList.remove('selected', 'active');
										});
										// Select this item
										item.classList.add('selected', 'active');
										// Update dropdown text if there's a text element
										const textElement = element.querySelector('.text');
										if (textElement) {
											textElement.textContent = item.textContent.trim();
										}
										// Trigger click and change events
										item.click();
										const clickEvent = new MouseEvent('click', { view: window, bubbles: true, cancelable: true });
										item.dispatchEvent(clickEvent);
										// Also dispatch on the main dropdown element
										const dropdownChangeEvent = new Event('change', { bubbles: true });
										element.dispatchEvent(dropdownChangeEvent);
										return {
											success: true,
											message: `Selected custom dropdown item: ${item.textContent.trim()}`
										};
									}
								}
							}
							// Return available options as separate field
							const availableOptions = Array.from(menuItems).map(item => ({
								text: item.textContent ? item.textContent.trim() : '',
								value: item.getAttribute('data-value') || ''
							})).filter(opt => opt.text || opt.value);
							return {
								success: false,
								error: `Custom dropdown item with text or value '${targetText}' not found`,
								availableOptions: availableOptions
							};
						}
						return null; // Not a dropdown element
					}
					// Function to recursively search children for dropdowns
					function searchChildrenForSelection(element, maxDepth, currentDepth = 0) {
						if (currentDepth >= maxDepth) return null;
						// Check all direct children
						for (let child of element.children) {
							// Try selection on this child
							const result = attemptSelection(child);
							if (result && result.success) {
								return result;
							}
							// Recursively check this child's children
							const childResult = searchChildrenForSelection(child, maxDepth, currentDepth + 1);
							if (childResult && childResult.success) {
								return childResult;
							}
						}
						return null;
					}
					// First try the target element itself
					let selectionResult = attemptSelection(startElement);
					if (selectionResult) {
						// If attemptSelection returned a result (success or failure), use it
						// Don't search children if we found a dropdown element but selection failed
						return selectionResult;
					}
					// Only search children if target element is not a dropdown element
					selectionResult = searchChildrenForSelection(startElement, 4);
					if (selectionResult && selectionResult.success) {
						return selectionResult;
					}
					return {
						success: false,
						error: `Element and its children (depth 4) do not contain a dropdown with option '${targetText}' (tag: ${startElement.tagName}, role: ${startElement.getAttribute('role')}, classes: ${startElement.className})`
					};
				}
				"""
				result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
					params={
						'functionDeclaration': selection_script,
						'arguments': [{'value': target_text}],
						'objectId': object_id,
						'returnByValue': True,
					},
					session_id=cdp_session.session_id,
				)
				selection_result = result.get('result', {}).get('value', {})
				# If selection failed and all options are empty, the dropdown may be lazily populated.
				# Focus the element (triggers lazy loaders) and retry once after a wait.
				if not selection_result.get('success'):
					available_options = selection_result.get('availableOptions', [])
					# True only when options exist but every one is blank (text AND value empty).
					all_empty = available_options and all(
						(not opt.get('text', '').strip() and not opt.get('value', '').strip())
						if isinstance(opt, dict)
						else not str(opt).strip()
						for opt in available_options
					)
					if all_empty:
						self.logger.info(
							'⚠️ All dropdown options are empty — options may be lazily loaded. Focusing element and retrying...'
						)
						# Use element.focus() only — no synthetic mouse events that leak isTrusted=false
						try:
							await cdp_session.cdp_client.send.Runtime.callFunctionOn(
								params={
									'functionDeclaration': 'function() { this.focus(); }',
									'objectId': object_id,
								},
								session_id=cdp_session.session_id,
							)
						except Exception:
							pass  # non-fatal, best-effort
						await asyncio.sleep(1.0)
						retry_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
							params={
								'functionDeclaration': selection_script,
								'arguments': [{'value': target_text}],
								'objectId': object_id,
								'returnByValue': True,
							},
							session_id=cdp_session.session_id,
						)
						selection_result = retry_result.get('result', {}).get('value', {})
				# Check if selection was reverted by framework - try clicking as fallback
				if selection_result.get('selectionReverted'):
					self.logger.info('⚠️ Selection was reverted by page framework, trying click fallback...')
					target_option = selection_result.get('targetOption', {})
					option_index = target_option.get('index', 0)
					# Try clicking on the option element directly
					click_fallback_script = """
					function(optionIndex) {
						const select = this;
						if (select.tagName.toLowerCase() !== 'select') return { success: false, error: 'Not a select element' };
						const option = select.options[optionIndex];
						if (!option) return { success: false, error: 'Option not found at index ' + optionIndex };
						// Method 1: Try using the native selectedIndex setter with a small delay
						const originalValue = select.value;
						// Simulate opening the dropdown (some frameworks need this)
						select.focus();
						const mouseDown = new MouseEvent('mousedown', { bubbles: true, cancelable: true, view: window });
						select.dispatchEvent(mouseDown);
						// Set using selectedIndex (more reliable for some frameworks)
						select.selectedIndex = optionIndex;
						// Click the option
						option.selected = true;
						const optionClick = new MouseEvent('click', { bubbles: true, cancelable: true, view: window });
						option.dispatchEvent(optionClick);
						// Close dropdown
						const mouseUp = new MouseEvent('mouseup', { bubbles: true, cancelable: true, view: window });
						select.dispatchEvent(mouseUp);
						// Fire change event
						const changeEvent = new Event('change', { bubbles: true, cancelable: true });
						select.dispatchEvent(changeEvent);
						// Blur to finalize
						select.blur();
						// Verify
						if (select.value === option.value || select.selectedIndex === optionIndex) {
							return {
								success: true,
								message: 'Selected via click fallback: ' + option.text.trim(),
								value: option.value
							};
						}
						return {
							success: false,
							error: 'Click fallback also failed - framework may block all programmatic selection',
							finalValue: select.value,
							expectedValue: option.value
						};
					}
					"""
					fallback_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
						params={
							'functionDeclaration': click_fallback_script,
							'arguments': [{'value': option_index}],
							'objectId': object_id,
							'returnByValue': True,
						},
						session_id=cdp_session.session_id,
					)
					fallback_data = fallback_result.get('result', {}).get('value', {})
					if fallback_data.get('success'):
						msg = fallback_data.get('message', f'Selected option via click: {target_text}')
						self.logger.info(f'✅ {msg}')
						return {
							'success': 'true',
							'message': msg,
							'value': fallback_data.get('value', target_text),
							'backend_node_id': str(index_for_logging),
						}
					else:
						self.logger.warning(f'⚠️ Click fallback also failed: {fallback_data.get("error", "unknown")}')
						# Continue to error handling below
				if selection_result.get('success'):
					msg = selection_result.get('message', f'Selected option: {target_text}')
					self.logger.debug(f'{msg}')
					# Return the result as a dict
					return {
						'success': 'true',
						'message': msg,
						'value': selection_result.get('value', target_text),
						'backend_node_id': str(index_for_logging),
					}
				else:
					error_msg = selection_result.get('error', f'Failed to select option: {target_text}')
					available_options = selection_result.get('availableOptions', [])
					self.logger.error(f'❌ {error_msg}')
					self.logger.debug(f'Available options from JavaScript: {available_options}')
					# If we have available options, return structured error data
					if available_options:
						# Format options for short_term_memory (simple bulleted list)
						short_term_options = []
						for opt in available_options:
							if isinstance(opt, dict):
								text = opt.get('text', '').strip()
								value = opt.get('value', '').strip()
								if text:
									short_term_options.append(f'- {text}')
								elif value:
									short_term_options.append(f'- {value}')
							elif isinstance(opt, str):
								short_term_options.append(f'- {opt}')
						if short_term_options:
							short_term_memory = 'Available dropdown options are:\n' + '\n'.join(short_term_options)
							long_term_memory = (
								f"Couldn't select the dropdown option as '{target_text}' is not one of the available options."
							)
							# Return error result with structured memory instead of raising exception
							return {
								'success': 'false',
								'error': error_msg,
								'short_term_memory': short_term_memory,
								'long_term_memory': long_term_memory,
								'backend_node_id': str(index_for_logging),
							}
					# Fallback to regular error result if no available options
					return {
						'success': 'false',
						'error': error_msg,
						'backend_node_id': str(index_for_logging),
					}
			except Exception as e:
				error_msg = f'Failed to select dropdown option: {str(e)}'
				self.logger.error(error_msg)
				raise ValueError(error_msg) from e
		except Exception as e:
			error_msg = f'Failed to select dropdown option "{target_text}" for element {index_for_logging}: {str(e)}'
			self.logger.error(error_msg)
			raise ValueError(error_msg) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/default_action_watchdog.py",
"license": "MIT License",
"lines": 3141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/dom_watchdog.py | """DOM watchdog for browser DOM tree management using CDP."""
import asyncio
import time
from typing import TYPE_CHECKING
from browser_use.browser.events import (
BrowserErrorEvent,
BrowserStateRequestEvent,
ScreenshotEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.dom.service import DomService
from browser_use.dom.views import (
EnhancedDOMTreeNode,
SerializedDOMState,
)
from browser_use.observability import observe_debug
from browser_use.utils import create_task_with_error_handling, time_execution_async
if TYPE_CHECKING:
from browser_use.browser.views import BrowserStateSummary, NetworkRequest, PageInfo, PaginationButton
class DOMWatchdog(BaseWatchdog):
"""Handles DOM tree building, serialization, and element access via CDP.
This watchdog acts as a bridge between the event-driven browser session
and the DomService implementation, maintaining cached state and providing
helper methods for other watchdogs.
"""
LISTENS_TO = [TabCreatedEvent, BrowserStateRequestEvent]
EMITS = [BrowserErrorEvent]
# Public properties for other watchdogs
selector_map: dict[int, EnhancedDOMTreeNode] | None = None
current_dom_state: SerializedDOMState | None = None
enhanced_dom_tree: EnhancedDOMTreeNode | None = None
# Internal DOM service
_dom_service: DomService | None = None
# Network tracking - maps request_id to (url, start_time, method, resource_type)
_pending_requests: dict[str, tuple[str, float, str, str | None]] = {}
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
# self.logger.debug('Setting up init scripts in browser')
return None
def _get_recent_events_str(self, limit: int = 10) -> str | None:
"""Get the most recent events from the event bus as JSON.
Args:
limit: Maximum number of recent events to include
Returns:
JSON string of recent events or None if not available
"""
import json
try:
# Get all events from history, sorted by creation time (most recent first)
all_events = sorted(
self.browser_session.event_bus.event_history.values(), key=lambda e: e.event_created_at.timestamp(), reverse=True
)
# Take the most recent events and create JSON-serializable data
recent_events_data = []
for event in all_events[:limit]:
event_data = {
'event_type': event.event_type,
'timestamp': event.event_created_at.isoformat(),
}
# Add specific fields for certain event types
if hasattr(event, 'url'):
event_data['url'] = getattr(event, 'url')
if hasattr(event, 'error_message'):
event_data['error_message'] = getattr(event, 'error_message')
if hasattr(event, 'target_id'):
event_data['target_id'] = getattr(event, 'target_id')
recent_events_data.append(event_data)
return json.dumps(recent_events_data) # Return empty array if no events
except Exception as e:
self.logger.debug(f'Failed to get recent events: {e}')
return json.dumps([]) # Return empty JSON array on error
	async def _get_pending_network_requests(self) -> list['NetworkRequest']:
		"""Get list of currently pending network requests.

		Uses document.readyState and performance API to detect pending requests
		(resources whose Resource Timing `responseEnd` is still 0). Filters out
		ads, tracking, stuck requests (>10s), and slow non-critical resources
		such as images/fonts (>3s).

		Returns:
			List of NetworkRequest objects (at most 20) representing currently
			loading resources; empty list when nothing is pending or on any error.
		"""
		from browser_use.browser.views import NetworkRequest

		try:
			# get_or_create_cdp_session() now handles focus validation automatically
			cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
			# Use performance API to get pending requests
			# NOTE: evaluated verbatim in the page; returns pending requests plus
			# a `debug` sub-object used only for logging below.
			js_code = """
			(function() {
				const now = performance.now();
				const resources = performance.getEntriesByType('resource');
				const pending = [];
				// Check document readyState
				const docLoading = document.readyState !== 'complete';
				// Common ad/tracking domains and patterns to filter out
				const adDomains = [
					// Standard ad/tracking networks
					'doubleclick.net', 'googlesyndication.com', 'googletagmanager.com',
					'facebook.net', 'analytics', 'ads', 'tracking', 'pixel',
					'hotjar.com', 'clarity.ms', 'mixpanel.com', 'segment.com',
					// Analytics platforms
					'demdex.net', 'omtrdc.net', 'adobedtm.com', 'ensighten.com',
					'newrelic.com', 'nr-data.net', 'google-analytics.com',
					// Social media trackers
					'connect.facebook.net', 'platform.twitter.com', 'platform.linkedin.com',
					// CDN/image hosts (usually not critical for functionality)
					'.cloudfront.net/image/', '.akamaized.net/image/',
					// Common tracking paths
					'/tracker/', '/collector/', '/beacon/', '/telemetry/', '/log/',
					'/events/', '/eventBatch', '/track.', '/metrics/'
				];
				// Get resources that are still loading (responseEnd is 0)
				let totalResourcesChecked = 0;
				let filteredByResponseEnd = 0;
				const allDomains = new Set();
				for (const entry of resources) {
					totalResourcesChecked++;
					// Track all domains from recent resources (for logging)
					try {
						const hostname = new URL(entry.name).hostname;
						if (hostname) allDomains.add(hostname);
					} catch (e) {}
					if (entry.responseEnd === 0) {
						filteredByResponseEnd++;
						const url = entry.name;
						// Filter out ads and tracking
						const isAd = adDomains.some(domain => url.includes(domain));
						if (isAd) continue;
						// Filter out data: URLs and very long URLs (often inline resources)
						if (url.startsWith('data:') || url.length > 500) continue;
						const loadingDuration = now - entry.startTime;
						// Skip requests that have been loading for >10 seconds (likely stuck/polling)
						if (loadingDuration > 10000) continue;
						const resourceType = entry.initiatorType || 'unknown';
						// Filter out non-critical resources (images, fonts, icons) if loading >3 seconds
						const nonCriticalTypes = ['img', 'image', 'icon', 'font'];
						if (nonCriticalTypes.includes(resourceType) && loadingDuration > 3000) continue;
						// Filter out image URLs even if type is unknown
						const isImageUrl = /\\.(jpg|jpeg|png|gif|webp|svg|ico)(\\?|$)/i.test(url);
						if (isImageUrl && loadingDuration > 3000) continue;
						pending.push({
							url: url,
							method: 'GET',
							loading_duration_ms: Math.round(loadingDuration),
							resource_type: resourceType
						});
					}
				}
				return {
					pending_requests: pending,
					document_loading: docLoading,
					document_ready_state: document.readyState,
					debug: {
						total_resources: totalResourcesChecked,
						with_response_end_zero: filteredByResponseEnd,
						after_all_filters: pending.length,
						all_domains: Array.from(allDomains)
					}
				};
			})()
			"""
			result = await cdp_session.cdp_client.send.Runtime.evaluate(
				params={'expression': js_code, 'returnByValue': True}, session_id=cdp_session.session_id
			)
			if result.get('result', {}).get('type') == 'object':
				data = result['result'].get('value', {})
				pending = data.get('pending_requests', [])
				doc_state = data.get('document_ready_state', 'unknown')
				doc_loading = data.get('document_loading', False)
				debug_info = data.get('debug', {})
				# Get all domains that had recent activity (from JS)
				all_domains = debug_info.get('all_domains', [])
				all_domains_str = ', '.join(sorted(all_domains)[:5]) if all_domains else 'none'
				if len(all_domains) > 5:
					all_domains_str += f' +{len(all_domains) - 5} more'
				# Debug logging
				self.logger.debug(
					f'🔍 Network check: document.readyState={doc_state}, loading={doc_loading}, '
					f'total_resources={debug_info.get("total_resources", 0)}, '
					f'responseEnd=0: {debug_info.get("with_response_end_zero", 0)}, '
					f'after_filters={len(pending)}, domains=[{all_domains_str}]'
				)
				# Convert to NetworkRequest objects
				network_requests = []
				for req in pending[:20]:  # Limit to 20 to avoid overwhelming the context
					network_requests.append(
						NetworkRequest(
							url=req['url'],
							method=req.get('method', 'GET'),
							loading_duration_ms=req.get('loading_duration_ms', 0.0),
							resource_type=req.get('resource_type'),
						)
					)
				return network_requests
		except Exception as e:
			self.logger.debug(f'Failed to get pending network requests: {e}')
		# Reached when the evaluate result is not an object, or on any error above.
		return []
	@observe_debug(ignore_input=True, ignore_output=True, name='browser_state_request_event')
	async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> 'BrowserStateSummary':
		"""Handle browser state request by coordinating DOM building and screenshot capture.

		This is the main entry point for getting the complete browser state.
		Flow: short stability wait (only when network requests are pending) →
		fast path for non-http(s) pages → parallel DOM build + clean screenshot →
		browser-side highlighting → assemble, cache, and return the summary.
		Any top-level failure yields a minimal recovery state rather than raising.

		Args:
			event: The browser state request event with options
				(include_dom / include_screenshot / include_recent_events).

		Returns:
			Complete BrowserStateSummary with DOM, screenshot, and target info;
			a minimal error-state summary if anything fails.
		"""
		from browser_use.browser.views import BrowserStateSummary, PageInfo

		self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: STARTING browser state request')
		page_url = await self.browser_session.get_current_page_url()
		self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got page URL: {page_url}')
		# Get focused session for logging (validation already done by get_current_page_url)
		if self.browser_session.agent_focus_target_id:
			self.logger.debug(f'Current page URL: {page_url}, target_id: {self.browser_session.agent_focus_target_id}')
		# check if we should skip DOM tree build for pointless pages
		not_a_meaningful_website = page_url.lower().split(':', 1)[0] not in ('http', 'https')
		# Check for pending network requests BEFORE waiting (so we can see what's loading)
		pending_requests_before_wait = []
		if not not_a_meaningful_website:
			try:
				pending_requests_before_wait = await self._get_pending_network_requests()
				if pending_requests_before_wait:
					self.logger.debug(f'🔍 Found {len(pending_requests_before_wait)} pending requests before stability wait')
			except Exception as e:
				self.logger.debug(f'Failed to get pending requests before wait: {e}')
		pending_requests = pending_requests_before_wait
		# Wait for page stability using browser profile settings (main branch pattern)
		if not not_a_meaningful_website:
			self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ⏳ Waiting for page stability...')
			try:
				if pending_requests_before_wait:
					# Reduced from 1s to 0.3s for faster DOM builds while still allowing critical resources to load
					await asyncio.sleep(0.3)
				self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Page stability complete')
			except Exception as e:
				self.logger.warning(
					f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Network waiting failed: {e}, continuing anyway...'
				)
		# Get tabs info once at the beginning for all paths
		self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting tabs info...')
		tabs_info = await self.browser_session.get_tabs()
		self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got {len(tabs_info)} tabs')
		self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Tabs info: {tabs_info}')
		# Get viewport / scroll position info, remember changing scroll position should invalidate selector_map cache because it only includes visible elements
		# cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
		# scroll_info = await cdp_session.cdp_client.send.Runtime.evaluate(
		# 	params={'expression': 'JSON.stringify({y: document.body.scrollTop, x: document.body.scrollLeft, width: document.documentElement.clientWidth, height: document.documentElement.clientHeight})'},
		# 	session_id=cdp_session.session_id,
		# )
		# self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got scroll info: {scroll_info["result"]}')
		try:
			# Fast path for empty pages
			if not_a_meaningful_website:
				self.logger.debug(f'⚡ Skipping BuildDOMTree for empty target: {page_url}')
				self.logger.debug(f'📸 Not taking screenshot for empty page: {page_url} (non-http/https URL)')
				# Create minimal DOM state
				content = SerializedDOMState(_root=None, selector_map={})
				# Skip screenshot for empty pages
				screenshot_b64 = None
				# Try to get page info from CDP, fall back to defaults if unavailable
				try:
					page_info = await self._get_page_info()
				except Exception as e:
					self.logger.debug(f'Failed to get page info from CDP for empty page: {e}, using fallback')
					# Use default viewport dimensions
					viewport = self.browser_session.browser_profile.viewport or {'width': 1280, 'height': 720}
					page_info = PageInfo(
						viewport_width=viewport['width'],
						viewport_height=viewport['height'],
						page_width=viewport['width'],
						page_height=viewport['height'],
						scroll_x=0,
						scroll_y=0,
						pixels_above=0,
						pixels_below=0,
						pixels_left=0,
						pixels_right=0,
					)
				return BrowserStateSummary(
					dom_state=content,
					url=page_url,
					title='Empty Tab',
					tabs=tabs_info,
					screenshot=screenshot_b64,
					page_info=page_info,
					pixels_above=0,
					pixels_below=0,
					browser_errors=[],
					is_pdf_viewer=False,
					recent_events=self._get_recent_events_str() if event.include_recent_events else None,
					pending_network_requests=[],  # Empty page has no pending requests
					pagination_buttons=[],  # Empty page has no pagination
					closed_popup_messages=self.browser_session._closed_popup_messages.copy(),
				)
			# Execute DOM building and screenshot capture in parallel
			dom_task = None
			screenshot_task = None
			# Start DOM building task if requested
			if event.include_dom:
				self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 🌳 Starting DOM tree build task...')
				# Previous cached DOM state (if any) lets the service reuse work.
				previous_state = (
					self.browser_session._cached_browser_state_summary.dom_state
					if self.browser_session._cached_browser_state_summary
					else None
				)
				dom_task = create_task_with_error_handling(
					self._build_dom_tree_without_highlights(previous_state),
					name='build_dom_tree',
					logger_instance=self.logger,
					suppress_exceptions=True,
				)
			# Start clean screenshot task if requested (without JS highlights)
			if event.include_screenshot:
				self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Starting clean screenshot task...')
				screenshot_task = create_task_with_error_handling(
					self._capture_clean_screenshot(),
					name='capture_screenshot',
					logger_instance=self.logger,
					suppress_exceptions=True,
				)
			# Wait for both tasks to complete
			content = None
			screenshot_b64 = None
			if dom_task:
				try:
					content = await dom_task
					self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ DOM tree build completed')
				except Exception as e:
					self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: DOM build failed: {e}, using minimal state')
					content = SerializedDOMState(_root=None, selector_map={})
			else:
				content = SerializedDOMState(_root=None, selector_map={})
			if screenshot_task:
				try:
					screenshot_b64 = await screenshot_task
					self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Clean screenshot captured')
				except Exception as e:
					self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Clean screenshot failed: {e}')
					screenshot_b64 = None
			# Add browser-side highlights for user visibility
			if content and content.selector_map and self.browser_session.browser_profile.dom_highlight_elements:
				try:
					self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 🎨 Adding browser-side highlights...')
					await self.browser_session.add_highlights(content.selector_map)
					self.logger.debug(
						f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Added browser highlights for {len(content.selector_map)} elements'
					)
				except Exception as e:
					self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Browser highlighting failed: {e}')
			# Ensure we have valid content
			if not content:
				content = SerializedDOMState(_root=None, selector_map={})
			# Tabs info already fetched at the beginning
			# Get target title safely
			try:
				self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting page title...')
				title = await asyncio.wait_for(self.browser_session.get_current_page_title(), timeout=1.0)
				self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got title: {title}')
			except Exception as e:
				self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Failed to get title: {e}')
				title = 'Page'
			# Get comprehensive page info from CDP with timeout
			try:
				self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting page info from CDP...')
				page_info = await asyncio.wait_for(self._get_page_info(), timeout=1.0)
				self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got page info from CDP: {page_info}')
			except Exception as e:
				self.logger.debug(
					f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Failed to get page info from CDP: {e}, using fallback'
				)
				# Fallback to default viewport dimensions
				viewport = self.browser_session.browser_profile.viewport or {'width': 1280, 'height': 720}
				page_info = PageInfo(
					viewport_width=viewport['width'],
					viewport_height=viewport['height'],
					page_width=viewport['width'],
					page_height=viewport['height'],
					scroll_x=0,
					scroll_y=0,
					pixels_above=0,
					pixels_below=0,
					pixels_left=0,
					pixels_right=0,
				)
			# Check for PDF viewer (URL heuristic only)
			is_pdf_viewer = page_url.endswith('.pdf') or '/pdf/' in page_url
			# Detect pagination buttons from the DOM
			pagination_buttons_data = []
			if content and content.selector_map:
				pagination_buttons_data = self._detect_pagination_buttons(content.selector_map)
			# Build and cache the browser state summary
			if screenshot_b64:
				self.logger.debug(
					f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Creating BrowserStateSummary with screenshot, length: {len(screenshot_b64)}'
				)
			else:
				self.logger.debug(
					'🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Creating BrowserStateSummary WITHOUT screenshot'
				)
			browser_state = BrowserStateSummary(
				dom_state=content,
				url=page_url,
				title=title,
				tabs=tabs_info,
				screenshot=screenshot_b64,
				page_info=page_info,
				pixels_above=0,
				pixels_below=0,
				browser_errors=[],
				is_pdf_viewer=is_pdf_viewer,
				recent_events=self._get_recent_events_str() if event.include_recent_events else None,
				pending_network_requests=pending_requests,
				pagination_buttons=pagination_buttons_data,
				closed_popup_messages=self.browser_session._closed_popup_messages.copy(),
			)
			# Cache the state
			self.browser_session._cached_browser_state_summary = browser_state
			# Cache viewport size for coordinate conversion (if llm_screenshot_size is enabled)
			if page_info:
				self.browser_session._original_viewport_size = (page_info.viewport_width, page_info.viewport_height)
			self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ COMPLETED - Returning browser state')
			return browser_state
		except Exception as e:
			self.logger.error(f'Failed to get browser state: {e}')
			# Return minimal recovery state
			return BrowserStateSummary(
				dom_state=SerializedDOMState(_root=None, selector_map={}),
				url=page_url if 'page_url' in locals() else '',
				title='Error',
				tabs=[],
				screenshot=None,
				page_info=PageInfo(
					viewport_width=1280,
					viewport_height=720,
					page_width=1280,
					page_height=720,
					scroll_x=0,
					scroll_y=0,
					pixels_above=0,
					pixels_below=0,
					pixels_left=0,
					pixels_right=0,
				),
				pixels_above=0,
				pixels_below=0,
				browser_errors=[str(e)],
				is_pdf_viewer=False,
				recent_events=None,
				pending_network_requests=[],  # Error state has no pending requests
				pagination_buttons=[],  # Error state has no pagination
				closed_popup_messages=self.browser_session._closed_popup_messages.copy()
				if hasattr(self, 'browser_session') and self.browser_session is not None
				else [],
			)
@time_execution_async('build_dom_tree_without_highlights')
@observe_debug(ignore_input=True, ignore_output=True, name='build_dom_tree_without_highlights')
async def _build_dom_tree_without_highlights(self, previous_state: SerializedDOMState | None = None) -> SerializedDOMState:
	"""Build DOM tree without injecting JavaScript highlights (for parallel execution).

	Delegates the heavy lifting to DomService.get_serialized_dom_tree, logs a
	hierarchical timing breakdown of the build, and refreshes the cached
	selector maps (both on this watchdog and on the BrowserSession).

	Args:
		previous_state: Previously serialized DOM state; forwarded to the DOM
			service so it can reuse cached data from the prior build.

	Returns:
		The freshly serialized DOM state (also cached on self.current_dom_state).

	Raises:
		Exception: Any build failure is re-raised after dispatching a
			BrowserErrorEvent with error_type='DOMBuildFailed'.
	"""
	try:
		self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: STARTING DOM tree build')

		# Create or reuse DOM service (kept alive between builds to reuse its CDP client connection)
		if self._dom_service is None:
			self._dom_service = DomService(
				browser_session=self.browser_session,
				logger=self.logger,
				cross_origin_iframes=self.browser_session.browser_profile.cross_origin_iframes,
				paint_order_filtering=self.browser_session.browser_profile.paint_order_filtering,
				max_iframes=self.browser_session.browser_profile.max_iframes,
				max_iframe_depth=self.browser_session.browser_profile.max_iframe_depth,
			)

		# Get serialized DOM tree using the service
		self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: Calling DomService.get_serialized_dom_tree...')
		start = time.time()
		self.current_dom_state, self.enhanced_dom_tree, timing_info = await self._dom_service.get_serialized_dom_tree(
			previous_cached_state=previous_state,
		)
		end = time.time()
		total_time_ms = (end - start) * 1000

		self.logger.debug(
			'🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ DomService.get_serialized_dom_tree completed'
		)

		# Build hierarchical timing breakdown as single multi-line string
		timing_lines = [f'⏱️ Total DOM tree time: {total_time_ms:.2f}ms', '📊 Timing breakdown:']

		# get_all_trees breakdown (sub-phases logged only when the parent phase ran)
		get_all_trees_ms = timing_info.get('get_all_trees_total_ms', 0)
		if get_all_trees_ms > 0:
			timing_lines.append(f' ├─ get_all_trees: {get_all_trees_ms:.2f}ms')
			iframe_scroll_ms = timing_info.get('iframe_scroll_detection_ms', 0)
			cdp_parallel_ms = timing_info.get('cdp_parallel_calls_ms', 0)
			snapshot_proc_ms = timing_info.get('snapshot_processing_ms', 0)
			if iframe_scroll_ms > 0.01:
				timing_lines.append(f' │ ├─ iframe_scroll_detection: {iframe_scroll_ms:.2f}ms')
			if cdp_parallel_ms > 0.01:
				timing_lines.append(f' │ ├─ cdp_parallel_calls: {cdp_parallel_ms:.2f}ms')
			if snapshot_proc_ms > 0.01:
				timing_lines.append(f' │ └─ snapshot_processing: {snapshot_proc_ms:.2f}ms')

		# build_ax_lookup
		build_ax_ms = timing_info.get('build_ax_lookup_ms', 0)
		if build_ax_ms > 0.01:
			timing_lines.append(f' ├─ build_ax_lookup: {build_ax_ms:.2f}ms')

		# build_snapshot_lookup
		build_snapshot_ms = timing_info.get('build_snapshot_lookup_ms', 0)
		if build_snapshot_ms > 0.01:
			timing_lines.append(f' ├─ build_snapshot_lookup: {build_snapshot_ms:.2f}ms')

		# construct_enhanced_tree
		construct_tree_ms = timing_info.get('construct_enhanced_tree_ms', 0)
		if construct_tree_ms > 0.01:
			timing_lines.append(f' ├─ construct_enhanced_tree: {construct_tree_ms:.2f}ms')

		# serialize_accessible_elements breakdown
		serialize_total_ms = timing_info.get('serialize_accessible_elements_total_ms', 0)
		if serialize_total_ms > 0.01:
			timing_lines.append(f' ├─ serialize_accessible_elements: {serialize_total_ms:.2f}ms')
			create_simp_ms = timing_info.get('create_simplified_tree_ms', 0)
			paint_order_ms = timing_info.get('calculate_paint_order_ms', 0)
			optimize_ms = timing_info.get('optimize_tree_ms', 0)
			bbox_ms = timing_info.get('bbox_filtering_ms', 0)
			assign_idx_ms = timing_info.get('assign_interactive_indices_ms', 0)
			clickable_ms = timing_info.get('clickable_detection_time_ms', 0)
			if create_simp_ms > 0.01:
				timing_lines.append(f' │ ├─ create_simplified_tree: {create_simp_ms:.2f}ms')
			if clickable_ms > 0.01:
				timing_lines.append(f' │ │ └─ clickable_detection: {clickable_ms:.2f}ms')
			if paint_order_ms > 0.01:
				timing_lines.append(f' │ ├─ calculate_paint_order: {paint_order_ms:.2f}ms')
			if optimize_ms > 0.01:
				timing_lines.append(f' │ ├─ optimize_tree: {optimize_ms:.2f}ms')
			if bbox_ms > 0.01:
				timing_lines.append(f' │ ├─ bbox_filtering: {bbox_ms:.2f}ms')
			if assign_idx_ms > 0.01:
				timing_lines.append(f' │ └─ assign_interactive_indices: {assign_idx_ms:.2f}ms')

		# Overheads (higher 0.1ms threshold — only worth logging when noticeable)
		get_dom_overhead_ms = timing_info.get('get_dom_tree_overhead_ms', 0)
		serialize_overhead_ms = timing_info.get('serialization_overhead_ms', 0)
		get_serialized_overhead_ms = timing_info.get('get_serialized_dom_tree_overhead_ms', 0)
		if get_dom_overhead_ms > 0.1:
			timing_lines.append(f' ├─ get_dom_tree_overhead: {get_dom_overhead_ms:.2f}ms')
		if serialize_overhead_ms > 0.1:
			timing_lines.append(f' ├─ serialization_overhead: {serialize_overhead_ms:.2f}ms')
		if get_serialized_overhead_ms > 0.1:
			timing_lines.append(f' └─ get_serialized_dom_tree_overhead: {get_serialized_overhead_ms:.2f}ms')

		# Calculate total tracked time for validation
		main_operations_ms = (
			get_all_trees_ms
			+ build_ax_ms
			+ build_snapshot_ms
			+ construct_tree_ms
			+ serialize_total_ms
			+ get_dom_overhead_ms
			+ serialize_overhead_ms
			+ get_serialized_overhead_ms
		)
		untracked_time_ms = total_time_ms - main_operations_ms
		if untracked_time_ms > 1.0:  # Only log if significant
			timing_lines.append(f' ⚠️ untracked_time: {untracked_time_ms:.2f}ms')

		# Single log call with all timing info
		self.logger.debug('\n'.join(timing_lines))

		# Update selector map for other watchdogs
		self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: Updating selector maps...')
		self.selector_map = self.current_dom_state.selector_map

		# Update BrowserSession's cached selector map
		if self.browser_session:
			self.browser_session.update_cached_selector_map(self.selector_map)

		self.logger.debug(
			f'🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ Selector maps updated, {len(self.selector_map)} elements'
		)

		# Skip JavaScript highlighting injection - Python highlighting will be applied later
		self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ COMPLETED DOM tree build (no JS highlights)')
		return self.current_dom_state
	except Exception as e:
		self.logger.error(f'Failed to build DOM tree without highlights: {e}')
		# Surface the failure to other watchdogs before re-raising to the caller
		self.event_bus.dispatch(
			BrowserErrorEvent(
				error_type='DOMBuildFailed',
				message=str(e),
			)
		)
		raise
@time_execution_async('capture_clean_screenshot')
@observe_debug(ignore_input=True, ignore_output=True, name='capture_clean_screenshot')
async def _capture_clean_screenshot(self) -> str:
	"""Take a screenshot of the focused target before any JS highlights exist.

	Dispatches a ScreenshotEvent on the event bus and returns the base64 string
	produced by the registered screenshot handler.

	Returns:
		Base64-encoded screenshot data.

	Raises:
		TimeoutError: If the screenshot event never completes.
		RuntimeError: If the handler produced no result.
	"""
	try:
		self.logger.debug('🔍 DOMWatchdog._capture_clean_screenshot: Capturing clean screenshot...')

		# Make sure the agent-focused target has a live, focused CDP session first.
		await self.browser_session.get_or_create_cdp_session(target_id=self.browser_session.agent_focus_target_id, focus=True)

		# Log which handlers (if any) will service the event — useful when debugging timeouts.
		registered = self.event_bus.handlers.get('ScreenshotEvent', [])
		names = [getattr(handler, '__name__', str(handler)) for handler in registered]
		self.logger.debug(f'📸 ScreenshotEvent handlers registered: {len(registered)} - {names}')

		pending = self.event_bus.dispatch(ScreenshotEvent(full_page=False))
		self.logger.debug('📸 Dispatched ScreenshotEvent, waiting for event to complete...')

		# Awaiting the event itself waits for every registered handler to finish.
		await pending

		# Pull the single handler's result; errors/None from the handler raise here.
		result = await pending.event_result(raise_if_any=True, raise_if_none=True)
		if result is None:
			raise RuntimeError('Screenshot handler returned None')

		self.logger.debug('🔍 DOMWatchdog._capture_clean_screenshot: ✅ Clean screenshot captured successfully')
		return str(result)
	except TimeoutError:
		self.logger.warning('📸 Clean screenshot timed out after 6 seconds - no handler registered or slow page?')
		raise
	except Exception as e:
		self.logger.warning(f'📸 Clean screenshot failed: {type(e).__name__}: {e}')
		raise
def _detect_pagination_buttons(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> list['PaginationButton']:
	"""Detect pagination buttons from the DOM selector map.

	Args:
		selector_map: Dictionary mapping element indices to DOM tree nodes

	Returns:
		List of PaginationButton instances found in the DOM (empty on failure)
	"""
	from browser_use.browser.views import PaginationButton

	buttons: list[PaginationButton] = []
	try:
		self.logger.debug('🔍 DOMWatchdog._detect_pagination_buttons: Detecting pagination buttons...')
		raw_candidates = DomService.detect_pagination_buttons(selector_map)

		# Convert the raw dicts from the DOM service into typed PaginationButton
		# models; assign in one step so a mid-conversion failure yields [] rather
		# than a partial list.
		converted = []
		for candidate in raw_candidates:
			converted.append(
				PaginationButton(
					button_type=candidate['button_type'],  # type: ignore
					backend_node_id=candidate['backend_node_id'],  # type: ignore
					text=candidate['text'],  # type: ignore
					selector=candidate['selector'],  # type: ignore
					is_disabled=candidate['is_disabled'],  # type: ignore
				)
			)
		buttons = converted

		if buttons:
			self.logger.debug(
				f'🔍 DOMWatchdog._detect_pagination_buttons: Found {len(buttons)} pagination buttons'
			)
	except Exception as e:
		self.logger.warning(f'🔍 DOMWatchdog._detect_pagination_buttons: Pagination detection failed: {e}')
	return buttons
async def _get_page_info(self) -> 'PageInfo':
	"""Get comprehensive page information using a single CDP call.

	TODO: should we make this an event as well?

	Returns:
		PageInfo with all viewport, page dimensions, and scroll information
	"""
	from browser_use.browser.views import PageInfo

	# get_or_create_cdp_session() handles focus validation automatically
	cdp_session = await self.browser_session.get_or_create_cdp_session(
		target_id=self.browser_session.agent_focus_target_id, focus=True
	)

	# A single Page.getLayoutMetrics call yields every metric we need
	metrics = await asyncio.wait_for(
		cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id), timeout=10.0
	)

	# Pull out the individual viewport structures from the response
	layout_vp = metrics.get('layoutViewport', {})
	visual_vp = metrics.get('visualViewport', {})
	css_visual_vp = metrics.get('cssVisualViewport', {})
	css_layout_vp = metrics.get('cssLayoutViewport', {})
	content = metrics.get('contentSize', {})

	# Device pixel ratio converts between device pixels and CSS pixels
	# (same approach as dom/service.py _get_viewport_ratio)
	css_width = css_visual_vp.get('clientWidth', css_layout_vp.get('clientWidth', 1280.0))
	device_width = visual_vp.get('clientWidth', css_width)
	dpr = device_width / css_width if css_width > 0 else 1.0

	# Viewport dimensions in CSS pixels (what JavaScript sees); prefer the CSS
	# layout viewport, falling back to the plain layout viewport.
	vp_w = int(css_layout_vp.get('clientWidth') or layout_vp.get('clientWidth', 1280))
	vp_h = int(css_layout_vp.get('clientHeight') or layout_vp.get('clientHeight', 720))

	# Content size is typically reported in device pixels, so divide by the
	# device pixel ratio to get CSS-pixel page dimensions.
	raw_w = content.get('width', vp_w * dpr)
	raw_h = content.get('height', vp_h * dpr)
	page_w = int(raw_w / dpr)
	page_h = int(raw_h / dpr)

	# Scroll position in CSS pixels: CSS visual viewport first, then CSS layout viewport
	sx = int(css_visual_vp.get('pageX') or css_layout_vp.get('pageX', 0))
	sy = int(css_visual_vp.get('pageY') or css_layout_vp.get('pageY', 0))

	# pixels_* describe how much content lies outside the viewport on each side
	return PageInfo(
		viewport_width=vp_w,
		viewport_height=vp_h,
		page_width=page_w,
		page_height=page_h,
		scroll_x=sx,
		scroll_y=sy,
		pixels_above=sy,
		pixels_below=max(0, page_h - vp_h - sy),
		pixels_left=sx,
		pixels_right=max(0, page_w - vp_w - sx),
	)
# ========== Public Helper Methods ==========
async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None:
	"""Get DOM element by index from cached selector map.

	Builds the DOM first when no selector map has been cached yet.

	Returns:
		EnhancedDOMTreeNode or None if index not found
	"""
	if not self.selector_map:
		# No cached map yet — build the DOM tree to populate it
		await self._build_dom_tree_without_highlights()
	mapping = self.selector_map
	return mapping.get(index) if mapping else None
def clear_cache(self) -> None:
	"""Drop cached DOM state so the next access triggers a fresh build."""
	# Reset every cached DOM artifact; the DOM service instance itself is kept
	# so its CDP client connection can be reused on the next build.
	for attr in ('selector_map', 'current_dom_state', 'enhanced_dom_tree'):
		setattr(self, attr, None)
def is_file_input(self, element: EnhancedDOMTreeNode) -> bool:
	"""Return True when the given node is an ``<input type="file">`` element."""
	if element.node_name.upper() != 'INPUT':
		return False
	# Both the tag and the type attribute are compared case-insensitively
	return element.attributes.get('type', '').lower() == 'file'
@staticmethod
def is_element_visible_according_to_all_parents(node: EnhancedDOMTreeNode, html_frames: list[EnhancedDOMTreeNode]) -> bool:
	"""Check if the element is visible according to all its parent HTML frames.

	Thin wrapper: the actual visibility computation lives in DomService.
	"""
	visible = DomService.is_element_visible_according_to_all_parents(node, html_frames)
	return visible
async def __aexit__(self, exc_type, exc_value, traceback):
"""Clean up DOM service on exit."""
if self._dom_service:
await self._dom_service.__aexit__(exc_type, exc_value, traceback)
self._dom_service = None
def __del__(self):
	"""Clean up DOM service on deletion.

	Runs the base-class destructor first, then drops our reference to the
	DOM service.
	"""
	super().__del__()
	# DOM service will clean up its own CDP client
	self._dom_service = None
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/dom_watchdog.py",
"license": "MIT License",
"lines": 723,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/downloads_watchdog.py | """Downloads watchdog for monitoring and handling file downloads."""
import asyncio
import json
import os
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar
from urllib.parse import urlparse
import anyio
from bubus import BaseEvent
from cdp_use.cdp.browser import DownloadProgressEvent as CDPDownloadProgressEvent
from cdp_use.cdp.browser import DownloadWillBeginEvent
from cdp_use.cdp.network import ResponseReceivedEvent
from cdp_use.cdp.target import SessionID, TargetID
from pydantic import PrivateAttr
from browser_use.browser.events import (
BrowserLaunchEvent,
BrowserStateRequestEvent,
BrowserStoppedEvent,
DownloadProgressEvent,
DownloadStartedEvent,
FileDownloadedEvent,
NavigationCompleteEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
pass
class DownloadsWatchdog(BaseWatchdog):
	"""Monitors downloads and handles file download events."""

	# Events this watchdog listens to (for documentation)
	LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [
		BrowserLaunchEvent,
		BrowserStateRequestEvent,
		BrowserStoppedEvent,
		TabCreatedEvent,
		TabClosedEvent,
		NavigationCompleteEvent,
	]

	# Events this watchdog emits
	EMITS: ClassVar[list[type[BaseEvent[Any]]]] = [
		DownloadProgressEvent,
		DownloadStartedEvent,
		FileDownloadedEvent,
	]

	# Private state
	_sessions_with_listeners: set[str] = PrivateAttr(default_factory=set)  # Track sessions that already have download listeners
	_active_downloads: dict[str, Any] = PrivateAttr(default_factory=dict)  # Active download bookkeeping (usage outside this chunk)
	_pdf_viewer_cache: dict[str, bool] = PrivateAttr(default_factory=dict)  # Cache PDF viewer status by target URL
	_download_cdp_session_setup: bool = PrivateAttr(default=False)  # Track if CDP session is set up
	_download_cdp_session: Any = PrivateAttr(default=None)  # Store CDP session reference
	_cdp_event_tasks: set[asyncio.Task] = PrivateAttr(default_factory=set)  # Track CDP event handler tasks
	_cdp_downloads_info: dict[str, dict[str, Any]] = PrivateAttr(default_factory=dict)  # Map guid -> info
	_session_pdf_urls: dict[str, str] = PrivateAttr(default_factory=dict)  # URL -> path for PDFs downloaded this session
	_initial_downloads_snapshot: set[str] = PrivateAttr(default_factory=set)  # Files present when watchdog started
	_network_monitored_targets: set[str] = PrivateAttr(default_factory=set)  # Track targets with network monitoring enabled
	_detected_downloads: set[str] = PrivateAttr(default_factory=set)  # Track detected download URLs to avoid duplicates
	_network_callback_registered: bool = PrivateAttr(default=False)  # Track if global network callback is registered

	# Direct callback support for download waiting (bypasses event bus for synchronization)
	_download_start_callbacks: list[Any] = PrivateAttr(default_factory=list)  # Callbacks for download start
	_download_progress_callbacks: list[Any] = PrivateAttr(default_factory=list)  # Callbacks for download progress
	_download_complete_callbacks: list[Any] = PrivateAttr(default_factory=list)  # Callbacks for download complete
def register_download_callbacks(
self,
on_start: Any | None = None,
on_progress: Any | None = None,
on_complete: Any | None = None,
) -> None:
"""Register direct callbacks for download events
Callbacks called sync from CDP event handlers, so click
handlers receive download notif without waiting for event bus to process
"""
self.logger.debug(
f'[DownloadsWatchdog] Registering callbacks: start={on_start is not None}, progress={on_progress is not None}, complete={on_complete is not None}'
)
if on_start:
self._download_start_callbacks.append(on_start)
self.logger.debug(
f'[DownloadsWatchdog] Registered start callback, now have {len(self._download_start_callbacks)} start callbacks'
)
if on_progress:
self._download_progress_callbacks.append(on_progress)
if on_complete:
self._download_complete_callbacks.append(on_complete)
def unregister_download_callbacks(
self,
on_start: Any | None = None,
on_progress: Any | None = None,
on_complete: Any | None = None,
) -> None:
"""Unregister previously registered download callbacks."""
if on_start and on_start in self._download_start_callbacks:
self._download_start_callbacks.remove(on_start)
if on_progress and on_progress in self._download_progress_callbacks:
self._download_progress_callbacks.remove(on_progress)
if on_complete and on_complete in self._download_complete_callbacks:
self._download_complete_callbacks.remove(on_complete)
async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> None:
	"""Prepare the downloads directory and snapshot its current contents."""
	self.logger.debug(f'[DownloadsWatchdog] Received BrowserLaunchEvent, EventBus ID: {id(self.event_bus)}')

	downloads_path = self.browser_session.browser_profile.downloads_path
	if not downloads_path:
		return

	# Make sure the (expanded, absolute) downloads directory exists
	expanded_path = Path(downloads_path).expanduser().resolve()
	expanded_path.mkdir(parents=True, exist_ok=True)
	self.logger.debug(f'[DownloadsWatchdog] Ensured downloads directory exists: {expanded_path}')

	# Snapshot the files already present (ignoring dotfiles) so new downloads
	# can later be detected reliably by diffing against this set.
	if expanded_path.exists():
		self._initial_downloads_snapshot.update(
			entry.name for entry in expanded_path.iterdir() if entry.is_file() and not entry.name.startswith('.')
		)
		self.logger.debug(
			f'[DownloadsWatchdog] Captured initial downloads: {len(self._initial_downloads_snapshot)} files'
		)
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
	"""Monitor new tabs for downloads."""
	# Downloads path should always be set by the BrowserProfile default
	assert self.browser_session.browser_profile.downloads_path is not None, 'Downloads path must be configured'

	if not event.target_id:
		self.logger.warning(f'[DownloadsWatchdog] No target found for tab {event.target_id}')
		return
	await self.attach_to_target(event.target_id)
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
	"""Stop monitoring closed tabs.

	Intentionally a no-op: download listeners are browser-level and the
	browser context owns the target lifecycle, so there is nothing to tear
	down per-tab.
	"""
	pass  # No cleanup needed, browser context handles target lifecycle
async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> None:
	"""Handle browser state request events.

	Resolves the current page URL via the public session API and re-dispatches
	it as a NavigationCompleteEvent parented to this request.
	"""
	self.logger.debug(f'[DownloadsWatchdog] on_BrowserStateRequestEvent started, event_id={event.event_id[-4:]}')
	try:
		# Uses the public API, which validates focus and waits for recovery if needed
		session = await self.browser_session.get_or_create_cdp_session()
	except ValueError:
		self.logger.warning(f'[DownloadsWatchdog] No valid focus, skipping BrowserStateRequestEvent {event.event_id[-4:]}')
		return  # No valid focus, skip

	self.logger.debug(
		f'[DownloadsWatchdog] About to call get_current_page_url(), target_id={session.target_id[-4:] if session.target_id else "None"}'
	)
	page_url = await self.browser_session.get_current_page_url()
	self.logger.debug(f'[DownloadsWatchdog] Got URL: {page_url[:80] if page_url else "None"}')
	if not page_url:
		self.logger.warning(f'[DownloadsWatchdog] No URL found for BrowserStateRequestEvent {event.event_id[-4:]}')
		return

	target_id = session.target_id
	self.logger.debug(f'[DownloadsWatchdog] About to dispatch NavigationCompleteEvent for target {target_id[-4:]}')
	self.event_bus.dispatch(
		NavigationCompleteEvent(
			event_type='NavigationCompleteEvent',
			url=page_url,
			target_id=target_id,
			event_parent_id=event.event_id,
		)
	)
	self.logger.debug('[DownloadsWatchdog] Successfully completed BrowserStateRequestEvent')
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
	"""Clean up when browser stops."""
	# Cancel any CDP event handler tasks that are still running, then wait
	# for the cancellations to settle before clearing the tracking set.
	outstanding = list(self._cdp_event_tasks)
	for task in outstanding:
		if not task.done():
			task.cancel()
	if outstanding:
		await asyncio.gather(*outstanding, return_exceptions=True)
	self._cdp_event_tasks.clear()

	# Drop the cached CDP session reference; sessions themselves are cached
	# and managed by BrowserSession.
	self._download_cdp_session = None
	self._download_cdp_session_setup = False

	# Reset all remaining per-session state containers
	for container in (
		self._sessions_with_listeners,
		self._active_downloads,
		self._pdf_viewer_cache,
		self._session_pdf_urls,
		self._network_monitored_targets,
		self._detected_downloads,
		self._initial_downloads_snapshot,
	):
		container.clear()
	self._network_callback_registered = False
async def on_NavigationCompleteEvent(self, event: NavigationCompleteEvent) -> None:
	"""Check for PDFs after navigation completes."""
	self.logger.debug(f'[DownloadsWatchdog] NavigationCompleteEvent received for {event.url}, tab #{event.target_id[-4:]}')

	# Navigation may have replaced the content, so any cached PDF-viewer
	# status for this URL is stale.
	self._pdf_viewer_cache.pop(event.url, None)

	# Nothing further to do unless PDF auto-download is enabled
	if not self._is_auto_download_enabled():
		return

	# Note: Using network-based PDF detection that doesn't require JavaScript
	target_id = event.target_id
	self.logger.debug(f'[DownloadsWatchdog] Got target_id={target_id} for tab #{event.target_id[-4:]}')
	if await self.check_for_pdf_viewer(target_id):
		self.logger.debug(f'[DownloadsWatchdog] 📄 PDF detected at {event.url}, triggering auto-download...')
		if not await self.trigger_pdf_download(target_id):
			self.logger.warning(f'[DownloadsWatchdog] ⚠️ PDF download failed for {event.url}')
def _is_auto_download_enabled(self) -> bool:
"""Check if auto-download PDFs is enabled in browser profile."""
return self.browser_session.browser_profile.auto_download_pdfs
async def attach_to_target(self, target_id: TargetID) -> None:
	"""Set up download monitoring for a specific target.

	Registers the browser-level CDP download listeners once per browser
	session (Browser.setDownloadBehavior plus downloadWillBegin /
	downloadProgress handlers), then enables network-based download detection
	for this target. Safe to call repeatedly — subsequent calls only add
	network monitoring.
	"""

	# Define CDP event handlers outside of try to avoid indentation/scope issues
	def download_will_begin_handler(event: DownloadWillBeginEvent, session_id: SessionID | None) -> None:
		# Fired by CDP whenever any download starts in the browser.
		self.logger.debug(f'[DownloadsWatchdog] Download will begin: {event}')
		# Cache info for later completion event handling (esp. remote browsers)
		guid = event.get('guid', '')
		url = event.get('url', '')
		suggested_filename = event.get('suggestedFilename', 'download')
		try:
			assert suggested_filename, 'CDP DownloadWillBegin missing suggestedFilename'
			self._cdp_downloads_info[guid] = {
				'url': url,
				'suggested_filename': suggested_filename,
				'handled': False,
			}
		except (AssertionError, KeyError):
			pass
		# Call direct callbacks first (for click handlers waiting for downloads)
		download_info = {
			'guid': guid,
			'url': url,
			'suggested_filename': suggested_filename,
			'auto_download': False,
		}
		self.logger.debug(f'[DownloadsWatchdog] Calling {len(self._download_start_callbacks)} start callbacks')
		for callback in self._download_start_callbacks:
			try:
				self.logger.debug(f'[DownloadsWatchdog] Calling start callback: {callback}')
				callback(download_info)
			except Exception as e:
				self.logger.debug(f'[DownloadsWatchdog] Error in download start callback: {e}')
		# Emit DownloadStartedEvent so other components can react
		self.event_bus.dispatch(
			DownloadStartedEvent(
				guid=guid,
				url=url,
				suggested_filename=suggested_filename,
				auto_download=False,  # CDP-triggered downloads are user-initiated
			)
		)
		# Create and track the task
		task = create_task_with_error_handling(
			self._handle_cdp_download(event, target_id, session_id),
			name='handle_cdp_download',
			logger_instance=self.logger,
			suppress_exceptions=True,
		)
		self._cdp_event_tasks.add(task)
		# Remove from set when done
		task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))

	def download_progress_handler(event: CDPDownloadProgressEvent, session_id: SessionID | None) -> None:
		# Fired repeatedly by CDP while a download is in progress and once on completion.
		guid = event.get('guid', '')
		state = event.get('state', '')
		received_bytes = int(event.get('receivedBytes', 0))
		total_bytes = int(event.get('totalBytes', 0))
		# Call direct callbacks first (for click handlers tracking progress)
		progress_info = {
			'guid': guid,
			'received_bytes': received_bytes,
			'total_bytes': total_bytes,
			'state': state,
		}
		for callback in self._download_progress_callbacks:
			try:
				callback(progress_info)
			except Exception as e:
				self.logger.debug(f'[DownloadsWatchdog] Error in download progress callback: {e}')
		# Emit progress event for all states so listeners can track progress
		from browser_use.browser.events import DownloadProgressEvent as DownloadProgressEventInternal

		self.event_bus.dispatch(
			DownloadProgressEventInternal(
				guid=guid,
				received_bytes=received_bytes,
				total_bytes=total_bytes,
				state=state,
			)
		)
		# Check if download is complete
		if state == 'completed':
			file_path = event.get('filePath')
			if self.browser_session.is_local:
				if file_path:
					self.logger.debug(f'[DownloadsWatchdog] Download completed: {file_path}')
					# Track the download
					self._track_download(file_path, guid=guid)
					# Mark as handled to prevent fallback duplicate dispatch
					try:
						if guid in self._cdp_downloads_info:
							self._cdp_downloads_info[guid]['handled'] = True
					except (KeyError, AttributeError):
						pass
				else:
					# No filePath provided - detect by comparing with initial snapshot
					self.logger.debug('[DownloadsWatchdog] No filePath in progress event; detecting via filesystem')
					downloads_path = self.browser_session.browser_profile.downloads_path
					if downloads_path:
						downloads_dir = Path(downloads_path).expanduser().resolve()
						if downloads_dir.exists():
							for f in downloads_dir.iterdir():
								if (
									f.is_file()
									and not f.name.startswith('.')
									and f.name not in self._initial_downloads_snapshot
								):
									# Check file has content before processing
									if f.stat().st_size > 4:
										# Found a new file! Add to snapshot immediately to prevent duplicate detection
										self._initial_downloads_snapshot.add(f.name)
										self.logger.debug(f'[DownloadsWatchdog] Detected new download: {f.name}')
										self._track_download(str(f))
										# Mark as handled
										try:
											if guid in self._cdp_downloads_info:
												self._cdp_downloads_info[guid]['handled'] = True
										except (KeyError, AttributeError):
											pass
										break
			else:
				# Remote browser: do not touch local filesystem. Fallback to downloadPath+suggestedFilename
				info = self._cdp_downloads_info.get(guid, {})
				try:
					suggested_filename = info.get('suggested_filename') or (Path(file_path).name if file_path else 'download')
					downloads_path = str(self.browser_session.browser_profile.downloads_path or '')
					effective_path = file_path or str(Path(downloads_path) / suggested_filename)
					file_name = Path(effective_path).name
					file_ext = Path(file_name).suffix.lower().lstrip('.')
					self.event_bus.dispatch(
						FileDownloadedEvent(
							guid=guid,
							url=info.get('url', ''),
							path=str(effective_path),
							file_name=file_name,
							file_size=0,  # real size unknown without filesystem access
							file_type=file_ext if file_ext else None,
						)
					)
					self.logger.debug(f'[DownloadsWatchdog] ✅ (remote) Download completed: {effective_path}')
				finally:
					if guid in self._cdp_downloads_info:
						del self._cdp_downloads_info[guid]

	try:
		downloads_path_raw = self.browser_session.browser_profile.downloads_path
		if not downloads_path_raw:
			# logger.info(f'[DownloadsWatchdog] No downloads path configured, skipping target: {target_id}')
			return  # No downloads path configured

		# Check if we already have a download listener on this session
		# to prevent duplicate listeners from being added
		# Note: Since download listeners are set up once per browser session, not per target,
		# we just track if we've set up the browser-level listener
		if self._download_cdp_session_setup:
			self.logger.debug('[DownloadsWatchdog] Download listener already set up for browser session')
			return

		# logger.debug(f'[DownloadsWatchdog] Setting up CDP download listener for target: {target_id}')
		# Use CDP session for download events but store reference in watchdog
		if not self._download_cdp_session_setup:
			# Set up CDP session for downloads (only once per browser session)
			cdp_client = self.browser_session.cdp_client

			# Set download behavior to allow downloads and enable events
			downloads_path = self.browser_session.browser_profile.downloads_path
			if not downloads_path:
				self.logger.warning('[DownloadsWatchdog] No downloads path configured, skipping CDP download setup')
				return

			# Ensure path is properly expanded (~ -> absolute path)
			expanded_downloads_path = Path(downloads_path).expanduser().resolve()

			await cdp_client.send.Browser.setDownloadBehavior(
				params={
					'behavior': 'allow',
					'downloadPath': str(expanded_downloads_path),  # Use expanded absolute path
					'eventsEnabled': True,
				}
			)

			# Register the handlers with CDP
			cdp_client.register.Browser.downloadWillBegin(download_will_begin_handler)  # type: ignore[arg-type]
			cdp_client.register.Browser.downloadProgress(download_progress_handler)  # type: ignore[arg-type]

			self._download_cdp_session_setup = True
			self.logger.debug('[DownloadsWatchdog] Set up CDP download listeners')

		# No need to track individual targets since download listener is browser-level
		# logger.debug(f'[DownloadsWatchdog] Successfully set up CDP download listener for target: {target_id}')
	except Exception as e:
		self.logger.warning(f'[DownloadsWatchdog] Failed to set up CDP download listener for target {target_id}: {e}')

	# Set up network monitoring for this target (catches ALL download variants)
	await self._setup_network_monitoring(target_id)
async def _setup_network_monitoring(self, target_id: TargetID) -> None:
    """Set up network monitoring to detect PDFs and downloads from ALL sources.

    This catches:
    - Direct PDF navigation
    - PDFs in iframes
    - PDFs with embed/object tags
    - JavaScript-triggered downloads
    - Any Content-Disposition: attachment headers

    The Network.responseReceived callback is registered once globally (guarded by
    self._network_callback_registered); per-target work is limited to enabling the
    Network domain and adding the target to self._network_monitored_targets.
    """
    # Skip if already monitoring this target
    if target_id in self._network_monitored_targets:
        self.logger.debug(f'[DownloadsWatchdog] Network monitoring already enabled for target {target_id[-4:]}')
        return

    # Check if auto-download is enabled
    if not self._is_auto_download_enabled():
        self.logger.debug('[DownloadsWatchdog] Auto-download disabled, skipping network monitoring')
        return

    try:
        cdp_client = self.browser_session.cdp_client

        # Register the global callback once
        if not self._network_callback_registered:

            def on_response_received(event: ResponseReceivedEvent, session_id: str | None) -> None:
                """Handle Network.responseReceived event to detect downloadable content.

                This callback is registered globally and uses session_id to determine the correct target.
                It must never raise: any error is caught and logged so the CDP event loop stays healthy.
                """
                try:
                    # Check if session_manager exists (may be None during browser shutdown)
                    if not self.browser_session.session_manager:
                        self.logger.warning('[DownloadsWatchdog] Session manager not found, skipping network monitoring')
                        return

                    # Look up target_id from session_id
                    event_target_id = self.browser_session.session_manager.get_target_id_from_session_id(session_id)
                    if not event_target_id:
                        # Session not in pool - might be a stale session or not yet tracked
                        return

                    # Only process events for targets we're monitoring
                    if event_target_id not in self._network_monitored_targets:
                        return

                    response = event.get('response', {})
                    url = response.get('url', '')
                    content_type = response.get('mimeType', '').lower()
                    headers = {
                        k.lower(): v for k, v in response.get('headers', {}).items()
                    }  # Normalize for case-insensitive lookup
                    request_type = event.get('type', '')

                    # Skip non-HTTP URLs (data:, about:, chrome-extension:, etc.)
                    if not url.startswith('http'):
                        return

                    # Skip fetch/XHR - real browsers don't download PDFs from programmatic requests
                    if request_type in ('Fetch', 'XHR'):
                        return

                    # Check if it's a PDF
                    is_pdf = 'application/pdf' in content_type

                    # Check if it's marked as download via Content-Disposition header
                    content_disposition = str(headers.get('content-disposition', '')).lower()
                    is_download_attachment = 'attachment' in content_disposition

                    # Filter out image/video/audio files even if marked as attachment
                    # These are likely resources, not intentional downloads
                    unwanted_content_types = [
                        'image/',
                        'video/',
                        'audio/',
                        'text/css',
                        'text/javascript',
                        'application/javascript',
                        'application/x-javascript',
                        'text/html',
                        'application/json',
                        'font/',
                        'application/font',
                        'application/x-font',
                    ]
                    is_unwanted_type = any(content_type.startswith(prefix) for prefix in unwanted_content_types)
                    if is_unwanted_type:
                        return

                    # Check URL extension to filter out obvious images/resources
                    url_lower = url.lower().split('?')[0]  # Remove query params
                    unwanted_extensions = [
                        '.jpg',
                        '.jpeg',
                        '.png',
                        '.gif',
                        '.webp',
                        '.svg',
                        '.ico',
                        '.css',
                        '.js',
                        '.woff',
                        '.woff2',
                        '.ttf',
                        '.eot',
                        '.mp4',
                        '.webm',
                        '.mp3',
                        '.wav',
                        '.ogg',
                    ]
                    if any(url_lower.endswith(ext) for ext in unwanted_extensions):
                        return

                    # Only process if it's a PDF or download
                    if not (is_pdf or is_download_attachment):
                        return

                    # If already downloaded this URL and file still exists, do nothing
                    existing_path = self._session_pdf_urls.get(url)
                    if existing_path:
                        if os.path.exists(existing_path):
                            return
                        # Stale cache entry, allow re-download
                        del self._session_pdf_urls[url]

                    # Check if we've already processed this URL in this session
                    if url in self._detected_downloads:
                        self.logger.debug(f'[DownloadsWatchdog] Already detected download: {url[:80]}...')
                        return

                    # Mark as detected to avoid duplicates
                    self._detected_downloads.add(url)

                    # Extract filename from Content-Disposition if available
                    suggested_filename = None
                    if 'filename=' in content_disposition:
                        # Parse filename from Content-Disposition header
                        import re

                        filename_match = re.search(r'filename[^;=\n]*=(([\'"]).*?\2|[^;\n]*)', content_disposition)
                        if filename_match:
                            suggested_filename = filename_match.group(1).strip('\'"')

                    self.logger.info(f'[DownloadsWatchdog] 🔍 Detected downloadable content via network: {url[:80]}...')
                    self.logger.debug(
                        f'[DownloadsWatchdog] Content-Type: {content_type}, Is PDF: {is_pdf}, Is Attachment: {is_download_attachment}'
                    )

                    # Trigger download asynchronously in background (don't block event handler)
                    async def download_in_background():
                        # Don't permanently block re-processing this URL if download fails
                        try:
                            download_path = await self.download_file_from_url(
                                url=url,
                                target_id=event_target_id,  # Use target_id from session_id lookup
                                content_type=content_type,
                                suggested_filename=suggested_filename,
                            )
                            if download_path:
                                self.logger.info(f'[DownloadsWatchdog] ✅ Successfully downloaded: {download_path}')
                            else:
                                self.logger.warning(f'[DownloadsWatchdog] ⚠️ Failed to download: {url[:80]}...')
                        except Exception as e:
                            self.logger.error(f'[DownloadsWatchdog] Error downloading in background: {type(e).__name__}: {e}')
                        finally:
                            # Allow future detections of the same URL
                            self._detected_downloads.discard(url)

                    # Create background task; keep a strong reference in _cdp_event_tasks
                    # so the task isn't garbage-collected mid-flight
                    task = create_task_with_error_handling(
                        download_in_background(),
                        name='download_in_background',
                        logger_instance=self.logger,
                        suppress_exceptions=True,
                    )
                    self._cdp_event_tasks.add(task)
                    task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))
                except Exception as e:
                    self.logger.error(f'[DownloadsWatchdog] Error in network response handler: {type(e).__name__}: {e}')

            # Register the callback globally (once)
            cdp_client.register.Network.responseReceived(on_response_received)
            self._network_callback_registered = True
            self.logger.debug('[DownloadsWatchdog] ✅ Registered global network response callback')

        # Get or create CDP session for this target
        cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Enable Network domain to monitor HTTP responses (per-target/per-session)
        await cdp_client.send.Network.enable(session_id=cdp_session.session_id)
        self.logger.debug(f'[DownloadsWatchdog] Enabled Network domain for target {target_id[-4:]}')

        # Mark this target as monitored
        self._network_monitored_targets.add(target_id)
        self.logger.debug(f'[DownloadsWatchdog] ✅ Network monitoring enabled for target {target_id[-4:]}')
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] Failed to set up network monitoring for target {target_id}: {e}')
async def download_file_from_url(
    self, url: str, target_id: TargetID, content_type: str | None = None, suggested_filename: str | None = None
) -> str | None:
    """Generic method to download any file from a URL.

    The bytes are fetched with an in-page JavaScript fetch() (cache: 'force-cache')
    so the browser's HTTP cache can be reused, then written to the configured
    downloads directory, and a FileDownloadedEvent is dispatched on success.

    Args:
        url: The URL to download
        target_id: The target ID for CDP session
        content_type: Optional content type (e.g., 'application/pdf')
        suggested_filename: Optional filename from Content-Disposition header

    Returns:
        Path to downloaded file, or None if download failed
    """
    if not self.browser_session.browser_profile.downloads_path:
        self.logger.warning('[DownloadsWatchdog] No downloads path configured')
        return None

    # Check if already downloaded in this session
    if url in self._session_pdf_urls:
        existing_path = self._session_pdf_urls[url]
        if os.path.exists(existing_path):
            self.logger.debug(f'[DownloadsWatchdog] File already downloaded in session: {existing_path}')
            return existing_path
        # Stale cache entry: the file was removed/cleaned up after we cached it.
        self.logger.debug(f'[DownloadsWatchdog] Cached download path no longer exists, re-downloading: {existing_path}')
        del self._session_pdf_urls[url]

    try:
        # Get or create CDP session for this target (focus=False: don't steal the active tab)
        temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Determine filename: Content-Disposition suggestion wins, then URL basename,
        # then a content-type-based fallback
        if suggested_filename:
            filename = suggested_filename
        else:
            # Extract from URL
            filename = os.path.basename(url.split('?')[0])  # Remove query params
            if not filename or '.' not in filename:
                # Fallback: use content type to determine extension
                if content_type and 'pdf' in content_type:
                    filename = 'document.pdf'
                else:
                    filename = 'download'

        # Ensure downloads directory exists
        downloads_dir = str(self.browser_session.browser_profile.downloads_path)
        os.makedirs(downloads_dir, exist_ok=True)

        # Generate unique filename if file exists ("name (1).ext", "name (2).ext", ...)
        final_filename = filename
        existing_files = os.listdir(downloads_dir)
        if filename in existing_files:
            base, ext = os.path.splitext(filename)
            counter = 1
            while f'{base} ({counter}){ext}' in existing_files:
                counter += 1
            final_filename = f'{base} ({counter}){ext}'
            self.logger.debug(f'[DownloadsWatchdog] File exists, using: {final_filename}')

        self.logger.debug(f'[DownloadsWatchdog] Downloading from: {url[:100]}...')

        # Download using JavaScript fetch to leverage browser cache.
        # json.dumps escapes the URL so it cannot break out of the JS expression.
        escaped_url = json.dumps(url)
        result = await asyncio.wait_for(
            temp_session.cdp_client.send.Runtime.evaluate(
                params={
                    'expression': f"""
                        (async () => {{
                            try {{
                                const response = await fetch({escaped_url}, {{
                                    cache: 'force-cache'
                                }});
                                if (!response.ok) {{
                                    throw new Error(`HTTP error! status: ${{response.status}}`);
                                }}
                                const blob = await response.blob();
                                const arrayBuffer = await blob.arrayBuffer();
                                const uint8Array = new Uint8Array(arrayBuffer);
                                return {{
                                    data: Array.from(uint8Array),
                                    responseSize: uint8Array.length
                                }};
                            }} catch (error) {{
                                throw new Error(`Fetch failed: ${{error.message}}`);
                            }}
                        }})()
                    """,
                    'awaitPromise': True,
                    'returnByValue': True,
                },
                session_id=temp_session.session_id,
            ),
            timeout=15.0,  # 15 second timeout
        )
        download_result = result.get('result', {}).get('value', {})
        if download_result and download_result.get('data') and len(download_result['data']) > 0:
            download_path = os.path.join(downloads_dir, final_filename)

            # Save the file asynchronously
            async with await anyio.open_file(download_path, 'wb') as f:
                await f.write(bytes(download_result['data']))

            # Verify file was written successfully
            if os.path.exists(download_path):
                actual_size = os.path.getsize(download_path)
                self.logger.debug(f'[DownloadsWatchdog] File written: {download_path} ({actual_size} bytes)')

                # Determine file type
                file_ext = Path(final_filename).suffix.lower().lstrip('.')
                mime_type = content_type or f'application/{file_ext}'

                # Store URL->path mapping for this session
                self._session_pdf_urls[url] = download_path

                # Emit file downloaded event
                self.logger.debug(f'[DownloadsWatchdog] Dispatching FileDownloadedEvent for {final_filename}')
                self.event_bus.dispatch(
                    FileDownloadedEvent(
                        url=url,
                        path=download_path,
                        file_name=final_filename,
                        file_size=actual_size,
                        file_type=file_ext if file_ext else None,
                        mime_type=mime_type,
                        auto_download=True,
                    )
                )
                return download_path
            else:
                self.logger.error(f'[DownloadsWatchdog] Failed to write file: {download_path}')
                return None
        else:
            self.logger.warning(f'[DownloadsWatchdog] No data received when downloading from {url}')
            return None
    except TimeoutError:
        self.logger.warning(f'[DownloadsWatchdog] Download timed out: {url[:80]}...')
        return None
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] Download failed: {type(e).__name__}: {e}')
        return None
def _track_download(self, file_path: str, guid: str | None = None) -> None:
    """Record a completed download: notify waiting callbacks, then dispatch FileDownloadedEvent.

    Args:
        file_path: The path to the downloaded file
        guid: Optional CDP download GUID for correlation with DownloadStartedEvent
    """
    try:
        downloaded = Path(file_path)
        if not downloaded.exists():
            self.logger.warning(f'[DownloadsWatchdog] Downloaded file not found: {file_path}')
            return

        size_bytes = downloaded.stat().st_size
        self.logger.debug(f'[DownloadsWatchdog] Tracked download: {downloaded.name} ({size_bytes} bytes)')

        # The extension (without the leading dot) doubles as the file_type
        extension = downloaded.suffix.lower().lstrip('.')

        # Direct callbacks run first — click handlers block on these while waiting for downloads
        notification = {
            'guid': guid,
            'url': str(downloaded),
            'path': str(downloaded),
            'file_name': downloaded.name,
            'file_size': size_bytes,
            'file_type': extension if extension else None,
            'auto_download': False,
        }
        for listener in self._download_complete_callbacks:
            try:
                listener(notification)
            except Exception as e:
                self.logger.debug(f'[DownloadsWatchdog] Error in download complete callback: {e}')

        # Then broadcast to the rest of the system via the event bus
        from browser_use.browser.events import FileDownloadedEvent

        self.event_bus.dispatch(
            FileDownloadedEvent(
                guid=guid,
                url=str(downloaded),  # Use the file path as URL for local files
                path=str(downloaded),
                file_name=downloaded.name,
                file_size=size_bytes,
            )
        )
    except Exception as e:
        self.logger.error(f'[DownloadsWatchdog] Error tracking download: {e}')
async def _handle_cdp_download(
    self, event: DownloadWillBeginEvent, target_id: TargetID, session_id: SessionID | None
) -> None:
    """Handle a CDP Page.downloadWillBegin event.

    Since Browser.setDownloadBehavior is already configured, the browser itself saves
    the file. For local browsers this polls the downloads directory for a new file and
    dispatches FileDownloadedEvent once one appears; for remote browsers it returns
    immediately and relies on the downloadProgress handler to emit the event instead.
    """
    downloads_dir = (
        Path(
            self.browser_session.browser_profile.downloads_path
            or f'{tempfile.gettempdir()}/browser_use_downloads.{str(self.browser_session.id)[-4:]}'
        )
        .expanduser()
        .resolve()
    )  # Ensure path is properly expanded
    # Initialize variables that may be used outside try blocks
    # NOTE(review): unique_filename, expected_path and download_result appear to be
    # assigned but never read below — looks like leftovers; TODO confirm and remove
    unique_filename = None
    file_size = 0
    expected_path = None
    download_result = None
    download_url = event.get('url', '')
    suggested_filename = event.get('suggestedFilename', 'download')
    guid = event.get('guid', '')
    try:
        self.logger.debug(f'[DownloadsWatchdog] ⬇️ File download starting: {suggested_filename} from {download_url[:100]}...')
        self.logger.debug(f'[DownloadsWatchdog] Full CDP event: {event}')

        # Since Browser.setDownloadBehavior is already configured, the browser will download the file
        # We just need to wait for it to appear in the downloads directory
        expected_path = downloads_dir / suggested_filename

        # For remote browsers, don't poll local filesystem; downloadProgress handler will emit the event
        if not self.browser_session.is_local:
            return
    except Exception as e:
        self.logger.error(f'[DownloadsWatchdog] ❌ Error handling CDP download: {type(e).__name__} {e}')

    # If we reach here, the fetch method failed, so wait for native download
    # Poll the downloads directory for new files
    self.logger.debug(f'[DownloadsWatchdog] Checking if browser auto-download saved the file for us: {suggested_filename}')

    # Poll for new files
    max_wait = 20  # seconds
    start_time = asyncio.get_event_loop().time()
    while asyncio.get_event_loop().time() - start_time < max_wait:  # noqa: ASYNC110
        await asyncio.sleep(5.0)  # Check every 5 seconds
        if Path(downloads_dir).exists():
            for file_path in Path(downloads_dir).iterdir():
                # Skip hidden files and files that were already there
                if (
                    file_path.is_file()
                    and not file_path.name.startswith('.')
                    and file_path.name not in self._initial_downloads_snapshot
                ):
                    # Add to snapshot immediately to prevent duplicate detection
                    self._initial_downloads_snapshot.add(file_path.name)

                    # Check if file has content (> 4 bytes)
                    try:
                        file_size = file_path.stat().st_size
                        if file_size > 4:
                            # Found a new download!
                            self.logger.debug(
                                f'[DownloadsWatchdog] ✅ Found downloaded file: {file_path} ({file_size} bytes)'
                            )
                            # Determine file type from extension
                            file_ext = file_path.suffix.lower().lstrip('.')
                            file_type = file_ext if file_ext else None

                            # Dispatch download event
                            # Skip if already handled by progress/JS fetch
                            info = self._cdp_downloads_info.get(guid, {})
                            if info.get('handled'):
                                return
                            self.event_bus.dispatch(
                                FileDownloadedEvent(
                                    guid=guid,
                                    url=download_url,
                                    path=str(file_path),
                                    file_name=file_path.name,
                                    file_size=file_size,
                                    file_type=file_type,
                                )
                            )
                            # Mark as handled after dispatch
                            try:
                                if guid in self._cdp_downloads_info:
                                    self._cdp_downloads_info[guid]['handled'] = True
                            except (KeyError, AttributeError):
                                pass
                            return
                    except Exception as e:
                        self.logger.debug(f'[DownloadsWatchdog] Error checking file {file_path}: {e}')

    self.logger.warning(f'[DownloadsWatchdog] Download did not complete within {max_wait} seconds')
async def _handle_download(self, download: Any) -> None:
    """Handle a download event (Playwright-style download object).

    Saves the download into the profile's downloads directory (or ~/Downloads as a
    fallback) and dispatches FileDownloadedEvent. The local ``current_step`` variable
    tracks progress so failures can be attributed to a specific phase in the error log.
    """
    download_id = f'{id(download)}'
    self._active_downloads[download_id] = download
    self.logger.debug(f'[DownloadsWatchdog] ⬇️ Handling download: {download.suggested_filename} from {download.url[:100]}...')

    # Debug: Check if download is already being handled elsewhere
    failure = (
        await download.failure()
    )  # TODO: it always fails for some reason, figure out why connect_over_cdp makes accept_downloads not work
    self.logger.warning(f'[DownloadsWatchdog] ❌ Download state - canceled: {failure}, url: {download.url}')
    # logger.info(f'[DownloadsWatchdog] Active downloads count: {len(self._active_downloads)}')
    try:
        current_step = 'getting_download_info'
        # Get download info immediately
        url = download.url
        suggested_filename = download.suggested_filename

        current_step = 'determining_download_directory'
        # Determine download directory from browser profile
        downloads_dir = self.browser_session.browser_profile.downloads_path
        if not downloads_dir:
            downloads_dir = str(Path.home() / 'Downloads')
        else:
            downloads_dir = str(downloads_dir)  # Ensure it's a string

        # Check if Playwright already auto-downloaded the file (due to CDP setup)
        original_path = Path(downloads_dir) / suggested_filename
        if original_path.exists() and original_path.stat().st_size > 0:
            self.logger.debug(
                f'[DownloadsWatchdog] File already downloaded by Playwright: {original_path} ({original_path.stat().st_size} bytes)'
            )
            # Use the existing file instead of creating a duplicate
            download_path = original_path
            file_size = original_path.stat().st_size
            unique_filename = suggested_filename
        else:
            current_step = 'generating_unique_filename'
            # Ensure unique filename
            unique_filename = await self._get_unique_filename(downloads_dir, suggested_filename)
            download_path = Path(downloads_dir) / unique_filename
            self.logger.debug(f'[DownloadsWatchdog] Download started: {unique_filename} from {url[:100]}...')

            current_step = 'calling_save_as'
            # Save the download using Playwright's save_as method
            self.logger.debug(f'[DownloadsWatchdog] Saving download to: {download_path}')
            self.logger.debug(f'[DownloadsWatchdog] Download path exists: {download_path.parent.exists()}')
            self.logger.debug(f'[DownloadsWatchdog] Download path writable: {os.access(download_path.parent, os.W_OK)}')
            try:
                self.logger.debug('[DownloadsWatchdog] About to call download.save_as()...')
                await download.save_as(str(download_path))
                self.logger.debug(f'[DownloadsWatchdog] Successfully saved download to: {download_path}')
                current_step = 'save_as_completed'
            except Exception as save_error:
                self.logger.error(f'[DownloadsWatchdog] save_as() failed with error: {save_error}')
                raise save_error

        # Get file info
        file_size = download_path.stat().st_size if download_path.exists() else 0

        # Determine file type from extension
        file_ext = download_path.suffix.lower().lstrip('.')
        file_type = file_ext if file_ext else None

        # Try to get MIME type from response headers if available
        mime_type = None
        # Note: Playwright doesn't expose response headers directly from Download object

        # Check if this was a PDF auto-download
        auto_download = False
        if file_type == 'pdf':
            auto_download = self._is_auto_download_enabled()

        # Emit download event
        self.event_bus.dispatch(
            FileDownloadedEvent(
                url=url,
                path=str(download_path),
                file_name=suggested_filename,
                file_size=file_size,
                file_type=file_type,
                mime_type=mime_type,
                from_cache=False,
                auto_download=auto_download,
            )
        )
        self.logger.debug(
            f'[DownloadsWatchdog] ✅ Download completed: {suggested_filename} ({file_size} bytes) saved to {download_path}'
        )
        # File is now tracked on filesystem, no need to track in memory
    except Exception as e:
        self.logger.error(
            f'[DownloadsWatchdog] Error handling download at step "{locals().get("current_step", "unknown")}", error: {e}'
        )
        self.logger.error(
            f'[DownloadsWatchdog] Download state - URL: {download.url}, filename: {download.suggested_filename}'
        )
    finally:
        # Clean up tracking
        if download_id in self._active_downloads:
            del self._active_downloads[download_id]
async def check_for_pdf_viewer(self, target_id: TargetID) -> bool:
    """Check if the current target is a PDF using network-based detection.

    This method avoids JavaScript execution that can crash WebSocket connections.
    Returns True if a PDF is detected and should be downloaded; results are memoized
    per URL in self._pdf_viewer_cache.
    """
    self.logger.debug(f'[DownloadsWatchdog] Checking if target {target_id} is PDF viewer...')

    # Use safe API - focus=False to avoid changing focus during PDF check.
    # The session itself isn't needed; a failure here means the target is unusable.
    try:
        await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
    except ValueError as e:
        self.logger.warning(f'[DownloadsWatchdog] No session found for {target_id}: {e}')
        return False

    # Resolve the target so we can read its current URL
    tab = self.browser_session.session_manager.get_target(target_id)
    if not tab:
        self.logger.warning(f'[DownloadsWatchdog] No target found for {target_id}')
        return False
    page_url = tab.url

    # Serve a cached verdict when this URL was already classified
    if page_url in self._pdf_viewer_cache:
        verdict = self._pdf_viewer_cache[page_url]
        self.logger.debug(f'[DownloadsWatchdog] Using cached PDF check result for {page_url}: {verdict}')
        return verdict

    try:
        # Method 1: URL pattern match (fastest, most reliable)
        if self._check_url_for_pdf(page_url):
            self.logger.debug(f'[DownloadsWatchdog] PDF detected via URL pattern: {page_url}')
            self._pdf_viewer_cache[page_url] = True
            return True

        # Method 2: Chrome's built-in PDF viewer URLs
        if self._is_chrome_pdf_viewer_url(page_url):
            self.logger.debug(f'[DownloadsWatchdog] Chrome PDF viewer detected: {page_url}')
            self._pdf_viewer_cache[page_url] = True
            return True

        # Neither heuristic matched — remember the negative result too
        self._pdf_viewer_cache[page_url] = False
        return False
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] ❌ Error checking for PDF viewer: {e}')
        self._pdf_viewer_cache[page_url] = False
        return False
def _check_url_for_pdf(self, url: str) -> bool:
"""Check if URL indicates a PDF file."""
if not url:
return False
url_lower = url.lower()
# Direct PDF file extensions
if url_lower.endswith('.pdf'):
return True
# PDF in path
if '.pdf' in url_lower:
return True
# PDF MIME type in URL parameters
if any(
param in url_lower
for param in [
'content-type=application/pdf',
'content-type=application%2fpdf',
'mimetype=application/pdf',
'type=application/pdf',
]
):
return True
return False
def _is_chrome_pdf_viewer_url(self, url: str) -> bool:
"""Check if this is Chrome's internal PDF viewer URL."""
if not url:
return False
url_lower = url.lower()
# Chrome PDF viewer uses chrome-extension:// URLs
if 'chrome-extension://' in url_lower and 'pdf' in url_lower:
return True
# Chrome PDF viewer internal URLs
if url_lower.startswith('chrome://') and 'pdf' in url_lower:
return True
return False
async def _check_network_headers_for_pdf(self, target_id: TargetID) -> bool:
    """Infer PDF via navigation history/URL; headers are not available post-navigation in this context.

    Args:
        target_id: CDP target to inspect.

    Returns:
        True if the current navigation entry's URL looks like a PDF, else False.
        Any CDP failure is treated as "not a PDF" (non-critical, logged at debug).
    """
    # NOTE: the redundant function-local `import asyncio` was removed — the module
    # already uses asyncio at module scope (see the polling loop in _handle_cdp_download).
    try:
        # Get CDP session without stealing focus from the visible tab
        temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Get navigation history to find the main resource; bounded so a hung
        # CDP connection cannot stall the caller
        history = await asyncio.wait_for(
            temp_session.cdp_client.send.Page.getNavigationHistory(session_id=temp_session.session_id), timeout=3.0
        )
        entries = history.get('entries', [])
        if entries:
            current_index = history.get('currentIndex', 0)
            if 0 <= current_index < len(entries):
                current_url = entries[current_index].get('url', '')
                # Check if the URL itself suggests PDF
                if self._check_url_for_pdf(current_url):
                    return True

        # Note: CDP doesn't easily expose response headers for completed navigations.
        # For more complex cases, we'd need to set up Network.responseReceived listeners
        # before navigation, but that's overkill for most PDF detection cases.
        return False
    except Exception as e:
        self.logger.debug(f'[DownloadsWatchdog] Network headers check failed (non-critical): {e}')
        return False
async def trigger_pdf_download(self, target_id: TargetID) -> str | None:
    """Trigger download of a PDF from Chrome's PDF viewer.

    Reads the PDF URL from the page (window.location.href), fetches the bytes
    in-page with cache: 'force-cache' so a cached copy is reused when available,
    and writes them to the configured downloads directory.

    Returns the download path if successful, None otherwise.
    """
    self.logger.debug(f'[DownloadsWatchdog] trigger_pdf_download called for target_id={target_id}')
    if not self.browser_session.browser_profile.downloads_path:
        self.logger.warning('[DownloadsWatchdog] ❌ No downloads path configured, cannot save PDF download')
        return None

    downloads_path = self.browser_session.browser_profile.downloads_path
    self.logger.debug(f'[DownloadsWatchdog] Downloads path: {downloads_path}')
    try:
        # Create a temporary CDP session for this target without switching focus
        import asyncio

        self.logger.debug(f'[DownloadsWatchdog] Creating CDP session for PDF download from target {target_id}')
        temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Try to get the PDF URL with timeout
        result = await asyncio.wait_for(
            temp_session.cdp_client.send.Runtime.evaluate(
                params={
                    'expression': """
                        (() => {
                            // For Chrome's PDF viewer, the actual URL is in window.location.href
                            // The embed element's src is often "about:blank"
                            const embedElement = document.querySelector('embed[type="application/x-google-chrome-pdf"]') ||
                                                 document.querySelector('embed[type="application/pdf"]');
                            if (embedElement) {
                                // Chrome PDF viewer detected - use the page URL
                                return { url: window.location.href };
                            }
                            // Fallback to window.location.href anyway
                            return { url: window.location.href };
                        })()
                    """,
                    'returnByValue': True,
                },
                session_id=temp_session.session_id,
            ),
            timeout=5.0,  # 5 second timeout to prevent hanging
        )
        pdf_info = result.get('result', {}).get('value', {})
        pdf_url = pdf_info.get('url', '')
        if not pdf_url:
            self.logger.warning(f'[DownloadsWatchdog] ❌ Could not determine PDF URL for download {pdf_info}')
            return None

        # Generate filename from URL
        pdf_filename = os.path.basename(pdf_url.split('?')[0])  # Remove query params
        if not pdf_filename or not pdf_filename.endswith('.pdf'):
            parsed = urlparse(pdf_url)
            pdf_filename = os.path.basename(parsed.path) or 'document.pdf'
            if not pdf_filename.endswith('.pdf'):
                pdf_filename += '.pdf'
        self.logger.debug(f'[DownloadsWatchdog] Generated filename: {pdf_filename}')

        # Check if already downloaded in this session
        self.logger.debug(f'[DownloadsWatchdog] PDF_URL: {pdf_url}, session_pdf_urls: {self._session_pdf_urls}')
        if pdf_url in self._session_pdf_urls:
            existing_path = self._session_pdf_urls[pdf_url]
            self.logger.debug(f'[DownloadsWatchdog] PDF already downloaded in session: {existing_path}')
            return existing_path

        # Generate unique filename if file exists from previous run
        downloads_dir = str(self.browser_session.browser_profile.downloads_path)
        os.makedirs(downloads_dir, exist_ok=True)
        final_filename = pdf_filename
        existing_files = os.listdir(downloads_dir)
        if pdf_filename in existing_files:
            # Generate unique name with (1), (2), etc.
            base, ext = os.path.splitext(pdf_filename)
            counter = 1
            while f'{base} ({counter}){ext}' in existing_files:
                counter += 1
            final_filename = f'{base} ({counter}){ext}'
            self.logger.debug(f'[DownloadsWatchdog] File exists, using: {final_filename}')

        self.logger.debug(f'[DownloadsWatchdog] Starting PDF download from: {pdf_url[:100]}...')

        # Download using JavaScript fetch to leverage browser cache
        try:
            # Properly escape the URL to prevent JavaScript injection
            escaped_pdf_url = json.dumps(pdf_url)
            result = await asyncio.wait_for(
                temp_session.cdp_client.send.Runtime.evaluate(
                    params={
                        'expression': f"""
                            (async () => {{
                                try {{
                                    // Use fetch with cache: 'force-cache' to prioritize cached version
                                    const response = await fetch({escaped_pdf_url}, {{
                                        cache: 'force-cache'
                                    }});
                                    if (!response.ok) {{
                                        throw new Error(`HTTP error! status: ${{response.status}}`);
                                    }}
                                    const blob = await response.blob();
                                    const arrayBuffer = await blob.arrayBuffer();
                                    const uint8Array = new Uint8Array(arrayBuffer);
                                    // Check if served from cache
                                    const fromCache = response.headers.has('age') ||
                                                      !response.headers.has('date');
                                    return {{
                                        data: Array.from(uint8Array),
                                        fromCache: fromCache,
                                        responseSize: uint8Array.length,
                                        transferSize: response.headers.get('content-length') || 'unknown'
                                    }};
                                }} catch (error) {{
                                    throw new Error(`Fetch failed: ${{error.message}}`);
                                }}
                            }})()
                        """,
                        'awaitPromise': True,
                        'returnByValue': True,
                    },
                    session_id=temp_session.session_id,
                ),
                timeout=10.0,  # 10 second timeout for download operation
            )
            download_result = result.get('result', {}).get('value', {})
            if download_result and download_result.get('data') and len(download_result['data']) > 0:
                # Ensure downloads directory exists
                downloads_dir = str(self.browser_session.browser_profile.downloads_path)
                os.makedirs(downloads_dir, exist_ok=True)
                download_path = os.path.join(downloads_dir, final_filename)

                # Save the PDF asynchronously
                async with await anyio.open_file(download_path, 'wb') as f:
                    await f.write(bytes(download_result['data']))

                # Verify file was written successfully
                if os.path.exists(download_path):
                    actual_size = os.path.getsize(download_path)
                    self.logger.debug(
                        f'[DownloadsWatchdog] PDF file written successfully: {download_path} ({actual_size} bytes)'
                    )
                else:
                    self.logger.error(f'[DownloadsWatchdog] ❌ Failed to write PDF file to: {download_path}')
                    return None

                # Log cache information
                cache_status = 'from cache' if download_result.get('fromCache') else 'from network'
                response_size = download_result.get('responseSize', 0)
                self.logger.debug(
                    f'[DownloadsWatchdog] ✅ Auto-downloaded PDF ({cache_status}, {response_size:,} bytes): {download_path}'
                )

                # Store URL->path mapping for this session
                self._session_pdf_urls[pdf_url] = download_path

                # Emit file downloaded event
                self.logger.debug(f'[DownloadsWatchdog] Dispatching FileDownloadedEvent for {final_filename}')
                self.event_bus.dispatch(
                    FileDownloadedEvent(
                        url=pdf_url,
                        path=download_path,
                        file_name=final_filename,
                        file_size=response_size,
                        file_type='pdf',
                        mime_type='application/pdf',
                        from_cache=download_result.get('fromCache', False),
                        auto_download=True,
                    )
                )
                # No need to detach - session is cached
                return download_path
            else:
                self.logger.warning(f'[DownloadsWatchdog] No data received when downloading PDF from {pdf_url}')
                return None
        except Exception as e:
            self.logger.warning(f'[DownloadsWatchdog] Failed to auto-download PDF from {pdf_url}: {type(e).__name__}: {e}')
            return None
    except TimeoutError:
        self.logger.debug('[DownloadsWatchdog] PDF download operation timed out')
        return None
    except Exception as e:
        self.logger.error(f'[DownloadsWatchdog] Error in PDF download: {type(e).__name__}: {e}')
        return None
@staticmethod
async def _get_unique_filename(directory: str, filename: str) -> str:
"""Generate a unique filename for downloads by appending (1), (2), etc., if a file already exists."""
base, ext = os.path.splitext(filename)
counter = 1
new_filename = filename
while os.path.exists(os.path.join(directory, new_filename)):
new_filename = f'{base} ({counter}){ext}'
counter += 1
return new_filename
# Fix Pydantic circular dependency - this will be called from session.py after BrowserSession is defined
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/downloads_watchdog.py",
"license": "MIT License",
"lines": 1183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/local_browser_watchdog.py | """Local browser watchdog for managing browser subprocess lifecycle."""
from __future__ import annotations
import asyncio
import os
import shutil
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar
import psutil
from bubus import BaseEvent
from pydantic import PrivateAttr
from browser_use.browser.events import (
BrowserKillEvent,
BrowserLaunchEvent,
BrowserLaunchResult,
BrowserStopEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.observability import observe_debug
if TYPE_CHECKING:
from browser_use.browser.profile import BrowserChannel
class LocalBrowserWatchdog(BaseWatchdog):
    """Manages local browser subprocess lifecycle.

    Launches a Chromium-based browser as a child process with a CDP remote
    debugging port, retries with a temporary user-data-dir when the profile
    is locked, and tears the process (plus any temp dirs it created) back
    down on BrowserKillEvent / BrowserStopEvent.
    """

    # Events this watchdog listens to
    LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [
        BrowserLaunchEvent,
        BrowserKillEvent,
        BrowserStopEvent,
    ]
    # Events this watchdog emits
    EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []

    # Private state for subprocess management
    _subprocess: psutil.Process | None = PrivateAttr(default=None)  # handle to the launched browser process
    _owns_browser_resources: bool = PrivateAttr(default=True)  # whether this watchdog should clean up the browser
    _temp_dirs_to_cleanup: list[Path] = PrivateAttr(default_factory=list)  # temp user-data-dirs created during launch retries
    _original_user_data_dir: str | None = PrivateAttr(default=None)  # profile dir to restore if we fell back to a temp dir

    @observe_debug(ignore_input=True, ignore_output=True, name='browser_launch_event')
    async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> BrowserLaunchResult:
        """Launch a local browser process.

        Returns:
            BrowserLaunchResult carrying the CDP URL of the new browser.

        Raises:
            Exception: re-raises any launch failure after logging it.
        """
        try:
            self.logger.debug('[LocalBrowserWatchdog] Received BrowserLaunchEvent, launching local browser...')
            # self.logger.debug('[LocalBrowserWatchdog] Calling _launch_browser...')
            process, cdp_url = await self._launch_browser()
            self._subprocess = process
            # self.logger.debug(f'[LocalBrowserWatchdog] _launch_browser returned: process={process}, cdp_url={cdp_url}')
            return BrowserLaunchResult(cdp_url=cdp_url)
        except Exception as e:
            self.logger.error(f'[LocalBrowserWatchdog] Exception in on_BrowserLaunchEvent: {e}', exc_info=True)
            raise

    async def on_BrowserKillEvent(self, event: BrowserKillEvent) -> None:
        """Kill the local browser subprocess and clean up temp dirs / profile state."""
        self.logger.debug('[LocalBrowserWatchdog] Killing local browser process')
        if self._subprocess:
            await self._cleanup_process(self._subprocess)
            self._subprocess = None
        # Clean up temp directories if any were created
        for temp_dir in self._temp_dirs_to_cleanup:
            self._cleanup_temp_dir(temp_dir)
        self._temp_dirs_to_cleanup.clear()
        # Restore original user_data_dir if it was modified (e.g. temp-dir fallback during launch)
        if self._original_user_data_dir is not None:
            self.browser_session.browser_profile.user_data_dir = self._original_user_data_dir
            self._original_user_data_dir = None
        self.logger.debug('[LocalBrowserWatchdog] Browser cleanup completed')

    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """Listen for BrowserStopEvent and dispatch BrowserKillEvent without awaiting it."""
        if self.browser_session.is_local and self._subprocess:
            self.logger.debug('[LocalBrowserWatchdog] BrowserStopEvent received, dispatching BrowserKillEvent')
            # Dispatch BrowserKillEvent without awaiting so it gets processed after all BrowserStopEvent handlers
            self.event_bus.dispatch(BrowserKillEvent())

    @observe_debug(ignore_input=True, ignore_output=True, name='launch_browser_process')
    async def _launch_browser(self, max_retries: int = 3) -> tuple[psutil.Process, str]:
        """Launch browser process and return (process, cdp_url).

        Handles launch errors by falling back to temporary directories if needed.

        Args:
            max_retries: How many launch attempts to make before giving up.

        Returns:
            Tuple of (psutil.Process, cdp_url)

        Raises:
            RuntimeError: If no browser binary can be found/installed, or all
                launch attempts fail.
        """
        # Keep track of original user_data_dir to restore if needed
        profile = self.browser_session.browser_profile
        self._original_user_data_dir = str(profile.user_data_dir) if profile.user_data_dir else None
        self._temp_dirs_to_cleanup = []
        for attempt in range(max_retries):
            try:
                # Get launch args from profile
                launch_args = profile.get_args()
                # Add debugging port
                debug_port = self._find_free_port()
                launch_args.extend(
                    [
                        f'--remote-debugging-port={debug_port}',
                    ]
                )
                assert '--user-data-dir' in str(launch_args), (
                    'User data dir must be set somewhere in launch args to a non-default path, otherwise Chrome will not let us attach via CDP'
                )
                # Get browser executable
                # Priority: custom executable > fallback paths > playwright subprocess
                if profile.executable_path:
                    browser_path = profile.executable_path
                    self.logger.debug(f'[LocalBrowserWatchdog] 📦 Using custom local browser executable_path= {browser_path}')
                else:
                    # self.logger.debug('[LocalBrowserWatchdog] 🔍 Looking for local browser binary path...')
                    # Try fallback paths first (system browsers preferred)
                    browser_path = self._find_installed_browser_path(channel=profile.channel)
                    if not browser_path:
                        self.logger.error(
                            '[LocalBrowserWatchdog] ⚠️ No local browser binary found, installing browser using playwright subprocess...'
                        )
                        browser_path = await self._install_browser_with_playwright()
                    self.logger.debug(f'[LocalBrowserWatchdog] 📦 Found local browser installed at executable_path= {browser_path}')
                if not browser_path:
                    raise RuntimeError('No local Chrome/Chromium install found, and failed to install with playwright')
                # Launch browser subprocess directly
                self.logger.debug(f'[LocalBrowserWatchdog] 🚀 Launching browser subprocess with {len(launch_args)} args...')
                self.logger.debug(
                    f'[LocalBrowserWatchdog] 📂 user_data_dir={profile.user_data_dir}, profile_directory={profile.profile_directory}'
                )
                subprocess = await asyncio.create_subprocess_exec(
                    browser_path,
                    *launch_args,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                self.logger.debug(
                    f'[LocalBrowserWatchdog] 🎭 Browser running with browser_pid= {subprocess.pid} 🔗 listening on CDP port :{debug_port}'
                )
                # Convert to psutil.Process
                process = psutil.Process(subprocess.pid)
                # Wait for CDP to be ready and get the URL
                cdp_url = await self._wait_for_cdp_url(debug_port)
                # Success! Clean up only the temp dirs we created but didn't use
                currently_used_dir = str(profile.user_data_dir)
                unused_temp_dirs = [tmp_dir for tmp_dir in self._temp_dirs_to_cleanup if str(tmp_dir) != currently_used_dir]
                for tmp_dir in unused_temp_dirs:
                    try:
                        shutil.rmtree(tmp_dir, ignore_errors=True)
                    except Exception:
                        pass
                # Keep only the in-use directory for cleanup during browser kill
                if currently_used_dir and 'browseruse-tmp-' in currently_used_dir:
                    self._temp_dirs_to_cleanup = [Path(currently_used_dir)]
                else:
                    self._temp_dirs_to_cleanup = []
                return process, cdp_url
            except Exception as e:
                error_str = str(e).lower()
                # Check if this is a user_data_dir related error (profile locked by another Chrome)
                if any(err in error_str for err in ['singletonlock', 'user data directory', 'cannot create', 'already in use']):
                    self.logger.warning(f'Browser launch failed (attempt {attempt + 1}/{max_retries}): {e}')
                    if attempt < max_retries - 1:
                        # Create a temporary directory for next attempt
                        tmp_dir = Path(tempfile.mkdtemp(prefix='browseruse-tmp-'))
                        self._temp_dirs_to_cleanup.append(tmp_dir)
                        # Update profile to use temp directory
                        profile.user_data_dir = str(tmp_dir)
                        self.logger.debug(f'Retrying with temporary user_data_dir: {tmp_dir}')
                        # Small delay before retry
                        await asyncio.sleep(0.5)
                        continue
                # Not a recoverable error or last attempt failed
                # Restore original user_data_dir before raising
                if self._original_user_data_dir is not None:
                    profile.user_data_dir = self._original_user_data_dir
                # Clean up any temp dirs we created
                for tmp_dir in self._temp_dirs_to_cleanup:
                    try:
                        shutil.rmtree(tmp_dir, ignore_errors=True)
                    except Exception:
                        pass
                raise
        # Should not reach here, but just in case
        if self._original_user_data_dir is not None:
            profile.user_data_dir = self._original_user_data_dir
        raise RuntimeError(f'Failed to launch browser after {max_retries} attempts')

    @staticmethod
    def _find_installed_browser_path(channel: BrowserChannel | None = None) -> str | None:
        """Try to find browser executable from common fallback locations.

        If a channel is specified, paths for that browser are searched first.
        Falls back to all known browser paths if the channel-specific search fails.
        Prioritizes:
        1. Channel-specific paths (if channel is set)
        2. System Chrome stable
        3. Playwright chromium
        4. Other system native browsers (Chromium -> Chrome Canary/Dev -> Brave -> Edge)
        5. Playwright headless-shell fallback

        Returns:
            Path to browser executable or None if not found
        """
        import glob
        import platform
        from pathlib import Path

        from browser_use.browser.profile import BROWSERUSE_DEFAULT_CHANNEL, BrowserChannel

        system = platform.system()
        # Get playwright browsers path from environment variable if set
        playwright_path = os.environ.get('PLAYWRIGHT_BROWSERS_PATH')
        # Build tagged pattern lists per OS: (browser_group, path)
        # browser_group is used to match against the requested channel
        if system == 'Darwin':  # macOS
            if not playwright_path:
                playwright_path = '~/Library/Caches/ms-playwright'
            all_patterns = [
                ('chrome', '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'),
                ('chromium', f'{playwright_path}/chromium-*/chrome-mac/Chromium.app/Contents/MacOS/Chromium'),
                ('chromium', '/Applications/Chromium.app/Contents/MacOS/Chromium'),
                ('chrome-canary', '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'),
                ('brave', '/Applications/Brave Browser.app/Contents/MacOS/Brave Browser'),
                ('msedge', '/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge'),
                ('chromium', f'{playwright_path}/chromium_headless_shell-*/chrome-mac/Chromium.app/Contents/MacOS/Chromium'),
            ]
        elif system == 'Linux':
            if not playwright_path:
                playwright_path = '~/.cache/ms-playwright'
            all_patterns = [
                ('chrome', '/usr/bin/google-chrome-stable'),
                ('chrome', '/usr/bin/google-chrome'),
                ('chrome', '/usr/local/bin/google-chrome'),
                ('chromium', f'{playwright_path}/chromium-*/chrome-linux*/chrome'),
                ('chromium', '/usr/bin/chromium'),
                ('chromium', '/usr/bin/chromium-browser'),
                ('chromium', '/usr/local/bin/chromium'),
                ('chromium', '/snap/bin/chromium'),
                ('chrome-beta', '/usr/bin/google-chrome-beta'),
                ('chrome-dev', '/usr/bin/google-chrome-dev'),
                ('brave', '/usr/bin/brave-browser'),
                ('msedge', '/usr/bin/microsoft-edge-stable'),
                ('msedge', '/usr/bin/microsoft-edge'),
                ('chromium', f'{playwright_path}/chromium_headless_shell-*/chrome-linux*/chrome'),
            ]
        elif system == 'Windows':
            if not playwright_path:
                playwright_path = r'%LOCALAPPDATA%\ms-playwright'
            all_patterns = [
                ('chrome', r'C:\Program Files\Google\Chrome\Application\chrome.exe'),
                ('chrome', r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'),
                ('chrome', r'%LOCALAPPDATA%\Google\Chrome\Application\chrome.exe'),
                ('chrome', r'%PROGRAMFILES%\Google\Chrome\Application\chrome.exe'),
                ('chrome', r'%PROGRAMFILES(X86)%\Google\Chrome\Application\chrome.exe'),
                ('chromium', f'{playwright_path}\\chromium-*\\chrome-win\\chrome.exe'),
                ('chromium', r'C:\Program Files\Chromium\Application\chrome.exe'),
                ('chromium', r'C:\Program Files (x86)\Chromium\Application\chrome.exe'),
                ('chromium', r'%LOCALAPPDATA%\Chromium\Application\chrome.exe'),
                ('brave', r'C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe'),
                ('brave', r'C:\Program Files (x86)\BraveSoftware\Brave-Browser\Application\brave.exe'),
                ('msedge', r'C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe'),
                ('msedge', r'C:\Program Files\Microsoft\Edge\Application\msedge.exe'),
                ('msedge', r'%LOCALAPPDATA%\Microsoft\Edge\Application\msedge.exe'),
                ('chromium', f'{playwright_path}\\chromium_headless_shell-*\\chrome-win\\chrome.exe'),
            ]
        else:
            all_patterns = []
        # Map channel enum values to browser group tags
        _channel_to_group: dict[BrowserChannel, str] = {
            BrowserChannel.CHROME: 'chrome',
            BrowserChannel.CHROME_BETA: 'chrome-beta',
            BrowserChannel.CHROME_DEV: 'chrome-dev',
            BrowserChannel.CHROME_CANARY: 'chrome-canary',
            BrowserChannel.CHROMIUM: 'chromium',
            BrowserChannel.MSEDGE: 'msedge',
            BrowserChannel.MSEDGE_BETA: 'msedge',
            BrowserChannel.MSEDGE_DEV: 'msedge',
            BrowserChannel.MSEDGE_CANARY: 'msedge',
        }
        # If a non-default channel is specified, put matching patterns first, then the rest as fallback
        if channel and channel != BROWSERUSE_DEFAULT_CHANNEL and channel in _channel_to_group:
            target_group = _channel_to_group[channel]
            prioritized = [p for g, p in all_patterns if g == target_group]
            rest = [p for g, p in all_patterns if g != target_group]
            patterns = prioritized + rest
        else:
            patterns = [p for _, p in all_patterns]
        for pattern in patterns:
            # Expand user home directory
            expanded_pattern = Path(pattern).expanduser()
            # Handle Windows environment variables
            if system == 'Windows':
                pattern_str = str(expanded_pattern)
                for env_var in ['%LOCALAPPDATA%', '%PROGRAMFILES%', '%PROGRAMFILES(X86)%']:
                    if env_var in pattern_str:
                        env_key = env_var.strip('%').replace('(X86)', ' (x86)')
                        env_value = os.environ.get(env_key, '')
                        if env_value:
                            pattern_str = pattern_str.replace(env_var, env_value)
                            expanded_pattern = Path(pattern_str)
            # Convert to string for glob
            pattern_str = str(expanded_pattern)
            # Check if pattern contains wildcards
            if '*' in pattern_str:
                # Use glob to expand the pattern
                matches = glob.glob(pattern_str)
                if matches:
                    # Sort matches and take the last one (alphanumerically highest version)
                    matches.sort()
                    browser_path = matches[-1]
                    if Path(browser_path).exists() and Path(browser_path).is_file():
                        return browser_path
            else:
                # Direct path check
                if expanded_pattern.exists() and expanded_pattern.is_file():
                    return str(expanded_pattern)
        return None

    async def _install_browser_with_playwright(self) -> str:
        """Install Chrome via `uvx playwright install chrome` and return its path.

        Runs the install in a subprocess (not in-process) to avoid thread issues.

        Returns:
            Path to the installed browser executable.

        Raises:
            RuntimeError: On install timeout, subprocess failure, or when no
                browser binary is found after a successful install.
        """
        import platform

        # Build command - only use --with-deps on Linux (it fails on Windows/macOS)
        cmd = ['uvx', 'playwright', 'install', 'chrome']
        if platform.system() == 'Linux':
            cmd.append('--with-deps')
        # Run in subprocess with timeout
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        # Keep the try body limited to the await itself so that the RuntimeError we
        # raise below for "install succeeded but no binary found" is not swallowed
        # and re-wrapped by our own broad `except Exception` handler.
        try:
            stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=60.0)
        except TimeoutError:
            # Kill the subprocess if it times out
            process.kill()
            await process.wait()
            raise RuntimeError('Timeout getting browser path from playwright')
        except Exception as e:
            # Make sure subprocess is terminated
            if process.returncode is None:
                process.kill()
                await process.wait()
            raise RuntimeError(f'Error getting browser path: {e}')
        self.logger.debug(f'[LocalBrowserWatchdog] 📦 Playwright install output: {stdout}')
        browser_path = self._find_installed_browser_path()
        if browser_path:
            return browser_path
        self.logger.error(f'[LocalBrowserWatchdog] ❌ Playwright local browser installation error: \n{stdout}\n{stderr}')
        raise RuntimeError('No local browser path found after: uvx playwright install chrome')

    @staticmethod
    def _find_free_port() -> int:
        """Find a free port for the debugging interface.

        NOTE(review): there is an inherent race between closing this probe socket
        and the browser binding the port; acceptable here since launch is retried.
        """
        import socket

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(('127.0.0.1', 0))
            s.listen(1)
            port = s.getsockname()[1]
            return port

    @staticmethod
    async def _wait_for_cdp_url(port: int, timeout: float = 30) -> str:
        """Wait for the browser to start and return the CDP URL.

        Polls http://127.0.0.1:{port}/json/version until Chrome answers with 200.

        Args:
            port: CDP remote debugging port the browser was launched with.
            timeout: Maximum seconds to wait for the browser to become ready.

        Raises:
            TimeoutError: If the browser is not reachable within `timeout`.
        """
        import aiohttp

        # Use the running loop's monotonic clock for the deadline.
        # (asyncio.get_event_loop() inside a coroutine is deprecated since Python 3.10.)
        loop = asyncio.get_running_loop()
        start_time = loop.time()
        while loop.time() - start_time < timeout:
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'http://127.0.0.1:{port}/json/version') as resp:
                        if resp.status == 200:
                            # Chrome is ready
                            return f'http://127.0.0.1:{port}/'
                        else:
                            # Chrome is starting up and returning 502/500 errors
                            await asyncio.sleep(0.1)
            except Exception:
                # Connection error - Chrome might not be ready yet
                await asyncio.sleep(0.1)
        raise TimeoutError(f'Browser did not start within {timeout} seconds')

    @staticmethod
    async def _cleanup_process(process: psutil.Process) -> None:
        """Clean up browser process.

        Tries SIGTERM first and polls up to ~5s for exit, then falls back to
        SIGKILL. All errors are swallowed: cleanup is best-effort.

        Args:
            process: psutil.Process to terminate
        """
        if not process:
            return
        try:
            # Try graceful shutdown first
            process.terminate()
            # Use async wait instead of blocking wait
            for _ in range(50):  # Wait up to 5 seconds (50 * 0.1)
                if not process.is_running():
                    return
                await asyncio.sleep(0.1)
            # If still running after 5 seconds, force kill
            if process.is_running():
                process.kill()
                # Give it a moment to die
                await asyncio.sleep(0.1)
        except psutil.NoSuchProcess:
            # Process already gone
            pass
        except Exception:
            # Ignore any other errors during cleanup
            pass

    def _cleanup_temp_dir(self, temp_dir: Path | str) -> None:
        """Clean up temporary directory.

        Only removes directories that carry our 'browseruse-tmp-' marker, so a
        user-supplied user_data_dir can never be deleted by accident.

        Args:
            temp_dir: Path to temporary directory to remove
        """
        if not temp_dir:
            return
        try:
            temp_path = Path(temp_dir)
            # Only remove if it's actually a temp directory we created
            if 'browseruse-tmp-' in str(temp_path):
                shutil.rmtree(temp_path, ignore_errors=True)
        except Exception as e:
            self.logger.debug(f'Failed to cleanup temp dir {temp_dir}: {e}')

    @property
    def browser_pid(self) -> int | None:
        """Get the browser process ID, or None if no subprocess is running."""
        if self._subprocess:
            return self._subprocess.pid
        return None

    @staticmethod
    async def get_browser_pid_via_cdp(browser) -> int | None:
        """Get the browser process ID via CDP SystemInfo.getProcessInfo.

        Args:
            browser: Playwright Browser instance

        Returns:
            Process ID or None if failed
        """
        try:
            cdp_session = await browser.new_browser_cdp_session()
            result = await cdp_session.send('SystemInfo.getProcessInfo')
            process_info = result.get('processInfo', {})
            pid = process_info.get('id')
            await cdp_session.detach()
            return pid
        except Exception:
            # If we can't get PID via CDP, it's not critical
            return None
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/local_browser_watchdog.py",
"license": "MIT License",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/permissions_watchdog.py | """Permissions watchdog for granting browser permissions on connection."""
from typing import TYPE_CHECKING, ClassVar
from bubus import BaseEvent
from browser_use.browser.events import BrowserConnectedEvent
from browser_use.browser.watchdog_base import BaseWatchdog
if TYPE_CHECKING:
pass
class PermissionsWatchdog(BaseWatchdog):
    """Grants browser permissions when browser connects."""

    # Event contracts: react to a successful connection, emit nothing.
    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [BrowserConnectedEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []

    async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
        """Grant the profile's configured permissions once the browser is reachable."""
        to_grant = self.browser_session.browser_profile.permissions
        if not to_grant:
            self.logger.debug('No permissions to grant')
            return

        self.logger.debug(f'🔓 Granting browser permissions: {to_grant}')
        try:
            # Browser.grantPermissions is a browser-scoped CDP command (no session_id);
            # leaving `origin` unset applies the grant to every origin.
            await self.browser_session.cdp_client.send.Browser.grantPermissions(
                params={'permissions': to_grant}  # type: ignore
            )
            self.logger.debug(f'✅ Successfully granted permissions: {to_grant}')
        except Exception as e:
            # Best-effort: permissions are not critical to browser operation,
            # so log the failure instead of raising.
            self.logger.error(f'❌ Failed to grant permissions: {str(e)}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/permissions_watchdog.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/browser/watchdogs/popups_watchdog.py | """Watchdog for handling JavaScript dialogs (alert, confirm, prompt) automatically."""
import asyncio
from typing import ClassVar
from bubus import BaseEvent
from pydantic import PrivateAttr
from browser_use.browser.events import TabCreatedEvent
from browser_use.browser.watchdog_base import BaseWatchdog
class PopupsWatchdog(BaseWatchdog):
    """Handles JavaScript dialogs (alert, confirm, prompt) by automatically accepting them immediately."""
    # Events this watchdog listens to and emits
    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [TabCreatedEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []
    # Track which targets have dialog handlers registered
    # NOTE(review): entries are never removed when a tab closes, so this set grows
    # monotonically over a long session — harmless (small strings) but worth confirming.
    _dialog_listeners_registered: set[str] = PrivateAttr(default_factory=set)
    def __init__(self, **kwargs):
        # Forward all watchdog config (browser_session, event_bus, ...) to the base class.
        super().__init__(**kwargs)
        self.logger.debug(f'🚀 PopupsWatchdog initialized with browser_session={self.browser_session}, ID={id(self)}')
    async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
        """Set up JavaScript dialog handling when a new tab is created.

        Enables the CDP Page domain for the new target (and the root client),
        then registers a `Page.javascriptDialogOpening` callback that resolves
        dialogs immediately so they never block automation.
        """
        target_id = event.target_id
        self.logger.debug(f'🎯 PopupsWatchdog received TabCreatedEvent for target {target_id}')
        # Skip if we've already registered for this target (handlers must not stack)
        if target_id in self._dialog_listeners_registered:
            self.logger.debug(f'Already registered dialog handlers for target {target_id}')
            return
        self.logger.debug(f'📌 Starting dialog handler setup for target {target_id}')
        try:
            # Get all CDP sessions for this target and any child frames
            cdp_session = await self.browser_session.get_or_create_cdp_session(
                target_id, focus=False
            )  # don't auto-focus new tabs! sometimes we need to open tabs in background
            # CRITICAL: Enable Page domain to receive dialog events
            try:
                await cdp_session.cdp_client.send.Page.enable(session_id=cdp_session.session_id)
                self.logger.debug(f'✅ Enabled Page domain for session {cdp_session.session_id[-8:]}')
            except Exception as e:
                self.logger.debug(f'Failed to enable Page domain: {e}')
            # Also register for the root CDP client to catch dialogs from any frame
            if self.browser_session._cdp_client_root:
                self.logger.debug('📌 Also registering handler on root CDP client')
                try:
                    # Enable Page domain on root client too
                    await self.browser_session._cdp_client_root.send.Page.enable()
                    self.logger.debug('✅ Enabled Page domain on root CDP client')
                except Exception as e:
                    self.logger.debug(f'Failed to enable Page domain on root: {e}')
            # Set up async handler for JavaScript dialogs - accept immediately without event dispatch
            async def handle_dialog(event_data, session_id: str | None = None):
                """Handle JavaScript dialog events - accept immediately.

                Tries two approaches in order: first the CDP session that reported
                the dialog, then the session of the agent's focused target.
                """
                try:
                    dialog_type = event_data.get('type', 'alert')
                    message = event_data.get('message', '')
                    # Store the popup message in browser session for inclusion in browser state
                    if message:
                        formatted_message = f'[{dialog_type}] {message}'
                        self.browser_session._closed_popup_messages.append(formatted_message)
                        self.logger.debug(f'📝 Stored popup message: {formatted_message[:100]}')
                    # Choose action based on dialog type:
                    # - alert: accept=true (click OK to dismiss)
                    # - confirm: accept=true (click OK to proceed - safer for automation)
                    # - prompt: accept=false (click Cancel since we can't provide input)
                    # - beforeunload: accept=true (allow navigation)
                    should_accept = dialog_type in ('alert', 'confirm', 'beforeunload')
                    action_str = 'accepting (OK)' if should_accept else 'dismissing (Cancel)'
                    self.logger.info(f"🔔 JavaScript {dialog_type} dialog: '{message[:100]}' - {action_str}...")
                    dismissed = False
                    # Approach 1: Use the session that detected the dialog (most reliable)
                    if self.browser_session._cdp_client_root and session_id:
                        try:
                            self.logger.debug(f'🔄 Approach 1: Using detecting session {session_id[-8:]}')
                            # Short timeout: the dialog may already be gone or the session dead.
                            await asyncio.wait_for(
                                self.browser_session._cdp_client_root.send.Page.handleJavaScriptDialog(
                                    params={'accept': should_accept},
                                    session_id=session_id,
                                ),
                                timeout=0.5,
                            )
                            dismissed = True
                            self.logger.info('✅ Dialog handled successfully via detecting session')
                        # NOTE(review): TimeoutError is already an Exception subclass,
                        # so the tuple below is redundant (but harmless).
                        except (TimeoutError, Exception) as e:
                            self.logger.debug(f'Approach 1 failed: {type(e).__name__}')
                    # Approach 2: Try with current agent focus session
                    if not dismissed and self.browser_session._cdp_client_root and self.browser_session.agent_focus_target_id:
                        try:
                            # Use public API with focus=False to avoid changing focus during popup dismissal
                            cdp_session = await self.browser_session.get_or_create_cdp_session(
                                self.browser_session.agent_focus_target_id, focus=False
                            )
                            self.logger.debug(f'🔄 Approach 2: Using agent focus session {cdp_session.session_id[-8:]}')
                            await asyncio.wait_for(
                                self.browser_session._cdp_client_root.send.Page.handleJavaScriptDialog(
                                    params={'accept': should_accept},
                                    session_id=cdp_session.session_id,
                                ),
                                timeout=0.5,
                            )
                            dismissed = True
                            self.logger.info('✅ Dialog handled successfully via agent focus session')
                        except (TimeoutError, Exception) as e:
                            self.logger.debug(f'Approach 2 failed: {type(e).__name__}')
                except Exception as e:
                    # Never let a dialog-handling error propagate into the CDP event loop.
                    self.logger.error(f'❌ Critical error in dialog handler: {type(e).__name__}: {e}')
            # Register handler on the specific session
            cdp_session.cdp_client.register.Page.javascriptDialogOpening(handle_dialog)  # type: ignore[arg-type]
            self.logger.debug(
                f'Successfully registered Page.javascriptDialogOpening handler for session {cdp_session.session_id}'
            )
            # Also register on root CDP client to catch dialogs from any frame
            if hasattr(self.browser_session._cdp_client_root, 'register'):
                try:
                    self.browser_session._cdp_client_root.register.Page.javascriptDialogOpening(handle_dialog)  # type: ignore[arg-type]
                    self.logger.debug('Successfully registered dialog handler on root CDP client for all frames')
                except Exception as root_error:
                    self.logger.warning(f'Failed to register on root CDP client: {root_error}')
            # Mark this target as having dialog handling set up
            self._dialog_listeners_registered.add(target_id)
            self.logger.debug(f'Set up JavaScript dialog handling for tab {target_id}')
        except Exception as e:
            # Best-effort: a tab without dialog handling is degraded, not fatal.
            self.logger.warning(f'Failed to set up popup handling for tab {target_id}: {e}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/popups_watchdog.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/recording_watchdog.py | """Recording Watchdog for Browser Use Sessions."""
import asyncio
from pathlib import Path
from typing import Any, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.page.events import ScreencastFrameEvent
from pydantic import PrivateAttr
from uuid_extensions import uuid7str
from browser_use.browser.events import AgentFocusChangedEvent, BrowserConnectedEvent, BrowserStopEvent
from browser_use.browser.profile import ViewportSize
from browser_use.browser.video_recorder import VideoRecorderService
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
class RecordingWatchdog(BaseWatchdog):
    """
    Manages video recording of a browser session using CDP screencasting.

    Frames arrive via `Page.screencastFrame` on the agent's focused target,
    are appended to a VideoRecorderService, and the finished file is written
    out when the browser stops. Screencasting follows agent focus changes.
    """

    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [BrowserConnectedEvent, BrowserStopEvent, AgentFocusChangedEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []

    # Active recorder (None when not recording)
    _recorder: VideoRecorderService | None = PrivateAttr(default=None)
    # CDP session id currently being screencast (used to drop stale frames)
    _current_session_id: str | None = PrivateAttr(default=None)
    # Cached Page.startScreencast params, reused when focus switches tabs
    _screencast_params: dict[str, Any] | None = PrivateAttr(default=None)

    async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
        """
        Starts video recording if it is configured in the browser profile.
        """
        profile = self.browser_session.browser_profile
        if not profile.record_video_dir:
            return
        # Dynamically determine video size
        size = profile.record_video_size
        if not size:
            self.logger.debug('record_video_size not specified, detecting viewport size...')
            size = await self._get_current_viewport_size()
            if not size:
                self.logger.warning('Cannot start video recording: viewport size could not be determined.')
                return
        video_format = getattr(profile, 'record_video_format', 'mp4').strip('.')
        output_path = Path(profile.record_video_dir) / f'{uuid7str()}.{video_format}'
        self.logger.debug(f'Initializing video recorder for format: {video_format}')
        self._recorder = VideoRecorderService(output_path=output_path, size=size, framerate=profile.record_video_framerate)
        self._recorder.start()
        # If the recorder failed to start (e.g. encoder unavailable), abort recording silently.
        if not self._recorder._is_active:
            self._recorder = None
            return
        self.browser_session.cdp_client.register.Page.screencastFrame(self.on_screencastFrame)
        self._screencast_params = {
            'format': 'png',
            'quality': 90,
            'maxWidth': size['width'],
            'maxHeight': size['height'],
            'everyNthFrame': 1,
        }
        await self._start_screencast()

    async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None:
        """
        Switches video recording to the new tab.
        """
        if self._recorder:
            self.logger.debug(f'Agent focus changed to {event.target_id}, switching screencast...')
            await self._start_screencast()

    async def _start_screencast(self) -> None:
        """Starts screencast on the currently focused tab (no-op if already recording it)."""
        if not self._recorder or not self._screencast_params:
            return
        try:
            # Get the current session (for the focused target)
            cdp_session = await self.browser_session.get_or_create_cdp_session()
            # If we are already recording this session, do nothing
            if self._current_session_id == cdp_session.session_id:
                return
            # Stop recording on the previous session
            if self._current_session_id:
                try:
                    # Use the root client to stop screencast on the specific session
                    await self.browser_session.cdp_client.send.Page.stopScreencast(session_id=self._current_session_id)
                except Exception as e:
                    # It's possible the session is already closed
                    self.logger.debug(f'Failed to stop screencast on old session {self._current_session_id}: {e}')
            self._current_session_id = cdp_session.session_id
            # Start recording on the new session
            await cdp_session.cdp_client.send.Page.startScreencast(
                params=self._screencast_params,  # type: ignore
                session_id=cdp_session.session_id,
            )
            self.logger.info(f'📹 Started/Switched video recording to target {cdp_session.target_id}')
        except Exception as e:
            self.logger.error(f'Failed to switch screencast via CDP: {e}')
            # If we fail to start on the new tab, we reset current session id
            self._current_session_id = None

    async def _get_current_viewport_size(self) -> ViewportSize | None:
        """Gets the current viewport size directly from the browser via CDP.

        Returns:
            ViewportSize with the visible client area, or None on failure.
        """
        try:
            cdp_session = await self.browser_session.get_or_create_cdp_session()
            metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)
            # Use cssVisualViewport for the most accurate representation of the visible area
            viewport = metrics.get('cssVisualViewport', {})
            width = viewport.get('clientWidth')
            height = viewport.get('clientHeight')
            if width and height:
                self.logger.debug(f'Detected viewport size: {width}x{height}')
                return ViewportSize(width=int(width), height=int(height))
        except Exception as e:
            self.logger.warning(f'Failed to get viewport size from browser: {e}')
        return None

    def on_screencastFrame(self, event: ScreencastFrameEvent, session_id: str | None) -> None:
        """
        Synchronous handler for incoming screencast frames.

        Appends the frame to the recorder, then acknowledges it asynchronously
        (CDP stops sending frames until each one is acked).
        """
        # Only process frames from the current session we intend to record
        # This handles race conditions where old session might still send frames before stop completes
        if self._current_session_id and session_id != self._current_session_id:
            return
        if not self._recorder:
            return
        self._recorder.add_frame(event['data'])
        create_task_with_error_handling(
            self._ack_screencast_frame(event, session_id),
            name='ack_screencast_frame',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )

    async def _ack_screencast_frame(self, event: ScreencastFrameEvent, session_id: str | None) -> None:
        """
        Asynchronously acknowledges a screencast frame.
        """
        try:
            await self.browser_session.cdp_client.send.Page.screencastFrameAck(
                params={'sessionId': event['sessionId']}, session_id=session_id
            )
        except Exception as e:
            self.logger.debug(f'Failed to acknowledge screencast frame: {e}')

    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """
        Stops the video recording and finalizes the video file.
        """
        if self._recorder:
            # Detach all recording state first so late frames are ignored while saving.
            recorder = self._recorder
            self._recorder = None
            self._current_session_id = None
            self._screencast_params = None
            self.logger.debug('Stopping video recording and saving file...')
            # Encoding/saving is blocking work - run it in the default executor.
            # (asyncio.get_event_loop() inside a coroutine is deprecated since Python 3.10.)
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, recorder.stop_and_save)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/recording_watchdog.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/screenshot_watchdog.py | """Screenshot watchdog for handling screenshot requests using CDP."""
from typing import TYPE_CHECKING, Any, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.page import CaptureScreenshotParameters
from browser_use.browser.events import ScreenshotEvent
from browser_use.browser.views import BrowserError
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.observability import observe_debug
if TYPE_CHECKING:
pass
class ScreenshotWatchdog(BaseWatchdog):
	"""Handles screenshot requests using CDP."""

	# Events this watchdog listens to
	LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [ScreenshotEvent]
	# Events this watchdog emits
	EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []

	@observe_debug(ignore_input=True, ignore_output=True, name='screenshot_event_handler')
	async def on_ScreenshotEvent(self, event: ScreenshotEvent) -> str:
		"""Handle screenshot request using CDP.

		Args:
			event: ScreenshotEvent requesting a capture.
			       NOTE(review): docstring previously mentioned full_page/clip, but the
			       capture parameters below are fixed (png, viewport only) - confirm intended.

		Returns:
			Base64-encoded PNG screenshot data (the raw `data` field from CDP).

		Raises:
			BrowserError: if no page target is available or CDP returns no data.
		"""
		self.logger.debug('[ScreenshotWatchdog] Handler START - on_ScreenshotEvent called')
		try:
			# Validate focused target is a top-level page (not iframe/worker)
			# CDP Page.captureScreenshot only works on page/tab targets
			focused_target = self.browser_session.get_focused_target()
			if focused_target and focused_target.target_type in ('page', 'tab'):
				target_id = focused_target.target_id
			else:
				# Focused target is iframe/worker/missing - fall back to any page target
				target_type_str = focused_target.target_type if focused_target else 'None'
				self.logger.warning(f'[ScreenshotWatchdog] Focused target is {target_type_str}, falling back to page target')
				page_targets = self.browser_session.get_page_targets()
				if not page_targets:
					raise BrowserError('[ScreenshotWatchdog] No page targets available for screenshot')
				# Fall back to the last page target in the list.
				target_id = page_targets[-1].target_id

			# focus=True brings the chosen target to the foreground before capture.
			cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=True)

			# Prepare screenshot parameters
			params = CaptureScreenshotParameters(format='png', captureBeyondViewport=False)

			# Take screenshot using CDP
			self.logger.debug(f'[ScreenshotWatchdog] Taking screenshot with params: {params}')
			result = await cdp_session.cdp_client.send.Page.captureScreenshot(params=params, session_id=cdp_session.session_id)

			# Return base64-encoded screenshot data
			if result and 'data' in result:
				self.logger.debug('[ScreenshotWatchdog] Screenshot captured successfully')
				return result['data']
			raise BrowserError('[ScreenshotWatchdog] Screenshot result missing data')
		except Exception as e:
			self.logger.error(f'[ScreenshotWatchdog] Screenshot failed: {e}')
			raise
		finally:
			# Try to remove highlights even on failure
			try:
				await self.browser_session.remove_highlights()
			except Exception:
				pass
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/screenshot_watchdog.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/storage_state_watchdog.py | """Storage state watchdog for managing browser cookies and storage persistence."""
import asyncio
import json
import os
from pathlib import Path
from typing import Any, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.network import Cookie
from pydantic import Field, PrivateAttr
from browser_use.browser.events import (
BrowserConnectedEvent,
BrowserStopEvent,
LoadStorageStateEvent,
SaveStorageStateEvent,
StorageStateLoadedEvent,
StorageStateSavedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
class StorageStateWatchdog(BaseWatchdog):
	"""Monitors and persists browser storage state including cookies and localStorage.

	A background task periodically diffs live cookies against the last saved
	snapshot and syncs changes to the profile's storage_state JSON file.
	"""

	# Event contracts
	LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
		BrowserConnectedEvent,
		BrowserStopEvent,
		SaveStorageStateEvent,
		LoadStorageStateEvent,
	]
	EMITS: ClassVar[list[type[BaseEvent]]] = [
		StorageStateSavedEvent,
		StorageStateLoadedEvent,
	]

	# Configuration
	auto_save_interval: float = Field(default=30.0)  # Auto-save every 30 seconds
	save_on_change: bool = Field(default=True)  # Save immediately when cookies change

	# Private state
	_monitoring_task: asyncio.Task | None = PrivateAttr(default=None)  # background auto-save loop
	_last_cookie_state: list[dict] = PrivateAttr(default_factory=list)  # cookies as of the last save/load
	_save_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)  # serializes concurrent saves

	async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
		"""Start monitoring when browser starts."""
		self.logger.debug('[StorageStateWatchdog] 🍪 Initializing auth/cookies sync <-> with storage_state.json file')
		# Start monitoring
		await self._start_monitoring()
		# Automatically load storage state after browser start
		await self.event_bus.dispatch(LoadStorageStateEvent())

	async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
		"""Stop monitoring when browser stops."""
		self.logger.debug('[StorageStateWatchdog] Stopping storage_state monitoring')
		await self._stop_monitoring()

	async def on_SaveStorageStateEvent(self, event: SaveStorageStateEvent) -> None:
		"""Handle storage state save request."""
		# Use provided path or fall back to profile default
		path = event.path
		if path is None:
			# Use profile default path if available
			if self.browser_session.browser_profile.storage_state:
				path = str(self.browser_session.browser_profile.storage_state)
			else:
				path = None  # Skip saving if no path available (_save_storage_state no-ops on falsy path)
		await self._save_storage_state(path)

	async def on_LoadStorageStateEvent(self, event: LoadStorageStateEvent) -> None:
		"""Handle storage state load request."""
		# Use provided path or fall back to profile default
		path = event.path
		if path is None:
			# Use profile default path if available
			if self.browser_session.browser_profile.storage_state:
				path = str(self.browser_session.browser_profile.storage_state)
			else:
				path = None  # Skip loading if no path available (_load_storage_state no-ops on falsy path)
		await self._load_storage_state(path)

	async def _start_monitoring(self) -> None:
		"""Start the monitoring task (idempotent: skips if one is already running)."""
		if self._monitoring_task and not self._monitoring_task.done():
			return

		# NOTE(review): `assert` is stripped under `python -O`; consider an explicit check.
		assert self.browser_session.cdp_client is not None
		self._monitoring_task = create_task_with_error_handling(
			self._monitor_storage_changes(), name='monitor_storage_changes', logger_instance=self.logger, suppress_exceptions=True
		)
		# self.logger'[StorageStateWatchdog] Started storage monitoring task')

	async def _stop_monitoring(self) -> None:
		"""Stop the monitoring task."""
		if self._monitoring_task and not self._monitoring_task.done():
			self._monitoring_task.cancel()
			try:
				await self._monitoring_task
			except asyncio.CancelledError:
				pass  # expected after cancel()

		# self.logger.debug('[StorageStateWatchdog] Stopped storage monitoring task')

	async def _check_for_cookie_changes_cdp(self, event: dict) -> None:
		"""Check if a CDP network event indicates cookie changes.

		This would be called by Network.responseReceivedExtraInfo events
		if we set up CDP event listeners.
		"""
		try:
			# Check for Set-Cookie headers in the response
			headers = event.get('headers', {})
			if 'set-cookie' in headers or 'Set-Cookie' in headers:
				self.logger.debug('[StorageStateWatchdog] Cookie change detected via CDP')

				# If save on change is enabled, trigger save immediately
				if self.save_on_change:
					await self._save_storage_state()
		except Exception as e:
			self.logger.warning(f'[StorageStateWatchdog] Error checking for cookie changes: {e}')

	async def _monitor_storage_changes(self) -> None:
		"""Periodically check for storage changes and auto-save."""
		while True:
			try:
				await asyncio.sleep(self.auto_save_interval)

				# Check if cookies have changed
				if await self._have_cookies_changed():
					self.logger.debug('[StorageStateWatchdog] Detected changes to sync with storage_state.json')
					await self._save_storage_state()
			except asyncio.CancelledError:
				# Cancelled by _stop_monitoring - exit the loop cleanly.
				break
			except Exception as e:
				self.logger.error(f'[StorageStateWatchdog] Error in monitoring loop: {e}')

	async def _have_cookies_changed(self) -> bool:
		"""Check if cookies have changed since last save.

		Compares (name, domain, path) -> value maps, so changes to other cookie
		attributes (e.g. expiry) are not detected.
		"""
		if not self.browser_session.cdp_client:
			return False

		try:
			# Get current cookies using CDP
			current_cookies = await self.browser_session._cdp_get_cookies()

			# Convert to comparable format, using .get() for optional fields
			current_cookie_set = {
				(c.get('name', ''), c.get('domain', ''), c.get('path', '')): c.get('value', '') for c in current_cookies
			}
			last_cookie_set = {
				(c.get('name', ''), c.get('domain', ''), c.get('path', '')): c.get('value', '') for c in self._last_cookie_state
			}

			return current_cookie_set != last_cookie_set
		except Exception as e:
			self.logger.debug(f'[StorageStateWatchdog] Error comparing cookies: {e}')
			return False

	async def _save_storage_state(self, path: str | None = None) -> None:
		"""Save browser storage state to file.

		Writes atomically (tmp file + replace), keeps a .json.bak backup of the
		previous file, and merges with any existing on-disk state first.
		"""
		async with self._save_lock:
			# Check if CDP client is available
			# NOTE(review): `assert` is stripped under `python -O`; consider an explicit check.
			assert await self.browser_session.get_or_create_cdp_session(target_id=None)

			save_path = path or self.browser_session.browser_profile.storage_state
			if not save_path:
				return

			# Skip saving if the storage state is already a dict (indicates it was loaded from memory)
			# We only save to file if it started as a file path
			if isinstance(save_path, dict):
				self.logger.debug('[StorageStateWatchdog] Storage state is already a dict, skipping file save')
				return

			try:
				# Get current storage state using CDP
				storage_state = await self.browser_session._cdp_get_storage_state()

				# Update our last known state
				self._last_cookie_state = storage_state.get('cookies', []).copy()

				# Convert path to Path object
				json_path = Path(save_path).expanduser().resolve()
				json_path.parent.mkdir(parents=True, exist_ok=True)

				# Merge with existing state if file exists
				merged_state = storage_state
				if json_path.exists():
					try:
						existing_state = json.loads(json_path.read_text())
						merged_state = self._merge_storage_states(existing_state, dict(storage_state))
					except Exception as e:
						self.logger.error(f'[StorageStateWatchdog] Failed to merge with existing state: {e}')

				# Write atomically
				temp_path = json_path.with_suffix('.json.tmp')
				temp_path.write_text(json.dumps(merged_state, indent=4, ensure_ascii=False), encoding='utf-8')

				# Backup existing file
				if json_path.exists():
					backup_path = json_path.with_suffix('.json.bak')
					json_path.replace(backup_path)

				# Move temp to final
				temp_path.replace(json_path)

				# Emit success event
				self.event_bus.dispatch(
					StorageStateSavedEvent(
						path=str(json_path),
						cookies_count=len(merged_state.get('cookies', [])),
						origins_count=len(merged_state.get('origins', [])),
					)
				)

				self.logger.debug(
					f'[StorageStateWatchdog] Saved storage state to {json_path} '
					f'({len(merged_state.get("cookies", []))} cookies, '
					f'{len(merged_state.get("origins", []))} origins)'
				)
			except Exception as e:
				self.logger.error(f'[StorageStateWatchdog] Failed to save storage state: {e}')

	async def _load_storage_state(self, path: str | None = None) -> None:
		"""Load browser storage state from file and apply it to the browser via CDP."""
		if not self.browser_session.cdp_client:
			self.logger.warning('[StorageStateWatchdog] No CDP client available for loading')
			return

		load_path = path or self.browser_session.browser_profile.storage_state
		if not load_path or not os.path.exists(str(load_path)):
			return

		try:
			# Read the storage state file asynchronously
			import anyio  # local import: only needed on the load path

			content = await anyio.Path(str(load_path)).read_text()
			storage = json.loads(content)

			# Apply cookies if present
			if 'cookies' in storage and storage['cookies']:
				# Playwright exports session cookies with expires=0/-1. CDP treats expires=0 as expired.
				# Normalize session cookies by omitting expires
				normalized_cookies: list[Cookie] = []
				for cookie in storage['cookies']:
					if not isinstance(cookie, dict):
						normalized_cookies.append(cookie)  # type: ignore[arg-type]
						continue
					c = dict(cookie)
					expires = c.get('expires')
					if expires in (0, 0.0, -1, -1.0):
						c.pop('expires', None)
					normalized_cookies.append(Cookie(**c))
				await self.browser_session._cdp_set_cookies(normalized_cookies)
				self._last_cookie_state = storage['cookies'].copy()
				self.logger.debug(f'[StorageStateWatchdog] Added {len(storage["cookies"])} cookies from storage state')

			# Apply origins (localStorage/sessionStorage) if present
			if 'origins' in storage and storage['origins']:
				for origin in storage['origins']:
					origin_value = origin.get('origin')
					if not origin_value:
						continue
					# Scope storage restoration to its origin to avoid cross-site pollution.
					if origin.get('localStorage'):
						lines = []
						for item in origin['localStorage']:
							lines.append(f'window.localStorage.setItem({json.dumps(item["name"])}, {json.dumps(item["value"])});')
						# Init script runs on every page load; the origin guard makes it a no-op elsewhere.
						script = (
							'(function(){\n'
							f' if (window.location && window.location.origin !== {json.dumps(origin_value)}) return;\n'
							' try {\n'
							f' {" ".join(lines)}\n'
							' } catch (e) {}\n'
							'})();'
						)
						await self.browser_session._cdp_add_init_script(script)
					if origin.get('sessionStorage'):
						lines = []
						for item in origin['sessionStorage']:
							lines.append(
								f'window.sessionStorage.setItem({json.dumps(item["name"])}, {json.dumps(item["value"])});'
							)
						script = (
							'(function(){\n'
							f' if (window.location && window.location.origin !== {json.dumps(origin_value)}) return;\n'
							' try {\n'
							f' {" ".join(lines)}\n'
							' } catch (e) {}\n'
							'})();'
						)
						await self.browser_session._cdp_add_init_script(script)
				self.logger.debug(
					f'[StorageStateWatchdog] Applied localStorage/sessionStorage from {len(storage["origins"])} origins'
				)

			self.event_bus.dispatch(
				StorageStateLoadedEvent(
					path=str(load_path),
					cookies_count=len(storage.get('cookies', [])),
					origins_count=len(storage.get('origins', [])),
				)
			)
			self.logger.debug(f'[StorageStateWatchdog] Loaded storage state from: {load_path}')
		except Exception as e:
			self.logger.error(f'[StorageStateWatchdog] Failed to load storage state: {e}')

	@staticmethod
	def _merge_storage_states(existing: dict[str, Any], new: dict[str, Any]) -> dict[str, Any]:
		"""Merge two storage states, with new values taking precedence.

		Cookies are keyed by (name, domain, path); origins by their origin URL.
		"""
		merged = existing.copy()

		# Merge cookies
		existing_cookies = {(c['name'], c['domain'], c['path']): c for c in existing.get('cookies', [])}
		for cookie in new.get('cookies', []):
			key = (cookie['name'], cookie['domain'], cookie['path'])
			existing_cookies[key] = cookie
		merged['cookies'] = list(existing_cookies.values())

		# Merge origins
		existing_origins = {origin['origin']: origin for origin in existing.get('origins', [])}
		for origin in new.get('origins', []):
			existing_origins[origin['origin']] = origin
		merged['origins'] = list(existing_origins.values())

		return merged

	async def get_current_cookies(self) -> list[dict[str, Any]]:
		"""Get current cookies using CDP."""
		if not self.browser_session.cdp_client:
			return []

		try:
			cookies = await self.browser_session._cdp_get_cookies()
			# Cookie is a TypedDict, cast to dict for compatibility
			return [dict(cookie) for cookie in cookies]
		except Exception as e:
			self.logger.error(f'[StorageStateWatchdog] Failed to get cookies: {e}')
			return []

	async def add_cookies(self, cookies: list[dict[str, Any]]) -> None:
		"""Add cookies using CDP."""
		if not self.browser_session.cdp_client:
			self.logger.warning('[StorageStateWatchdog] No CDP client available for adding cookies')
			return

		try:
			# Convert dicts to Cookie objects
			cookie_objects = [Cookie(**cookie_dict) if isinstance(cookie_dict, dict) else cookie_dict for cookie_dict in cookies]

			# Set cookies using CDP
			await self.browser_session._cdp_set_cookies(cookie_objects)
			self.logger.debug(f'[StorageStateWatchdog] Added {len(cookies)} cookies')
		except Exception as e:
			self.logger.error(f'[StorageStateWatchdog] Failed to add cookies: {e}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/storage_state_watchdog.py",
"license": "MIT License",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/config.py | """Configuration system for browser-use with automatic migration support."""
import json
import logging
import os
from datetime import datetime
from functools import cache
from pathlib import Path
from typing import Any
from uuid import uuid4
import psutil
from pydantic import BaseModel, ConfigDict, Field
from pydantic_settings import BaseSettings, SettingsConfigDict
logger = logging.getLogger(__name__)
@cache
def is_running_in_docker() -> bool:
	"""Detect if we are running in a docker container, for the purpose of optimizing chrome launch flags (dev shm usage, gpu settings, etc.)"""

	def dockerenv_or_cgroup() -> bool:
		# Marker file dropped by Docker, or a docker entry in PID 1's cgroup.
		return Path('/.dockerenv').exists() or 'docker' in Path('/proc/1/cgroup').read_text().lower()

	def init_proc_looks_containerized() -> bool:
		# if init proc (PID 1) looks like uvicorn/python/uv/etc. then we're in Docker
		# if init proc (PID 1) looks like bash/systemd/init/etc. then we're probably NOT in Docker
		init_cmd = ' '.join(psutil.Process(1).cmdline())
		return ('py' in init_cmd) or ('uv' in init_cmd) or ('app' in init_cmd)

	def few_processes() -> bool:
		# if less than 10 total running procs, then we're almost certainly in a container
		return len(psutil.pids()) < 10

	# Each heuristic is best-effort; any error just falls through to the next one.
	for check in (dockerenv_or_cgroup, init_proc_looks_containerized, few_processes):
		try:
			if check():
				return True
		except Exception:
			pass
	return False
class OldConfig:
	"""Original lazy-loading configuration class for environment variables.

	Every property re-reads its environment variable on access, so values
	always reflect the current process environment.
	"""

	# Cache for directory creation tracking
	_dirs_created = False

	@property
	def BROWSER_USE_LOGGING_LEVEL(self) -> str:
		return os.getenv('BROWSER_USE_LOGGING_LEVEL', 'info').lower()

	@property
	def ANONYMIZED_TELEMETRY(self) -> bool:
		# Any value starting with 't', 'y' or '1' (true/yes/1) counts as enabled.
		return os.getenv('ANONYMIZED_TELEMETRY', 'true').lower()[:1] in 'ty1'

	@property
	def BROWSER_USE_CLOUD_SYNC(self) -> bool:
		# Defaults to the telemetry setting when unset.
		return os.getenv('BROWSER_USE_CLOUD_SYNC', str(self.ANONYMIZED_TELEMETRY)).lower()[:1] in 'ty1'

	@property
	def BROWSER_USE_CLOUD_API_URL(self) -> str:
		url = os.getenv('BROWSER_USE_CLOUD_API_URL', 'https://api.browser-use.com')
		assert '://' in url, 'BROWSER_USE_CLOUD_API_URL must be a valid URL'
		return url

	@property
	def BROWSER_USE_CLOUD_UI_URL(self) -> str:
		url = os.getenv('BROWSER_USE_CLOUD_UI_URL', '')
		# Allow empty string as default, only validate if set
		if url and '://' not in url:
			raise AssertionError('BROWSER_USE_CLOUD_UI_URL must be a valid URL if set')
		return url

	# Path configuration
	@property
	def XDG_CACHE_HOME(self) -> Path:
		return Path(os.getenv('XDG_CACHE_HOME', '~/.cache')).expanduser().resolve()

	@property
	def XDG_CONFIG_HOME(self) -> Path:
		return Path(os.getenv('XDG_CONFIG_HOME', '~/.config')).expanduser().resolve()

	@property
	def BROWSER_USE_CONFIG_DIR(self) -> Path:
		path = Path(os.getenv('BROWSER_USE_CONFIG_DIR', str(self.XDG_CONFIG_HOME / 'browseruse'))).expanduser().resolve()
		self._ensure_dirs()
		return path

	@property
	def BROWSER_USE_CONFIG_FILE(self) -> Path:
		return self.BROWSER_USE_CONFIG_DIR / 'config.json'

	@property
	def BROWSER_USE_PROFILES_DIR(self) -> Path:
		path = self.BROWSER_USE_CONFIG_DIR / 'profiles'
		self._ensure_dirs()
		return path

	@property
	def BROWSER_USE_DEFAULT_USER_DATA_DIR(self) -> Path:
		return self.BROWSER_USE_PROFILES_DIR / 'default'

	@property
	def BROWSER_USE_EXTENSIONS_DIR(self) -> Path:
		path = self.BROWSER_USE_CONFIG_DIR / 'extensions'
		self._ensure_dirs()
		return path

	def _ensure_dirs(self) -> None:
		"""Create directories if they don't exist (only once)"""
		# NOTE(review): the flag is set on the instance (shadowing the class attribute),
		# so a fresh OldConfig() - as created per-access by Config.__getattr__ - will
		# re-run the mkdir calls; they are exist_ok so this is harmless but not "once".
		if not self._dirs_created:
			config_dir = (
				Path(os.getenv('BROWSER_USE_CONFIG_DIR', str(self.XDG_CONFIG_HOME / 'browseruse'))).expanduser().resolve()
			)
			config_dir.mkdir(parents=True, exist_ok=True)
			(config_dir / 'profiles').mkdir(parents=True, exist_ok=True)
			(config_dir / 'extensions').mkdir(parents=True, exist_ok=True)
			self._dirs_created = True

	# LLM API key configuration
	@property
	def OPENAI_API_KEY(self) -> str:
		return os.getenv('OPENAI_API_KEY', '')

	@property
	def ANTHROPIC_API_KEY(self) -> str:
		return os.getenv('ANTHROPIC_API_KEY', '')

	@property
	def GOOGLE_API_KEY(self) -> str:
		return os.getenv('GOOGLE_API_KEY', '')

	@property
	def DEEPSEEK_API_KEY(self) -> str:
		return os.getenv('DEEPSEEK_API_KEY', '')

	@property
	def GROK_API_KEY(self) -> str:
		return os.getenv('GROK_API_KEY', '')

	@property
	def NOVITA_API_KEY(self) -> str:
		return os.getenv('NOVITA_API_KEY', '')

	@property
	def AZURE_OPENAI_ENDPOINT(self) -> str:
		return os.getenv('AZURE_OPENAI_ENDPOINT', '')

	@property
	def AZURE_OPENAI_KEY(self) -> str:
		return os.getenv('AZURE_OPENAI_KEY', '')

	@property
	def SKIP_LLM_API_KEY_VERIFICATION(self) -> bool:
		return os.getenv('SKIP_LLM_API_KEY_VERIFICATION', 'false').lower()[:1] in 'ty1'

	@property
	def DEFAULT_LLM(self) -> str:
		return os.getenv('DEFAULT_LLM', '')

	# Runtime hints
	@property
	def IN_DOCKER(self) -> bool:
		# Explicit env override OR the cached auto-detection heuristic.
		return os.getenv('IN_DOCKER', 'false').lower()[:1] in 'ty1' or is_running_in_docker()

	@property
	def IS_IN_EVALS(self) -> bool:
		return os.getenv('IS_IN_EVALS', 'false').lower()[:1] in 'ty1'

	@property
	def BROWSER_USE_VERSION_CHECK(self) -> bool:
		return os.getenv('BROWSER_USE_VERSION_CHECK', 'true').lower()[:1] in 'ty1'

	@property
	def WIN_FONT_DIR(self) -> str:
		return os.getenv('WIN_FONT_DIR', 'C:\\Windows\\Fonts')
class FlatEnvConfig(BaseSettings):
	"""All environment variables in a flat namespace.

	Values are read from the process environment and an optional `.env` file;
	extra='allow' tolerates unrelated env vars, case_sensitive=True requires
	exact-name matches.
	"""

	model_config = SettingsConfigDict(env_file='.env', env_file_encoding='utf-8', case_sensitive=True, extra='allow')

	# Logging and telemetry
	BROWSER_USE_LOGGING_LEVEL: str = Field(default='info')
	CDP_LOGGING_LEVEL: str = Field(default='WARNING')
	BROWSER_USE_DEBUG_LOG_FILE: str | None = Field(default=None)
	BROWSER_USE_INFO_LOG_FILE: str | None = Field(default=None)
	ANONYMIZED_TELEMETRY: bool = Field(default=True)
	BROWSER_USE_CLOUD_SYNC: bool | None = Field(default=None)
	BROWSER_USE_CLOUD_API_URL: str = Field(default='https://api.browser-use.com')
	BROWSER_USE_CLOUD_UI_URL: str = Field(default='')

	# Path configuration
	XDG_CACHE_HOME: str = Field(default='~/.cache')
	XDG_CONFIG_HOME: str = Field(default='~/.config')
	BROWSER_USE_CONFIG_DIR: str | None = Field(default=None)

	# LLM API keys
	OPENAI_API_KEY: str = Field(default='')
	ANTHROPIC_API_KEY: str = Field(default='')
	GOOGLE_API_KEY: str = Field(default='')
	DEEPSEEK_API_KEY: str = Field(default='')
	GROK_API_KEY: str = Field(default='')
	NOVITA_API_KEY: str = Field(default='')
	AZURE_OPENAI_ENDPOINT: str = Field(default='')
	AZURE_OPENAI_KEY: str = Field(default='')
	SKIP_LLM_API_KEY_VERIFICATION: bool = Field(default=False)
	DEFAULT_LLM: str = Field(default='')

	# Runtime hints
	IN_DOCKER: bool | None = Field(default=None)
	IS_IN_EVALS: bool = Field(default=False)
	WIN_FONT_DIR: str = Field(default='C:\\Windows\\Fonts')
	BROWSER_USE_VERSION_CHECK: bool = Field(default=True)

	# MCP-specific env vars
	BROWSER_USE_CONFIG_PATH: str | None = Field(default=None)
	BROWSER_USE_HEADLESS: bool | None = Field(default=None)
	BROWSER_USE_ALLOWED_DOMAINS: str | None = Field(default=None)
	BROWSER_USE_LLM_MODEL: str | None = Field(default=None)

	# Proxy env vars
	BROWSER_USE_PROXY_URL: str | None = Field(default=None)
	BROWSER_USE_NO_PROXY: str | None = Field(default=None)
	BROWSER_USE_PROXY_USERNAME: str | None = Field(default=None)
	BROWSER_USE_PROXY_PASSWORD: str | None = Field(default=None)

	# Extension env vars
	BROWSER_USE_DISABLE_EXTENSIONS: bool | None = Field(default=None)
class DBStyleEntry(BaseModel):
	"""Database-style entry with UUID and metadata."""

	# Unique identifier; also used as the dict key in DBStyleConfigJSON sections.
	id: str = Field(default_factory=lambda: str(uuid4()))
	# Marks the entry returned by the get_default_* lookups.
	default: bool = Field(default=False)
	# Creation timestamp in ISO format.
	# NOTE(review): datetime.utcnow() is naive and deprecated since Python 3.12;
	# switching to datetime.now(timezone.utc) would change the serialized format.
	created_at: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
class BrowserProfileEntry(DBStyleEntry):
	"""Browser profile configuration entry - accepts any BrowserProfile fields."""

	# extra='allow' lets arbitrary additional BrowserProfile fields pass through unvalidated.
	model_config = ConfigDict(extra='allow')

	# Common browser profile fields for reference
	headless: bool | None = None
	user_data_dir: str | None = None
	allowed_domains: list[str] | None = None
	downloads_path: str | None = None
class LLMEntry(DBStyleEntry):
	"""LLM configuration entry."""

	# All fields optional; None values are dropped by model_dump(exclude_none=True).
	api_key: str | None = None
	model: str | None = None
	temperature: float | None = None
	max_tokens: int | None = None
class AgentEntry(DBStyleEntry):
	"""Agent configuration entry."""

	# All fields optional; None values are dropped by model_dump(exclude_none=True).
	max_steps: int | None = None
	use_vision: bool | None = None
	system_prompt: str | None = None
class DBStyleConfigJSON(BaseModel):
	"""New database-style configuration format.

	Each section maps an entry's UUID string to its entry model.
	"""

	browser_profile: dict[str, BrowserProfileEntry] = Field(default_factory=dict)
	llm: dict[str, LLMEntry] = Field(default_factory=dict)
	agent: dict[str, AgentEntry] = Field(default_factory=dict)
def create_default_config() -> DBStyleConfigJSON:
	"""Build and return a fresh default configuration with one default entry per section."""
	logger.debug('Creating fresh default config.json')
	config = DBStyleConfigJSON()

	# Each section is keyed by a freshly generated UUID that also becomes the entry's id.
	profile_id, llm_id, agent_id = (str(uuid4()) for _ in range(3))

	config.browser_profile[profile_id] = BrowserProfileEntry(id=profile_id, default=True, headless=False, user_data_dir=None)
	config.llm[llm_id] = LLMEntry(id=llm_id, default=True, model='gpt-4.1-mini', api_key='your-openai-api-key-here')
	config.agent[agent_id] = AgentEntry(id=agent_id, default=True)

	return config
def load_and_migrate_config(config_path: Path) -> DBStyleConfigJSON:
	"""Load config.json or create fresh one if old format detected.

	Unreadable or old-format files are replaced wholesale with a fresh default
	config rather than migrated field-by-field.
	"""
	if not config_path.exists():
		# Create fresh config with defaults
		config_path.parent.mkdir(parents=True, exist_ok=True)
		new_config = create_default_config()
		with open(config_path, 'w') as f:
			json.dump(new_config.model_dump(), f, indent=2)
		return new_config

	try:
		with open(config_path) as f:
			data = json.load(f)

		# Check if it's already in DB-style format
		if all(key in data for key in ['browser_profile', 'llm', 'agent']) and all(
			isinstance(data.get(key, {}), dict) for key in ['browser_profile', 'llm', 'agent']
		):
			# Check if the values are DB-style entries (have UUIDs as keys)
			# NOTE(review): an empty 'browser_profile' dict fails this truthiness check, so a
			# valid new-format config with zero profiles would be overwritten below - confirm intended.
			if data.get('browser_profile') and all(isinstance(v, dict) and 'id' in v for v in data['browser_profile'].values()):
				# Already in new format
				return DBStyleConfigJSON(**data)

		# Old format detected - delete it and create fresh config
		logger.debug(f'Old config format detected at {config_path}, creating fresh config')
		new_config = create_default_config()

		# Overwrite with new config
		with open(config_path, 'w') as f:
			json.dump(new_config.model_dump(), f, indent=2)

		logger.debug(f'Created fresh config.json at {config_path}')
		return new_config

	except Exception as e:
		logger.error(f'Failed to load config from {config_path}: {e}, creating fresh config')
		# On any error, create fresh config
		new_config = create_default_config()
		try:
			with open(config_path, 'w') as f:
				json.dump(new_config.model_dump(), f, indent=2)
		except Exception as write_error:
			logger.error(f'Failed to write fresh config: {write_error}')
		return new_config
class Config:
	"""Backward-compatible configuration class that merges all config sources.

	Re-reads environment variables on every access to maintain compatibility.
	"""

	def __init__(self):
		# Cache for directory creation tracking only
		self._dirs_created = False

	def __getattr__(self, name: str) -> Any:
		"""Dynamically proxy all attributes to fresh instances.

		This ensures env vars are re-read on every access.

		Resolution order: OldConfig properties first, then FlatEnvConfig
		fields, then the special helper-method names handled below.
		"""
		# Special handling for internal attributes
		if name.startswith('_'):
			raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

		# Create fresh instances on every access
		old_config = OldConfig()

		# Always use old config for all attributes (it handles env vars with proper transformations)
		if hasattr(old_config, name):
			return getattr(old_config, name)

		# For new MCP-specific attributes not in old config
		env_config = FlatEnvConfig()
		if hasattr(env_config, name):
			return getattr(env_config, name)

		# Handle special methods
		if name == 'get_default_profile':
			return lambda: self._get_default_profile()
		elif name == 'get_default_llm':
			return lambda: self._get_default_llm()
		elif name == 'get_default_agent':
			return lambda: self._get_default_agent()
		elif name == 'load_config':
			return lambda: self._load_config()
		elif name == '_ensure_dirs':
			# NOTE(review): unreachable - names starting with '_' are rejected at the top
			# of this method before this branch can match; confirm intended.
			return lambda: old_config._ensure_dirs()

		raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

	def _get_config_path(self) -> Path:
		"""Get config path from fresh env config.

		Precedence: BROWSER_USE_CONFIG_PATH > BROWSER_USE_CONFIG_DIR/config.json
		> $XDG_CONFIG_HOME/browseruse/config.json.
		"""
		env_config = FlatEnvConfig()
		if env_config.BROWSER_USE_CONFIG_PATH:
			return Path(env_config.BROWSER_USE_CONFIG_PATH).expanduser()
		elif env_config.BROWSER_USE_CONFIG_DIR:
			return Path(env_config.BROWSER_USE_CONFIG_DIR).expanduser() / 'config.json'
		else:
			xdg_config = Path(env_config.XDG_CONFIG_HOME).expanduser()
			return xdg_config / 'browseruse' / 'config.json'

	def _get_db_config(self) -> DBStyleConfigJSON:
		"""Load and migrate config.json."""
		config_path = self._get_config_path()
		return load_and_migrate_config(config_path)

	def _get_default_profile(self) -> dict[str, Any]:
		"""Get the default browser profile configuration.

		Prefers the entry flagged default=True, else the first entry, else {}.
		"""
		db_config = self._get_db_config()
		for profile in db_config.browser_profile.values():
			if profile.default:
				return profile.model_dump(exclude_none=True)
		# Return first profile if no default
		if db_config.browser_profile:
			return next(iter(db_config.browser_profile.values())).model_dump(exclude_none=True)
		return {}

	def _get_default_llm(self) -> dict[str, Any]:
		"""Get the default LLM configuration (default=True entry, else first, else {})."""
		db_config = self._get_db_config()
		for llm in db_config.llm.values():
			if llm.default:
				return llm.model_dump(exclude_none=True)
		# Return first LLM if no default
		if db_config.llm:
			return next(iter(db_config.llm.values())).model_dump(exclude_none=True)
		return {}

	def _get_default_agent(self) -> dict[str, Any]:
		"""Get the default agent configuration (default=True entry, else first, else {})."""
		db_config = self._get_db_config()
		for agent in db_config.agent.values():
			if agent.default:
				return agent.model_dump(exclude_none=True)
		# Return first agent if no default
		if db_config.agent:
			return next(iter(db_config.agent.values())).model_dump(exclude_none=True)
		return {}

	def _load_config(self) -> dict[str, Any]:
		"""Load configuration with env var overrides for MCP components.

		Returns a dict with 'browser_profile', 'llm' and 'agent' sections,
		seeded from config.json defaults and then overridden by env vars.
		"""
		config = {
			'browser_profile': self._get_default_profile(),
			'llm': self._get_default_llm(),
			'agent': self._get_default_agent(),
		}

		# Fresh env config for overrides
		env_config = FlatEnvConfig()

		# Apply MCP-specific env var overrides
		if env_config.BROWSER_USE_HEADLESS is not None:
			config['browser_profile']['headless'] = env_config.BROWSER_USE_HEADLESS

		if env_config.BROWSER_USE_ALLOWED_DOMAINS:
			# Parse domains from comma-separated string, dropping empty segments.
			domains = [d.strip() for d in env_config.BROWSER_USE_ALLOWED_DOMAINS.split(',') if d.strip()]
			config['browser_profile']['allowed_domains'] = domains

		# Proxy settings (Chromium) -> consolidated `proxy` dict
		proxy_dict: dict[str, Any] = {}
		if env_config.BROWSER_USE_PROXY_URL:
			proxy_dict['server'] = env_config.BROWSER_USE_PROXY_URL
		if env_config.BROWSER_USE_NO_PROXY:
			# store bypass as comma-separated string to match Chrome flag
			proxy_dict['bypass'] = ','.join([d.strip() for d in env_config.BROWSER_USE_NO_PROXY.split(',') if d.strip()])
		if env_config.BROWSER_USE_PROXY_USERNAME:
			proxy_dict['username'] = env_config.BROWSER_USE_PROXY_USERNAME
		if env_config.BROWSER_USE_PROXY_PASSWORD:
			proxy_dict['password'] = env_config.BROWSER_USE_PROXY_PASSWORD

		if proxy_dict:
			# ensure section exists
			config.setdefault('browser_profile', {})
			config['browser_profile']['proxy'] = proxy_dict

		if env_config.OPENAI_API_KEY:
			config['llm']['api_key'] = env_config.OPENAI_API_KEY
		if env_config.BROWSER_USE_LLM_MODEL:
			config['llm']['model'] = env_config.BROWSER_USE_LLM_MODEL

		# Extension settings
		if env_config.BROWSER_USE_DISABLE_EXTENSIONS is not None:
			config['browser_profile']['enable_default_extensions'] = not env_config.BROWSER_USE_DISABLE_EXTENSIONS

		return config
# Create singleton instance.
# Module-level singleton; attribute access re-reads env vars (see Config.__getattr__).
CONFIG = Config()
# Helper functions for MCP components
def load_browser_use_config() -> dict[str, Any]:
	"""Return the merged browser-use configuration used by MCP components.

	Delegates to the module-level CONFIG singleton, so config.json defaults and
	env-var overrides are re-evaluated on every call.
	"""
	merged = CONFIG.load_config()
	return merged
def get_default_profile(config: dict[str, Any]) -> dict[str, Any]:
	"""Return the 'browser_profile' section of *config*, or {} when the key is absent."""
	return config['browser_profile'] if 'browser_profile' in config else {}
def get_default_llm(config: dict[str, Any]) -> dict[str, Any]:
	"""Return the 'llm' section of *config*, or {} when the key is absent."""
	return config['llm'] if 'llm' in config else {}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/config.py",
"license": "MIT License",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/enhanced_snapshot.py | """
Enhanced snapshot processing for browser-use DOM tree extraction.
This module provides stateless functions for parsing Chrome DevTools Protocol (CDP) DOMSnapshot data
to extract visibility, clickability, cursor styles, and other layout information.
"""
from cdp_use.cdp.domsnapshot.commands import CaptureSnapshotReturns
from cdp_use.cdp.domsnapshot.types import (
LayoutTreeSnapshot,
NodeTreeSnapshot,
RareBooleanData,
)
from browser_use.dom.views import DOMRect, EnhancedSnapshotNode
# Only the ESSENTIAL computed styles for interactivity and visibility detection.
# NOTE: order matters — _parse_computed_styles maps snapshot style values back
# to these names by positional index, so keep this list in sync with the
# computedStyles whitelist sent to CDP.
REQUIRED_COMPUTED_STYLES = [
    # Only styles actually accessed in the codebase (prevents Chrome crashes on heavy sites)
    'display',  # Used in service.py visibility detection
    'visibility',  # Used in service.py visibility detection
    'opacity',  # Used in service.py visibility detection
    'overflow',  # Used in views.py scrollability detection
    'overflow-x',  # Used in views.py scrollability detection
    'overflow-y',  # Used in views.py scrollability detection
    'cursor',  # Used in enhanced_snapshot.py cursor extraction
    'pointer-events',  # Used for clickability logic
    'position',  # Used for visibility logic
    'background-color',  # Used for visibility logic
]
def _parse_rare_boolean_data(rare_data: RareBooleanData, index: int) -> bool:
    """Return True iff *index* appears in the rare boolean data's index list.

    CDP encodes sparse boolean flags as a list of node indices for which the
    flag is set; membership in that list means the flag is True.

    Fix: the return annotation was ``bool | None``, but the membership test
    always yields a bool — the annotation now reflects the actual contract.
    """
    return index in rare_data['index']
def _parse_computed_styles(strings: list[str], style_indices: list[int]) -> dict[str, str]:
    """Resolve computed-style values for a layout node from the string table.

    ``style_indices`` is positionally aligned with REQUIRED_COMPUTED_STYLES
    (zip truncates any extra indices); entries whose string index falls
    outside the table are skipped.
    """
    return {
        style_name: strings[string_idx]
        for style_name, string_idx in zip(REQUIRED_COMPUTED_STYLES, style_indices)
        if 0 <= string_idx < len(strings)
    }
def build_snapshot_lookup(
    snapshot: CaptureSnapshotReturns,
    device_pixel_ratio: float = 1.0,
) -> dict[int, EnhancedSnapshotNode]:
    """Build a lookup table of backend node ID to enhanced snapshot data with everything calculated upfront.

    Args:
        snapshot: Raw CDP ``DOMSnapshot.captureSnapshot`` result (shared string
            table plus per-document node/layout arrays).
        device_pixel_ratio: Divisor used to convert CDP device-pixel bounds
            into CSS pixels.

    Returns:
        Mapping of backend node id to EnhancedSnapshotNode with clickability,
        cursor style, bounds, computed styles, paint order and rect data
        pre-extracted.
    """
    import logging
    logger = logging.getLogger('browser_use.dom.enhanced_snapshot')
    snapshot_lookup: dict[int, EnhancedSnapshotNode] = {}
    if not snapshot['documents']:
        return snapshot_lookup
    # All text values in the snapshot are indices into this shared string table.
    strings = snapshot['strings']
    logger.debug(f'🔍 SNAPSHOT: Processing {len(snapshot["documents"])} documents with {len(strings)} strings')
    for doc_idx, document in enumerate(snapshot['documents']):
        nodes: NodeTreeSnapshot = document['nodes']
        layout: LayoutTreeSnapshot = document['layout']
        # Build backend node id to snapshot index lookup
        backend_node_to_snapshot_index = {}
        if 'backendNodeId' in nodes:
            for i, backend_node_id in enumerate(nodes['backendNodeId']):
                backend_node_to_snapshot_index[backend_node_id] = i
        # Log document info
        doc_url = strings[document.get('documentURL', 0)] if document.get('documentURL', 0) < len(strings) else 'N/A'
        logger.debug(
            f'🔍 SNAPSHOT doc[{doc_idx}]: url={doc_url[:80]}... has {len(backend_node_to_snapshot_index)} nodes, '
            f'layout has {len(layout.get("nodeIndex", []))} entries'
        )
        # PERFORMANCE: Pre-build layout index map to eliminate O(n²) double lookups
        # Preserve original behavior: use FIRST occurrence for duplicates
        layout_index_map = {}
        if layout and 'nodeIndex' in layout:
            for layout_idx, node_index in enumerate(layout['nodeIndex']):
                if node_index not in layout_index_map:  # Only store first occurrence
                    layout_index_map[node_index] = layout_idx
        # Build snapshot lookup for each backend node id
        for backend_node_id, snapshot_index in backend_node_to_snapshot_index.items():
            is_clickable = None
            if 'isClickable' in nodes:
                is_clickable = _parse_rare_boolean_data(nodes['isClickable'], snapshot_index)
            # Find corresponding layout node; defaults cover nodes with no layout entry.
            cursor_style = None
            # NOTE(review): is_visible is assigned but never used below.
            is_visible = None
            bounding_box = None
            computed_styles = {}
            # Look for layout tree node that corresponds to this snapshot node
            paint_order = None
            client_rects = None
            scroll_rects = None
            stacking_contexts = None
            if snapshot_index in layout_index_map:
                layout_idx = layout_index_map[snapshot_index]
                if layout_idx < len(layout.get('bounds', [])):
                    # Parse bounding box
                    bounds = layout['bounds'][layout_idx]
                    if len(bounds) >= 4:
                        # IMPORTANT: CDP coordinates are in device pixels, convert to CSS pixels
                        # by dividing by the device pixel ratio
                        raw_x, raw_y, raw_width, raw_height = bounds[0], bounds[1], bounds[2], bounds[3]
                        # Apply device pixel ratio scaling to convert device pixels to CSS pixels
                        bounding_box = DOMRect(
                            x=raw_x / device_pixel_ratio,
                            y=raw_y / device_pixel_ratio,
                            width=raw_width / device_pixel_ratio,
                            height=raw_height / device_pixel_ratio,
                        )
                # Parse computed styles for this layout node
                if layout_idx < len(layout.get('styles', [])):
                    style_indices = layout['styles'][layout_idx]
                    computed_styles = _parse_computed_styles(strings, style_indices)
                    cursor_style = computed_styles.get('cursor')
                # Extract paint order if available
                if layout_idx < len(layout.get('paintOrders', [])):
                    paint_order = layout.get('paintOrders', [])[layout_idx]
                # Extract client rects if available
                client_rects_data = layout.get('clientRects', [])
                if layout_idx < len(client_rects_data):
                    client_rect_data = client_rects_data[layout_idx]
                    if client_rect_data and len(client_rect_data) >= 4:
                        client_rects = DOMRect(
                            x=client_rect_data[0],
                            y=client_rect_data[1],
                            width=client_rect_data[2],
                            height=client_rect_data[3],
                        )
                # Extract scroll rects if available
                scroll_rects_data = layout.get('scrollRects', [])
                if layout_idx < len(scroll_rects_data):
                    scroll_rect_data = scroll_rects_data[layout_idx]
                    if scroll_rect_data and len(scroll_rect_data) >= 4:
                        scroll_rects = DOMRect(
                            x=scroll_rect_data[0],
                            y=scroll_rect_data[1],
                            width=scroll_rect_data[2],
                            height=scroll_rect_data[3],
                        )
                # Extract stacking contexts if available
                # NOTE(review): this guard takes len() of the stackingContexts
                # rare-data mapping itself (its key count), not of its 'index'
                # list, and then indexes that list by layout position — looks
                # suspect; confirm intended semantics against CDP's
                # RareBooleanData encoding before relying on this value.
                if layout_idx < len(layout.get('stackingContexts', [])):
                    stacking_contexts = layout.get('stackingContexts', {}).get('index', [])[layout_idx]
            snapshot_lookup[backend_node_id] = EnhancedSnapshotNode(
                is_clickable=is_clickable,
                cursor_style=cursor_style,
                bounds=bounding_box,
                clientRects=client_rects,
                scrollRects=scroll_rects,
                computed_styles=computed_styles if computed_styles else None,
                paint_order=paint_order,
                stacking_contexts=stacking_contexts,
            )
    # Count how many have bounds (are actually visible/laid out)
    with_bounds = sum(1 for n in snapshot_lookup.values() if n.bounds)
    logger.debug(f'🔍 SNAPSHOT: Built lookup with {len(snapshot_lookup)} total entries, {with_bounds} have bounds')
    return snapshot_lookup
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/enhanced_snapshot.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/playground/extraction.py | import asyncio
import json
import os
import time
import anyio
import pyperclip
import tiktoken
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.events import ClickElementEvent, TypeTextEvent
from browser_use.browser.profile import ViewportSize
from browser_use.dom.service import DomService
from browser_use.dom.views import DEFAULT_INCLUDE_ATTRIBUTES
from browser_use.filesystem.file_system import FileSystem
TIMEOUT = 60  # presumably seconds — TODO confirm; not referenced in the visible code below
async def test_focus_vs_all_elements():
    """Interactive DOM-extraction playground.

    Cycles through a list of sample and difficult websites, extracts the
    serialized DOM state for each, dumps the agent prompt / element trees /
    timing analysis under ./tmp, and lets the user click or type into
    elements by index from the terminal.
    """
    browser_session = BrowserSession(
        browser_profile=BrowserProfile(
            # executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
            window_size=ViewportSize(width=1100, height=1000),
            disable_security=False,
            wait_for_network_idle_page_load_time=1,
            headless=False,
            args=['--incognito'],
            paint_order_filtering=True,
        ),
    )
    # 10 Sample websites with various interactive elements
    sample_websites = [
        'https://browser-use.github.io/stress-tests/challenges/iframe-inception-level2.html',
        'https://www.google.com/travel/flights',
        'https://v0-simple-ui-test-site.vercel.app',
        'https://browser-use.github.io/stress-tests/challenges/iframe-inception-level1.html',
        'https://browser-use.github.io/stress-tests/challenges/angular-form.html',
        'https://www.google.com/travel/flights',
        'https://www.amazon.com/s?k=laptop',
        'https://github.com/trending',
        'https://www.reddit.com',
        'https://www.ycombinator.com/companies',
        'https://www.kayak.com/flights',
        'https://www.booking.com',
        'https://www.airbnb.com',
        'https://www.linkedin.com/jobs',
        'https://stackoverflow.com/questions',
    ]
    # 5 Difficult websites with complex elements (iframes, canvas, dropdowns, etc.)
    difficult_websites = [
        'https://www.w3schools.com/html/tryit.asp?filename=tryhtml_iframe',  # Nested iframes
        'https://semantic-ui.com/modules/dropdown.html',  # Complex dropdowns
        'https://www.dezlearn.com/nested-iframes-example/',  # Cross-origin nested iframes
        'https://codepen.io/towc/pen/mJzOWJ',  # Canvas elements with interactions
        'https://jqueryui.com/accordion/',  # Complex accordion/dropdown widgets
        'https://v0-simple-landing-page-seven-xi.vercel.app/',  # Simple landing page with iframe
        'https://www.unesco.org/en',
    ]
    # Descriptions for difficult websites
    difficult_descriptions = {
        'https://www.w3schools.com/html/tryit.asp?filename=tryhtml_iframe': '🔸 NESTED IFRAMES: Multiple iframe layers',
        'https://semantic-ui.com/modules/dropdown.html': '🔸 COMPLEX DROPDOWNS: Custom dropdown components',
        'https://www.dezlearn.com/nested-iframes-example/': '🔸 CROSS-ORIGIN IFRAMES: Different domain iframes',
        'https://codepen.io/towc/pen/mJzOWJ': '🔸 CANVAS ELEMENTS: Interactive canvas graphics',
        'https://jqueryui.com/accordion/': '🔸 ACCORDION WIDGETS: Collapsible content sections',
    }
    websites = sample_websites + difficult_websites
    current_website_index = 0
    def get_website_list_for_prompt() -> str:
        """Get a compact website list for the input prompt."""
        lines = []
        lines.append('📋 Websites:')
        # Sample websites (1-10)
        for i, site in enumerate(sample_websites, 1):
            current_marker = ' ←' if (i - 1) == current_website_index else ''
            domain = site.replace('https://', '').split('/')[0]
            lines.append(f' {i:2d}.{domain[:15]:<15}{current_marker}')
        # Difficult websites (11-15)
        for i, site in enumerate(difficult_websites, len(sample_websites) + 1):
            current_marker = ' ←' if (i - 1) == current_website_index else ''
            domain = site.replace('https://', '').split('/')[0]
            desc = difficult_descriptions.get(site, '')
            challenge = desc.split(': ')[1][:15] if ': ' in desc else ''
            lines.append(f' {i:2d}.{domain[:15]:<15} ({challenge}){current_marker}')
        return '\n'.join(lines)
    await browser_session.start()
    # Show startup info
    print('\n🌐 BROWSER-USE DOM EXTRACTION TESTER')
    print(f'📊 {len(websites)} websites total: {len(sample_websites)} standard + {len(difficult_websites)} complex')
    print('🔧 Controls: Type 1-15 to jump | Enter to re-run | "n" next | "q" quit')
    print('💾 Outputs: tmp/user_message.txt & tmp/element_tree.json\n')
    dom_service = DomService(browser_session)
    # Outer loop: one iteration per website; the inner loop re-runs extraction
    # on the same page until the user moves on or quits.
    while True:
        # Cycle through websites
        if current_website_index >= len(websites):
            current_website_index = 0
            print('Cycled back to first website!')
        website = websites[current_website_index]
        # sleep 2
        await browser_session._cdp_navigate(website)
        await asyncio.sleep(1)
        last_clicked_index = None  # Track the index for text input (NOTE(review): never read below)
        while True:
            try:
                # all_elements_state = await dom_service.get_serialized_dom_tree()
                website_type = 'DIFFICULT' if website in difficult_websites else 'SAMPLE'
                print(f'\n{"=" * 60}')
                print(f'[{current_website_index + 1}/{len(websites)}] [{website_type}] Testing: {website}')
                if website in difficult_descriptions:
                    print(f'{difficult_descriptions[website]}')
                print(f'{"=" * 60}')
                # Get/refresh the state (includes removing old highlights)
                print('\nGetting page state...')
                start_time = time.time()
                all_elements_state = await browser_session.get_browser_state_summary(True)
                end_time = time.time()
                get_state_time = end_time - start_time
                print(f'get_state_summary took {get_state_time:.2f} seconds')
                # Get detailed timing info from DOM service
                print('\nGetting detailed DOM timing...')
                serialized_state, _, timing_info = await dom_service.get_serialized_dom_tree()
                # Combine all timing info
                all_timing = {'get_state_summary_total': get_state_time, **timing_info}
                selector_map = all_elements_state.dom_state.selector_map
                total_elements = len(selector_map.keys())
                print(f'Total number of elements: {total_elements}')
                # print(all_elements_state.element_tree.clickable_elements_to_string())
                prompt = AgentMessagePrompt(
                    browser_state_summary=all_elements_state,
                    file_system=FileSystem(base_dir='./tmp'),
                    include_attributes=DEFAULT_INCLUDE_ATTRIBUTES,
                    step_info=None,
                )
                # Write the user message to a file for analysis
                user_message = prompt.get_user_message(use_vision=False).text
                # clickable_elements_str = all_elements_state.element_tree.clickable_elements_to_string()
                text_to_save = user_message
                os.makedirs('./tmp', exist_ok=True)
                async with await anyio.open_file('./tmp/user_message.txt', 'w', encoding='utf-8') as f:
                    await f.write(text_to_save)
                # save pure clickable elements to a file
                if all_elements_state.dom_state._root:
                    async with await anyio.open_file('./tmp/simplified_element_tree.json', 'w', encoding='utf-8') as f:
                        await f.write(json.dumps(all_elements_state.dom_state._root.__json__(), indent=2))
                    async with await anyio.open_file('./tmp/original_element_tree.json', 'w', encoding='utf-8') as f:
                        await f.write(json.dumps(all_elements_state.dom_state._root.original_node.__json__(), indent=2))
                # copy the user message to the clipboard
                # pyperclip.copy(text_to_save)
                encoding = tiktoken.encoding_for_model('gpt-4.1-mini')
                token_count = len(encoding.encode(text_to_save))
                print(f'Token count: {token_count}')
                print('User message written to ./tmp/user_message.txt')
                print('Element tree written to ./tmp/simplified_element_tree.json')
                print('Original element tree written to ./tmp/original_element_tree.json')
                # Save timing information
                timing_text = '🔍 DOM EXTRACTION PERFORMANCE ANALYSIS\n'
                timing_text += f'{"=" * 50}\n\n'
                timing_text += f'📄 Website: {website}\n'
                timing_text += f'📊 Total Elements: {total_elements}\n'
                timing_text += f'🎯 Token Count: {token_count}\n\n'
                timing_text += '⏱️ TIMING BREAKDOWN:\n'
                timing_text += f'{"─" * 30}\n'
                for key, value in all_timing.items():
                    timing_text += f'{key:<35}: {value * 1000:>8.2f} ms\n'
                # Calculate percentages
                total_time = all_timing.get('get_state_summary_total', 0)
                if total_time > 0 and total_elements > 0:
                    timing_text += '\n📈 PERCENTAGE BREAKDOWN:\n'
                    timing_text += f'{"─" * 30}\n'
                    for key, value in all_timing.items():
                        if key != 'get_state_summary_total':
                            percentage = (value / total_time) * 100
                            timing_text += f'{key:<35}: {percentage:>7.1f}%\n'
                timing_text += '\n🎯 CLICKABLE DETECTION ANALYSIS:\n'
                timing_text += f'{"─" * 35}\n'
                clickable_time = all_timing.get('clickable_detection_time', 0)
                if clickable_time > 0 and total_elements > 0:
                    avg_per_element = (clickable_time / total_elements) * 1000000  # microseconds
                    timing_text += f'Total clickable detection time: {clickable_time * 1000:.2f} ms\n'
                    timing_text += f'Average per element: {avg_per_element:.2f} μs\n'
                    timing_text += f'Clickable detection calls: ~{total_elements} (approx)\n'
                async with await anyio.open_file('./tmp/timing_analysis.txt', 'w', encoding='utf-8') as f:
                    await f.write(timing_text)
                print('Timing analysis written to ./tmp/timing_analysis.txt')
                # also save all_elements_state.element_tree.clickable_elements_to_string() to a file
                # with open('./tmp/clickable_elements.json', 'w', encoding='utf-8') as f:
                # f.write(json.dumps(all_elements_state.element_tree.__json__(), indent=2))
                # print('Clickable elements written to ./tmp/clickable_elements.json')
                # NOTE(review): website_list is built but never shown — the
                # input() prompt below does not include it.
                website_list = get_website_list_for_prompt()
                answer = input(
                    "🎮 Enter: element index | 'index' click (clickable) | 'index,text' input | 'c,index' copy | Enter re-run | 'n' next | 'q' quit: "
                )
                if answer.lower() == 'q':
                    return  # Exit completely
                elif answer.lower() == 'n':
                    print('Moving to next website...')
                    current_website_index += 1
                    break  # Break inner loop to go to next website
                elif answer.strip() == '':
                    print('Re-running extraction on current page state...')
                    continue  # Continue inner loop to re-extract DOM without reloading page
                elif answer.strip().isdigit():
                    # Click element format: index
                    # NOTE(review): this branch awaits the event object returned
                    # by dispatch(), while the text-input branch below awaits
                    # dispatch() itself — confirm both forms are equivalent.
                    try:
                        clicked_index = int(answer)
                        if clicked_index in selector_map:
                            element_node = selector_map[clicked_index]
                            print(f'Clicking element {clicked_index}: {element_node.tag_name}')
                            event = browser_session.event_bus.dispatch(ClickElementEvent(node=element_node))
                            await event
                            print('Click successful.')
                    except ValueError:
                        print(f"Invalid input: '{answer}'. Enter an index, 'index,text', 'c,index', or 'q'.")
                    continue
                try:
                    if answer.lower().startswith('c,'):
                        # Copy element JSON format: c,index
                        parts = answer.split(',', 1)
                        if len(parts) == 2:
                            try:
                                target_index = int(parts[1].strip())
                                if target_index in selector_map:
                                    element_node = selector_map[target_index]
                                    element_json = json.dumps(element_node.__json__(), indent=2, default=str)
                                    pyperclip.copy(element_json)
                                    print(f'Copied element {target_index} JSON to clipboard: {element_node.tag_name}')
                                else:
                                    print(f'Invalid index: {target_index}')
                            except ValueError:
                                print(f'Invalid index format: {parts[1]}')
                        else:
                            print("Invalid input format. Use 'c,index'.")
                    elif ',' in answer:
                        # Input text format: index,text
                        parts = answer.split(',', 1)
                        if len(parts) == 2:
                            try:
                                target_index = int(parts[0].strip())
                                text_to_input = parts[1]
                                if target_index in selector_map:
                                    element_node = selector_map[target_index]
                                    print(
                                        f"Inputting text '{text_to_input}' into element {target_index}: {element_node.tag_name}"
                                    )
                                    event = await browser_session.event_bus.dispatch(
                                        TypeTextEvent(node=element_node, text=text_to_input)
                                    )
                                    print('Input successful.')
                                else:
                                    print(f'Invalid index: {target_index}')
                            except ValueError:
                                print(f'Invalid index format: {parts[0]}')
                    else:
                        print("Invalid input format. Use 'index,text'.")
                except Exception as action_e:
                    print(f'Action failed: {action_e}')
                # No explicit highlight removal here, get_state handles it at the start of the loop
            except Exception as e:
                print(f'Error in loop: {e}')
                # Optionally add a small delay before retrying
                await asyncio.sleep(1)
# Script entry point: run the interactive extraction playground.
if __name__ == '__main__':
    asyncio.run(test_focus_vs_all_elements())
    # asyncio.run(test_process_html_file())  # Commented out the other test
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/playground/extraction.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/playground/multi_act.py | from browser_use import Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.profile import ViewportSize
from browser_use.llm import ChatAzureOpenAI
# Initialize the Azure OpenAI client used as the agent's LLM.
llm = ChatAzureOpenAI(
    model='gpt-4.1-mini',
)
# Natural-language task handed to the Agent in main() below.
TASK = """
Go to https://browser-use.github.io/stress-tests/challenges/react-native-web-form.html and complete the React Native Web form by filling in all required fields and submitting.
"""
async def main():
    """Run the form-filling TASK in a browser session with a fixed window size."""
    browser = BrowserSession(
        browser_profile=BrowserProfile(
            window_size=ViewportSize(width=1100, height=1000),
        )
    )
    # Fix: the session was previously constructed but never passed to the
    # Agent, so the configured browser profile (window size) was silently
    # ignored and the Agent spawned its own default session.
    agent = Agent(task=TASK, llm=llm, browser_session=browser)
    await agent.run()
# Script entry point.
if __name__ == '__main__':
    import asyncio
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/playground/multi_act.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/dom/serializer/clickable_elements.py | from browser_use.dom.views import EnhancedDOMTreeNode, NodeType
class ClickableElementDetector:
    """Heuristic detection of clickable/interactive DOM nodes.

    Signals are checked roughly from strongest to weakest: CDP-detected JS
    click listeners, sizeable iframes, label/span form-control wrappers,
    search-related attributes, accessibility properties, interactive tag
    names, interactive attributes/ARIA roles, AX-tree roles, icon-sized
    elements with interactive attributes, and finally the computed
    ``cursor: pointer`` style.
    """

    @staticmethod
    def is_interactive(node: EnhancedDOMTreeNode) -> bool:
        """Check if this node is clickable/interactive using enhanced scoring.

        Returns False immediately for non-element nodes and the html/body
        roots, then walks the signal checks above, returning True on the
        first positive match.
        """

        def has_form_control_descendant(element: EnhancedDOMTreeNode, max_depth: int = 2) -> bool:
            """Detect nested form controls within limited depth (handles label/span wrappers)."""
            if max_depth <= 0:
                return False
            for child in element.children_and_shadow_roots:
                if child.node_type != NodeType.ELEMENT_NODE:
                    continue
                tag_name = child.tag_name
                if tag_name in {'input', 'select', 'textarea'}:
                    return True
                if has_form_control_descendant(child, max_depth=max_depth - 1):
                    return True
            return False

        # Skip non-element nodes
        if node.node_type != NodeType.ELEMENT_NODE:
            return False

        # Remove html and body nodes
        if node.tag_name in {'html', 'body'}:
            return False

        # Check for JavaScript click event listeners detected via CDP (without DOM mutation);
        # this handles vue.js @click, react onClick, angular (click), etc.
        if node.has_js_click_listener:
            return True

        # IFRAME/FRAME elements should be interactive if they're large enough to
        # potentially need scrolling; small frames (<= 100px in either dimension)
        # are unlikely to have scrollable content.
        # BUGFIX: the original condition `a and b or c` bound as `(a and b) or c`
        # due to operator precedence, defeating the tag_name guard and raising
        # AttributeError when tag_name is falsy.
        if node.tag_name and node.tag_name.upper() in {'IFRAME', 'FRAME'}:
            if node.snapshot_node and node.snapshot_node.bounds:
                width = node.snapshot_node.bounds.width
                height = node.snapshot_node.bounds.height
                # Only include iframes larger than 100x100px
                if width > 100 and height > 100:
                    return True

        # RELAXED SIZE CHECK: allow all elements including size 0 — they might be
        # interactive overlays; visibility is determined separately by CSS styles,
        # not by bounding-box size.

        # Specialized handling for labels used as component wrappers (e.g. Ant Design radio/checkbox)
        if node.tag_name == 'label':
            # Skip labels that proxy via "for" to avoid double-activating external inputs
            if node.attributes and node.attributes.get('for'):
                return False
            # Detect labels that wrap form controls up to two levels deep (label > span > input)
            if has_form_control_descendant(node, max_depth=2):
                return True
            # Fall through to pointer/role/attribute heuristics for other label cases

        # Span wrappers for UI components (detect clear interactive signals only)
        if node.tag_name == 'span':
            if has_form_control_descendant(node, max_depth=2):
                return True
            # Allow other heuristics (aria roles, event handlers, pointer) to decide

        # SEARCH ELEMENT DETECTION: check for search-related classes and attributes
        if node.attributes:
            search_indicators = {
                'search',
                'magnify',
                'glass',
                'lookup',
                'find',
                'query',
                'search-icon',
                'search-btn',
                'search-button',
                'searchbox',
            }
            # Check class names for search indicators (join once, then substring-match)
            class_text = ' '.join(node.attributes.get('class', '').lower().split())
            if any(indicator in class_text for indicator in search_indicators):
                return True
            # Check id for search indicators
            element_id = node.attributes.get('id', '').lower()
            if any(indicator in element_id for indicator in search_indicators):
                return True
            # Check data attributes for search functionality
            for attr_name, attr_value in node.attributes.items():
                if attr_name.startswith('data-') and any(indicator in attr_value.lower() for indicator in search_indicators):
                    return True

        # Enhanced accessibility property checks - direct clear indicators only
        if node.ax_node and node.ax_node.properties:
            for prop in node.ax_node.properties:
                try:
                    # aria disabled
                    if prop.name == 'disabled' and prop.value:
                        return False
                    # aria hidden
                    if prop.name == 'hidden' and prop.value:
                        return False
                    # Direct interactiveness indicators
                    if prop.name in ['focusable', 'editable', 'settable'] and prop.value:
                        return True
                    # Interactive state properties (presence alone indicates an
                    # interactive widget, regardless of the current value)
                    if prop.name in ['checked', 'expanded', 'pressed', 'selected']:
                        return True
                    # Form-related interactiveness
                    if prop.name in ['required', 'autocomplete'] and prop.value:
                        return True
                    # Elements with keyboard shortcuts are interactive
                    if prop.name == 'keyshortcuts' and prop.value:
                        return True
                except (AttributeError, ValueError):
                    # Skip properties we can't process
                    continue

        # ENHANCED TAG CHECK: include truly interactive elements.
        # Note: 'label' is intentionally absent — labels are handled above, since a
        # label with a "for" attribute could otherwise mask the real clickable element.
        interactive_tags = {
            'button',
            'input',
            'select',
            'textarea',
            'a',
            'details',
            'summary',
            'option',
            'optgroup',
        }
        # Check with case-insensitive comparison
        if node.tag_name and node.tag_name.lower() in interactive_tags:
            return True

        # NOTE: SVG child elements (path, circle, g, ...) are deliberately NOT
        # treated as interactive by tag; they only qualify via the explicit
        # handler/role/cursor checks below.

        # Tertiary check: elements with interactive attributes
        if node.attributes:
            # Check for event handlers or interactive attributes
            interactive_attributes = {'onclick', 'onmousedown', 'onmouseup', 'onkeydown', 'onkeyup', 'tabindex'}
            if any(attr in node.attributes for attr in interactive_attributes):
                return True
            # Check for interactive ARIA roles
            if 'role' in node.attributes:
                interactive_roles = {
                    'button',
                    'link',
                    'menuitem',
                    'option',
                    'radio',
                    'checkbox',
                    'tab',
                    'textbox',
                    'combobox',
                    'slider',
                    'spinbutton',
                    'search',
                    'searchbox',
                    'row',
                    'cell',
                    'gridcell',
                }
                if node.attributes['role'] in interactive_roles:
                    return True

        # Quaternary check: accessibility tree roles
        if node.ax_node and node.ax_node.role:
            interactive_ax_roles = {
                'button',
                'link',
                'menuitem',
                'option',
                'radio',
                'checkbox',
                'tab',
                'textbox',
                'combobox',
                'slider',
                'spinbutton',
                'listbox',
                'search',
                'searchbox',
                'row',
                'cell',
                'gridcell',
            }
            if node.ax_node.role in interactive_ax_roles:
                return True

        # ICON AND SMALL ELEMENT CHECK: elements that might be icons
        if (
            node.snapshot_node
            and node.snapshot_node.bounds
            and 10 <= node.snapshot_node.bounds.width <= 50  # Icon-sized elements
            and 10 <= node.snapshot_node.bounds.height <= 50
        ):
            # Check if this small element has interactive properties
            if node.attributes:
                # Small elements with these attributes are likely interactive icons
                icon_attributes = {'class', 'role', 'onclick', 'data-action', 'aria-label'}
                if any(attr in node.attributes for attr in icon_attributes):
                    return True

        # Final fallback: cursor style indicates interactivity (for cases Chrome missed)
        if node.snapshot_node and node.snapshot_node.cursor_style and node.snapshot_node.cursor_style == 'pointer':
            return True

        return False
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/clickable_elements.py",
"license": "MIT License",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/serializer/serializer.py | # @file purpose: Serializes enhanced DOM trees to string format for LLM consumption
from typing import Any
from browser_use.dom.serializer.clickable_elements import ClickableElementDetector
from browser_use.dom.serializer.paint_order import PaintOrderRemover
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
DOMRect,
DOMSelectorMap,
EnhancedDOMTreeNode,
NodeType,
PropagatingBounds,
SerializedDOMState,
SimplifiedNode,
)
# Tags whose subtrees are skipped entirely during serialization.
DISABLED_ELEMENTS = {'style', 'script', 'head', 'meta', 'link', 'title'}
# SVG child elements to skip (decorative only, no interaction value)
SVG_ELEMENTS = {
    'path',
    'rect',
    'g',
    'circle',
    'ellipse',
    'line',
    'polyline',
    'polygon',
    'use',
    'defs',
    'clipPath',
    'mask',
    'pattern',
    'image',
    'text',
    'tspan',
}
class DOMTreeSerializer:
"""Serializes enhanced DOM trees to string format."""
# Configuration - elements that propagate bounds to their children
PROPAGATING_ELEMENTS = [
{'tag': 'a', 'role': None}, # Any <a> tag
{'tag': 'button', 'role': None}, # Any <button> tag
{'tag': 'div', 'role': 'button'}, # <div role="button">
{'tag': 'div', 'role': 'combobox'}, # <div role="combobox"> - dropdowns/selects
{'tag': 'span', 'role': 'button'}, # <span role="button">
{'tag': 'span', 'role': 'combobox'}, # <span role="combobox">
{'tag': 'input', 'role': 'combobox'}, # <input role="combobox"> - autocomplete inputs
{'tag': 'input', 'role': 'combobox'}, # <input type="text"> - text inputs with suggestions
# {'tag': 'div', 'role': 'link'}, # <div role="link">
# {'tag': 'span', 'role': 'link'}, # <span role="link">
]
DEFAULT_CONTAINMENT_THRESHOLD = 0.99 # 99% containment by default
    def __init__(
        self,
        root_node: EnhancedDOMTreeNode,
        previous_cached_state: SerializedDOMState | None = None,
        enable_bbox_filtering: bool = True,
        containment_threshold: float | None = None,
        paint_order_filtering: bool = True,
        session_id: str | None = None,
    ):
        """Set up serializer state.

        Args:
            root_node: Root of the enhanced DOM tree to serialize.
            previous_cached_state: Prior serialized state; its selector map is
                kept so elements new since the last pass can be marked.
            enable_bbox_filtering: Whether to apply bounding-box containment filtering.
            containment_threshold: Containment ratio for bbox filtering; falls
                back to DEFAULT_CONTAINMENT_THRESHOLD when None.
            paint_order_filtering: Whether to drop elements hidden by paint order.
            session_id: Session ID for session-specific exclude attribute.
        """
        self.root_node = root_node
        self._interactive_counter = 1
        self._selector_map: DOMSelectorMap = {}
        self._previous_cached_selector_map = previous_cached_state.selector_map if previous_cached_state else None
        # Add timing tracking
        self.timing_info: dict[str, float] = {}
        # Cache for clickable element detection to avoid redundant calls
        self._clickable_cache: dict[int, bool] = {}
        # Bounding box filtering configuration
        self.enable_bbox_filtering = enable_bbox_filtering
        # NOTE: `or` means any falsy value — including an explicit 0.0 — falls
        # back to the class default threshold.
        self.containment_threshold = containment_threshold or self.DEFAULT_CONTAINMENT_THRESHOLD
        # Paint order filtering configuration
        self.paint_order_filtering = paint_order_filtering
        # Session ID for session-specific exclude attribute
        self.session_id = session_id
def _safe_parse_number(self, value_str: str, default: float) -> float:
"""Parse string to float, handling negatives and decimals."""
try:
return float(value_str)
except (ValueError, TypeError):
return default
def _safe_parse_optional_number(self, value_str: str | None) -> float | None:
"""Parse string to float, returning None for invalid values."""
if not value_str:
return None
try:
return float(value_str)
except (ValueError, TypeError):
return None
    def serialize_accessible_elements(self) -> tuple[SerializedDOMState, dict[str, float]]:
        """Run the full serialization pipeline and return the serialized state
        plus a per-step timing breakdown (seconds).

        Pipeline: simplify tree -> paint-order filtering (optional) ->
        optimize -> bounding-box filtering (optional) -> assign interactive
        indices.
        """
        import time
        start_total = time.time()
        # Reset state
        self._interactive_counter = 1
        self._selector_map = {}
        # NOTE(review): _semantic_groups is reset here but not initialized in
        # __init__ and not used elsewhere in this visible code.
        self._semantic_groups = []
        self._clickable_cache = {}  # Clear cache for new serialization
        # Step 1: Create simplified tree (includes clickable element detection)
        start_step1 = time.time()
        simplified_tree = self._create_simplified_tree(self.root_node)
        end_step1 = time.time()
        self.timing_info['create_simplified_tree'] = end_step1 - start_step1
        # Step 2: Remove elements based on paint order
        start_step3 = time.time()
        if self.paint_order_filtering and simplified_tree:
            PaintOrderRemover(simplified_tree).calculate_paint_order()
        end_step3 = time.time()
        self.timing_info['calculate_paint_order'] = end_step3 - start_step3
        # Step 3: Optimize tree (remove unnecessary parents)
        start_step2 = time.time()
        optimized_tree = self._optimize_tree(simplified_tree)
        end_step2 = time.time()
        self.timing_info['optimize_tree'] = end_step2 - start_step2
        # Step 3: Apply bounding box filtering (NEW)
        if self.enable_bbox_filtering and optimized_tree:
            start_step3 = time.time()
            filtered_tree = self._apply_bounding_box_filtering(optimized_tree)
            end_step3 = time.time()
            self.timing_info['bbox_filtering'] = end_step3 - start_step3
        else:
            filtered_tree = optimized_tree
        # Step 4: Assign interactive indices to clickable elements
        start_step4 = time.time()
        self._assign_interactive_indices_and_mark_new_nodes(filtered_tree)
        end_step4 = time.time()
        self.timing_info['assign_interactive_indices'] = end_step4 - start_step4
        end_total = time.time()
        self.timing_info['serialize_accessible_elements_total'] = end_total - start_total
        return SerializedDOMState(_root=filtered_tree, selector_map=self._selector_map), self.timing_info
def _add_compound_components(self, simplified: SimplifiedNode, node: EnhancedDOMTreeNode) -> None:
    """Enhance compound controls with information from their child components.

    Attaches virtual sub-component descriptors (role/name/min/max/current
    dicts) to ``node._compound_children`` for controls that bundle several
    interactive parts — range/number/color/file inputs, <select>, <details>,
    and <audio>/<video> players — and sets
    ``simplified.is_compound_component`` so the serializer surfaces them.
    """
    # Only process elements that might have compound components
    if node.tag_name not in ['input', 'select', 'details', 'audio', 'video']:
        return

    # For input elements, check for compound input types
    if node.tag_name == 'input':
        if not node.attributes or node.attributes.get('type') not in [
            'date',
            'time',
            'datetime-local',
            'month',
            'week',
            'range',
            'number',
            'color',
            'file',
        ]:
            return
    # For other elements, check if they have AX child indicators
    elif not node.ax_node or not node.ax_node.child_ids:
        return

    # Add compound component information based on element type
    element_type = node.tag_name
    input_type = node.attributes.get('type', '') if node.attributes else ''

    if element_type == 'input':
        # NOTE: For date/time inputs, we DON'T add compound components because:
        # 1. They confuse the model (seeing "Day, Month, Year" suggests DD.MM.YYYY format)
        # 2. HTML5 date/time inputs ALWAYS require ISO format (YYYY-MM-DD, HH:MM, etc.)
        # 3. The placeholder attribute clearly shows the required format
        # 4. These inputs use direct value assignment, not sequential typing
        if input_type in ['date', 'time', 'datetime-local', 'month', 'week']:
            # Skip compound components for date/time inputs - format is shown in placeholder
            pass
        elif input_type == 'range':
            # Range slider with value indicator; min/max default per HTML spec (0/100)
            min_val = node.attributes.get('min', '0') if node.attributes else '0'
            max_val = node.attributes.get('max', '100') if node.attributes else '100'
            node._compound_children.append(
                {
                    'role': 'slider',
                    'name': 'Value',
                    'valuemin': self._safe_parse_number(min_val, 0.0),
                    'valuemax': self._safe_parse_number(max_val, 100.0),
                    'valuenow': None,
                }
            )
            simplified.is_compound_component = True
        elif input_type == 'number':
            # Number input with increment/decrement buttons
            min_val = node.attributes.get('min') if node.attributes else None
            max_val = node.attributes.get('max') if node.attributes else None
            node._compound_children.extend(
                [
                    {'role': 'button', 'name': 'Increment', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                    {'role': 'button', 'name': 'Decrement', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                    {
                        'role': 'textbox',
                        'name': 'Value',
                        'valuemin': self._safe_parse_optional_number(min_val),
                        'valuemax': self._safe_parse_optional_number(max_val),
                        'valuenow': None,
                    },
                ]
            )
            simplified.is_compound_component = True
        elif input_type == 'color':
            # Color picker with components
            node._compound_children.extend(
                [
                    {'role': 'textbox', 'name': 'Hex Value', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                    {'role': 'button', 'name': 'Color Picker', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                ]
            )
            simplified.is_compound_component = True
        elif input_type == 'file':
            # File input with browse button
            multiple = 'multiple' in node.attributes if node.attributes else False
            # Extract current file selection state from AX tree
            current_value = 'None'  # Default to explicit "None" string for clarity
            if node.ax_node and node.ax_node.properties:
                for prop in node.ax_node.properties:
                    # Try valuetext first (human-readable display like "file.pdf")
                    if prop.name == 'valuetext' and prop.value:
                        value_str = str(prop.value).strip()
                        if value_str and value_str.lower() not in ['', 'no file chosen', 'no file selected']:
                            current_value = value_str
                            break
                    # Also try 'value' property (may include full path)
                    elif prop.name == 'value' and prop.value:
                        value_str = str(prop.value).strip()
                        if value_str:
                            # For file inputs, value might be a full path - extract just filename
                            if '\\' in value_str:
                                current_value = value_str.split('\\')[-1]
                            elif '/' in value_str:
                                current_value = value_str.split('/')[-1]
                            else:
                                current_value = value_str
                            break
            node._compound_children.extend(
                [
                    {'role': 'button', 'name': 'Browse Files', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                    {
                        'role': 'textbox',
                        'name': f'{"Files" if multiple else "File"} Selected',
                        'valuemin': None,
                        'valuemax': None,
                        'valuenow': current_value,  # Always shows state: filename or "None"
                    },
                ]
            )
            simplified.is_compound_component = True
    elif element_type == 'select':
        # Select dropdown with option list and detailed option information
        base_components = [
            {'role': 'button', 'name': 'Dropdown Toggle', 'valuemin': None, 'valuemax': None, 'valuenow': None}
        ]
        # Extract option information from child nodes
        options_info = self._extract_select_options(node)
        if options_info:
            options_component = {
                'role': 'listbox',
                'name': 'Options',
                'valuemin': None,
                'valuemax': None,
                'valuenow': None,
                'options_count': options_info['count'],
                'first_options': options_info['first_options'],
            }
            if options_info['format_hint']:
                options_component['format_hint'] = options_info['format_hint']
            base_components.append(options_component)
        else:
            base_components.append(
                {'role': 'listbox', 'name': 'Options', 'valuemin': None, 'valuemax': None, 'valuenow': None}
            )
        node._compound_children.extend(base_components)
        simplified.is_compound_component = True
    elif element_type == 'details':
        # Details/summary disclosure widget
        node._compound_children.extend(
            [
                {'role': 'button', 'name': 'Toggle Disclosure', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                {'role': 'region', 'name': 'Content Area', 'valuemin': None, 'valuemax': None, 'valuenow': None},
            ]
        )
        simplified.is_compound_component = True
    elif element_type == 'audio':
        # Audio player controls
        node._compound_children.extend(
            [
                {'role': 'button', 'name': 'Play/Pause', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                {'role': 'slider', 'name': 'Progress', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
                {'role': 'button', 'name': 'Mute', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                {'role': 'slider', 'name': 'Volume', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
            ]
        )
        simplified.is_compound_component = True
    elif element_type == 'video':
        # Video player controls
        node._compound_children.extend(
            [
                {'role': 'button', 'name': 'Play/Pause', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                {'role': 'slider', 'name': 'Progress', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
                {'role': 'button', 'name': 'Mute', 'valuemin': None, 'valuemax': None, 'valuenow': None},
                {'role': 'slider', 'name': 'Volume', 'valuemin': 0, 'valuemax': 100, 'valuenow': None},
                {'role': 'button', 'name': 'Fullscreen', 'valuemin': None, 'valuemax': None, 'valuenow': None},
            ]
        )
        simplified.is_compound_component = True
def _extract_select_options(self, select_node: EnhancedDOMTreeNode) -> dict[str, Any] | None:
    """Summarize a <select>'s options for compound-component display.

    Walks the select subtree (descending through <optgroup> and any other
    wrapper), collects each <option>'s text/value, and returns a dict with
    the option count, up to four preview strings, and an inferred
    value-format hint.  Returns None when there are no children or options.
    """
    if not select_node.children:
        return None

    collected: list[dict[str, str]] = []
    raw_values: list[str] = []

    def direct_text(element: EnhancedDOMTreeNode) -> str:
        """Text from immediate child text nodes only (avoids duplicating descendants)."""
        pieces = [
            child.node_value.strip()
            for child in element.children
            if child.node_type == NodeType.TEXT_NODE and child.node_value
        ]
        return ' '.join(pieces).strip()

    def walk(element: EnhancedDOMTreeNode) -> None:
        """Depth-first search for <option> elements under *element*."""
        if element.tag_name.lower() == 'option':
            value = ''
            if element.attributes and 'value' in element.attributes:
                value = str(element.attributes['value']).strip()
            text = direct_text(element)
            # Fall back to the visible text when there is no explicit value attribute
            if not value and text:
                value = text
            if text or value:
                collected.append({'text': text, 'value': value})
                raw_values.append(value)
        else:
            # <optgroup> or any other wrapper: descend into its children
            for child in element.children:
                walk(child)

    for child in select_node.children:
        walk(child)

    if not collected:
        return None

    # Build up to four preview strings, each capped at 30 characters
    previews: list[str] = []
    for option in collected[:4]:
        shown = option['text'] if option['text'] else option['value']
        if shown:
            previews.append(shown[:30] + ('...' if len(shown) > 30 else ''))
    if len(collected) > 4:
        previews.append(f'... {len(collected) - 4} more options...')

    # Heuristic hint about the value format, based on the first few values
    hint = None
    if len(raw_values) >= 2:
        sample = raw_values[:5]
        if all(v.isdigit() for v in sample if v):
            hint = 'numeric'
        elif all(len(v) == 2 and v.isupper() for v in sample if v):
            hint = 'country/state codes'
        elif all('/' in v or '-' in v for v in sample if v):
            hint = 'date/path format'
        elif any('@' in v for v in sample if v):
            hint = 'email addresses'

    return {'count': len(collected), 'first_options': previews, 'format_hint': hint}
def _is_interactive_cached(self, node: EnhancedDOMTreeNode) -> bool:
    """Memoized ``ClickableElementDetector.is_interactive``, keyed by node_id.

    Also accumulates total detection time under
    ``self.timing_info['clickable_detection_time']``.
    """
    if node.node_id in self._clickable_cache:
        return self._clickable_cache[node.node_id]

    import time

    started = time.time()
    verdict = ClickableElementDetector.is_interactive(node)
    elapsed = time.time() - started

    # Aggregate detection time across all cache misses
    self.timing_info['clickable_detection_time'] = (
        self.timing_info.get('clickable_detection_time', 0) + elapsed
    )
    self._clickable_cache[node.node_id] = verdict
    return verdict
def _create_simplified_tree(self, node: EnhancedDOMTreeNode, depth: int = 0) -> SimplifiedNode | None:
    """Step 1: Create a simplified tree with enhanced element detection.

    Dispatches on node type: documents unwrap to their first simplifiable
    child, shadow-root fragments are always kept, elements are kept when
    visible/scrollable/shadow-hosting (with special cases for excluded
    attributes, iframes, and hidden file inputs), and text nodes are kept
    only when visible and longer than one character.
    """
    if node.node_type == NodeType.DOCUMENT_NODE:
        # Unwrap the document: return the first child (incl. shadow roots)
        # that yields a simplified subtree
        for child in node.children_and_shadow_roots:
            simplified_child = self._create_simplified_tree(child, depth + 1)
            if simplified_child:
                return simplified_child
        return None

    if node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
        # ENHANCED shadow DOM processing - always include shadow content
        simplified = SimplifiedNode(original_node=node, children=[])
        for child in node.children_and_shadow_roots:
            simplified_child = self._create_simplified_tree(child, depth + 1)
            if simplified_child:
                simplified.children.append(simplified_child)
        # Always return shadow DOM fragments, even if children seem empty
        # Shadow DOM often contains the actual interactive content in SPAs
        return simplified if simplified.children else SimplifiedNode(original_node=node, children=[])
    elif node.node_type == NodeType.ELEMENT_NODE:
        # Skip non-content elements
        if node.node_name.lower() in DISABLED_ELEMENTS:
            return None

        # Skip SVG child elements entirely (path, rect, g, circle, etc.)
        if node.node_name.lower() in SVG_ELEMENTS:
            return None

        attributes = node.attributes or {}
        # Check for session-specific exclude attribute first, then fall back to legacy attribute
        exclude_attr = None
        attr_type = None  # NOTE(review): assigned for the session-specific case but never read
        if self.session_id:
            session_specific_attr = f'data-browser-use-exclude-{self.session_id}'
            exclude_attr = attributes.get(session_specific_attr)
            if exclude_attr:
                attr_type = 'session-specific'
        # Fall back to legacy attribute if session-specific not found
        if not exclude_attr:
            exclude_attr = attributes.get('data-browser-use-exclude')
        if isinstance(exclude_attr, str) and exclude_attr.lower() == 'true':
            return None

        if node.node_name == 'IFRAME' or node.node_name == 'FRAME':
            # Descend into the frame's content document when available
            if node.content_document:
                simplified = SimplifiedNode(original_node=node, children=[])
                for child in node.content_document.children_nodes or []:
                    simplified_child = self._create_simplified_tree(child, depth + 1)
                    if simplified_child is not None:
                        simplified.children.append(simplified_child)
                return simplified

        is_visible = node.is_visible
        is_scrollable = node.is_actually_scrollable
        has_shadow_content = bool(node.children_and_shadow_roots)

        # ENHANCED SHADOW DOM DETECTION: Include shadow hosts even if not visible
        is_shadow_host = any(child.node_type == NodeType.DOCUMENT_FRAGMENT_NODE for child in node.children_and_shadow_roots)

        # Override visibility for elements with validation attributes
        if not is_visible and node.attributes:
            has_validation_attrs = any(attr.startswith(('aria-', 'pseudo')) for attr in node.attributes.keys())
            if has_validation_attrs:
                is_visible = True  # Force visibility for validation elements

        # EXCEPTION: File inputs are often hidden with opacity:0 but are still functional
        # Bootstrap and other frameworks use this pattern with custom-styled file pickers
        is_file_input = (
            node.tag_name and node.tag_name.lower() == 'input' and node.attributes and node.attributes.get('type') == 'file'
        )
        if not is_visible and is_file_input:
            is_visible = True  # Force visibility for file inputs

        # Include if visible, scrollable, has children, or is shadow host
        if is_visible or is_scrollable or has_shadow_content or is_shadow_host:
            simplified = SimplifiedNode(original_node=node, children=[], is_shadow_host=is_shadow_host)

            # Process ALL children including shadow roots with enhanced logging
            for child in node.children_and_shadow_roots:
                simplified_child = self._create_simplified_tree(child, depth + 1)
                if simplified_child:
                    simplified.children.append(simplified_child)

            # COMPOUND CONTROL PROCESSING: Add virtual components for compound controls
            self._add_compound_components(simplified, node)

            # SHADOW DOM SPECIAL CASE: Always include shadow hosts even if not visible
            # Many SPA frameworks (React, Vue) render content in shadow DOM
            if is_shadow_host and simplified.children:
                return simplified

            # Return if meaningful or has meaningful children
            if is_visible or is_scrollable or simplified.children:
                return simplified
    elif node.node_type == NodeType.TEXT_NODE:
        # Include meaningful text nodes (visible, non-whitespace, >1 char)
        is_visible = node.snapshot_node and node.is_visible
        if is_visible and node.node_value and node.node_value.strip() and len(node.node_value.strip()) > 1:
            return SimplifiedNode(original_node=node, children=[])

    return None
def _optimize_tree(self, node: SimplifiedNode | None) -> SimplifiedNode | None:
    """Step 2: prune non-meaningful nodes bottom-up.

    A node survives when it is visible, actually scrollable, a text node,
    a hidden-but-functional file input, or still has surviving children.
    """
    if not node:
        return None

    # Optimize children first so the keep-decision below sees pruned results
    node.children = [
        optimized for optimized in (self._optimize_tree(child) for child in node.children) if optimized
    ]

    original = node.original_node
    is_visible = original.snapshot_node and original.is_visible

    # EXCEPTION: file inputs are frequently styled opacity:0 yet remain functional
    is_file_input = (
        original.tag_name
        and original.tag_name.lower() == 'input'
        and original.attributes
        and original.attributes.get('type') == 'file'
    )

    keep = (
        is_visible  # Keep all visible nodes
        or original.is_actually_scrollable
        or original.node_type == NodeType.TEXT_NODE
        or node.children
        or is_file_input  # Keep file inputs even if not visible
    )
    return node if keep else None
def _collect_interactive_elements(self, node: SimplifiedNode, elements: list[SimplifiedNode]) -> None:
    """Pre-order collect nodes that are both interactive AND visible into *elements*."""
    original = node.original_node
    # Interactivity is checked first so the detector cache is always warmed
    if self._is_interactive_cached(original) and original.snapshot_node and original.is_visible:
        elements.append(node)
    for child in node.children:
        self._collect_interactive_elements(child, elements)
def _has_interactive_descendants(self, node: SimplifiedNode) -> bool:
    """Return True when any strict descendant of *node* is interactive.

    The node itself is never considered; short-circuits on first hit.
    """
    return any(
        self._is_interactive_cached(child.original_node) or self._has_interactive_descendants(child)
        for child in node.children
    )
def _is_inside_shadow_dom(self, node: SimplifiedNode) -> bool:
    """Return True when *node* lives under a shadow root.

    A shadow root is a DOCUMENT_FRAGMENT node whose ``shadow_root_type`` is
    set; we walk the ancestor chain until we find one or run out of parents.
    """
    ancestor = node.original_node.parent_node
    while ancestor is not None:
        if ancestor.node_type == NodeType.DOCUMENT_FRAGMENT_NODE and ancestor.shadow_root_type is not None:
            return True
        ancestor = ancestor.parent_node
    return False
def _assign_interactive_indices_and_mark_new_nodes(self, node: SimplifiedNode | None) -> None:
    """Assign interactive indices to clickable elements that are also visible.

    Walks the simplified tree, marking nodes interactive and registering
    them in ``self._selector_map`` (keyed by backend_node_id) when they are
    interactive-and-visible, hidden-but-functional file inputs, shadow-DOM
    form elements without snapshot data, or scrollable containers that need
    to be targetable.  Nodes absent from the previous selector map are
    flagged ``is_new``.
    """
    if not node:
        return

    # Skip assigning index to excluded nodes, or ignored by paint order
    if not node.excluded_by_parent and not node.ignored_by_paint_order:
        # Regular interactive element assignment (including enhanced compound controls)
        is_interactive_assign = self._is_interactive_cached(node.original_node)
        is_visible = node.original_node.snapshot_node and node.original_node.is_visible
        is_scrollable = node.original_node.is_actually_scrollable

        # DIAGNOSTIC: Log when interactive elements don't have snapshot_node
        if is_interactive_assign and not node.original_node.snapshot_node:
            import logging

            logger = logging.getLogger('browser_use.dom.serializer')
            attrs = node.original_node.attributes or {}
            attr_str = f'name={attrs.get("name", "")} id={attrs.get("id", "")} type={attrs.get("type", "")}'
            in_shadow = self._is_inside_shadow_dom(node)
            if (
                in_shadow
                and node.original_node.tag_name
                and node.original_node.tag_name.lower() in ['input', 'button', 'select', 'textarea', 'a']
            ):
                logger.debug(
                    f'🔍 INCLUDING shadow DOM <{node.original_node.tag_name}> (no snapshot_node but in shadow DOM): '
                    f'backendNodeId={node.original_node.backend_node_id} {attr_str}'
                )
            else:
                logger.debug(
                    f'🔍 SKIPPING interactive <{node.original_node.tag_name}> (no snapshot_node, not in shadow DOM): '
                    f'backendNodeId={node.original_node.backend_node_id} {attr_str}'
                )

        # EXCEPTION: File inputs are often hidden with opacity:0 but are still functional
        # Bootstrap and other frameworks use this pattern with custom-styled file pickers
        is_file_input = (
            node.original_node.tag_name
            and node.original_node.tag_name.lower() == 'input'
            and node.original_node.attributes
            and node.original_node.attributes.get('type') == 'file'
        )

        # EXCEPTION: Shadow DOM form elements may not have snapshot layout data from CDP's
        # DOMSnapshot.captureSnapshot, but they're still functional/interactive.
        # This handles login forms, custom web components, etc. inside shadow DOM.
        is_shadow_dom_element = (
            is_interactive_assign
            and not node.original_node.snapshot_node
            and node.original_node.tag_name
            and node.original_node.tag_name.lower() in ['input', 'button', 'select', 'textarea', 'a']
            and self._is_inside_shadow_dom(node)
        )

        # Check if scrollable container should be made interactive
        # For scrollable elements, ONLY make them interactive if they have no interactive descendants
        should_make_interactive = False
        if is_scrollable:
            # Check if this is a dropdown container that needs to be indexed regardless of descendants
            attrs = node.original_node.attributes or {}
            role = attrs.get('role', '').lower()
            tag_name = (node.original_node.tag_name or '').lower()
            class_attr = attrs.get('class', '').lower()
            class_list = class_attr.split() if class_attr else []

            # Detect dropdown containers by role, tag, or class
            is_dropdown_by_role = role in ('listbox', 'menu', 'combobox', 'menubar', 'tree', 'grid')
            is_dropdown_by_tag = tag_name == 'select'
            # Match common dropdown class patterns
            is_dropdown_by_class = (
                'dropdown' in class_list
                or 'dropdown-menu' in class_list
                or 'select-menu' in class_list
                or ('ui' in class_list and 'dropdown' in class_attr)  # Semantic UI
            )
            is_dropdown_container = is_dropdown_by_role or is_dropdown_by_tag or is_dropdown_by_class

            if is_dropdown_container:
                # Always index dropdown containers - need to be targetable for select_dropdown
                should_make_interactive = True
            else:
                # For other scrollable elements, check if they have interactive children
                has_interactive_desc = self._has_interactive_descendants(node)
                # Only make scrollable container interactive if it has no interactive descendants
                if not has_interactive_desc:
                    should_make_interactive = True
        elif is_interactive_assign and (is_visible or is_file_input or is_shadow_dom_element):
            # Non-scrollable interactive elements: make interactive if visible (or file input or shadow DOM form element)
            should_make_interactive = True

        # Add to selector map if element should be interactive
        if should_make_interactive:
            # Mark node as interactive
            node.is_interactive = True
            # Store backend_node_id in selector map (model outputs backend_node_id)
            self._selector_map[node.original_node.backend_node_id] = node.original_node
            self._interactive_counter += 1

            # Mark compound components as new for visibility
            if node.is_compound_component:
                node.is_new = True
            elif self._previous_cached_selector_map:
                # Check if node is new for regular elements
                # NOTE(review): this set is rebuilt for every interactive node
                # (O(n^2) over the previous map) — consider hoisting to one
                # computation per serialization pass
                previous_backend_node_ids = {node.backend_node_id for node in self._previous_cached_selector_map.values()}
                if node.original_node.backend_node_id not in previous_backend_node_ids:
                    node.is_new = True

    # Process children
    for child in node.children:
        self._assign_interactive_indices_and_mark_new_nodes(child)
def _apply_bounding_box_filtering(self, node: SimplifiedNode | None) -> SimplifiedNode | None:
    """Flag children contained within propagating parent bounds as excluded.

    Mutates the tree in place (sets ``excluded_by_parent``) and returns it.
    """
    if not node:
        return None

    # Kick off the recursive pass with no bounds active yet
    self._filter_tree_recursive(node, active_bounds=None, depth=0)

    # Emit a debug statistic when anything was actually excluded
    excluded_count = self._count_excluded_nodes(node)
    if excluded_count:
        import logging

        logging.debug(f'BBox filtering excluded {excluded_count} nodes')
    return node
def _filter_tree_recursive(self, node: SimplifiedNode, active_bounds: PropagatingBounds | None = None, depth: int = 0):
    """Depth-first pass that marks nodes swallowed by propagating bounds.

    Bounds propagate to ALL descendants until a deeper propagating element
    replaces them.  An excluded node may still start its own propagation.
    """
    # Flag this node when the currently-active bounds contain it
    if active_bounds and self._should_exclude_child(node, active_bounds):
        node.excluded_by_parent = True

    original = node.original_node
    tag = original.tag_name.lower()
    descriptor = {
        'tag': tag,
        'role': original.attributes.get('role') if original.attributes else None,
    }

    # Decide which bounds the children inherit: this node's own (when it is a
    # propagating element with geometry) or whatever was already active
    bounds_for_children = active_bounds
    if self._is_propagating_element(descriptor):
        snapshot = original.snapshot_node
        if snapshot and snapshot.bounds:
            bounds_for_children = PropagatingBounds(
                tag=tag,
                bounds=snapshot.bounds,
                node_id=original.node_id,
                depth=depth,
            )

    for child in node.children:
        self._filter_tree_recursive(child, bounds_for_children, depth + 1)
def _should_exclude_child(self, node: SimplifiedNode, active_bounds: PropagatingBounds) -> bool:
    """Decide whether *node* should be hidden by a propagating ancestor.

    A node is excluded only when it is geometrically contained within the
    active bounds AND none of the exception rules (form controls, nested
    propagating elements, onclick, aria-label, interactive role) apply.
    """
    original = node.original_node

    # Text content is always preserved
    if original.node_type == NodeType.TEXT_NODE:
        return False

    # Without geometry we cannot prove containment
    snapshot = original.snapshot_node
    if not snapshot or not snapshot.bounds:
        return False

    if not self._is_contained(snapshot.bounds, active_bounds.bounds, self.containment_threshold):
        return False  # Not sufficiently contained

    # EXCEPTION RULES - keep these even if contained:
    tag = original.tag_name.lower()
    attrs = original.attributes or {}

    # 1. Never exclude form elements (they need individual interaction)
    if tag in ['input', 'select', 'textarea', 'label']:
        return False

    # 2. Keep if child is also a propagating element
    # (might have stopPropagation, e.g., button in button)
    if self._is_propagating_element({'tag': tag, 'role': attrs.get('role')}):
        return False

    # 3. Keep if has explicit onclick handler
    if 'onclick' in attrs:
        return False

    # 4. Keep if has aria-label suggesting it's independently interactive
    aria_label = attrs.get('aria-label')
    if aria_label and aria_label.strip():
        return False

    # 5. Keep if has role suggesting interactivity
    if attrs.get('role') in ['button', 'link', 'checkbox', 'radio', 'tab', 'menuitem', 'option']:
        return False

    # Default: exclude this child
    return True
def _is_contained(self, child: DOMRect, parent: DOMRect, threshold: float) -> bool:
"""
Check if child is contained within parent bounds.
Args:
threshold: Percentage (0.0-1.0) of child that must be within parent
"""
# Calculate intersection
x_overlap = max(0, min(child.x + child.width, parent.x + parent.width) - max(child.x, parent.x))
y_overlap = max(0, min(child.y + child.height, parent.y + parent.height) - max(child.y, parent.y))
intersection_area = x_overlap * y_overlap
child_area = child.width * child.height
if child_area == 0:
return False # Zero-area element
containment_ratio = intersection_area / child_area
return containment_ratio >= threshold
def _count_excluded_nodes(self, node: SimplifiedNode, count: int = 0) -> int:
"""Count how many nodes were excluded (for debugging)."""
if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
count += 1
for child in node.children:
count = self._count_excluded_nodes(child, count)
return count
def _is_propagating_element(self, attributes: dict[str, str | None]) -> bool:
"""
Check if an element should propagate bounds based on attributes.
If the element satisfies one of the patterns, it propagates bounds to all its children.
"""
keys_to_check = ['tag', 'role']
for pattern in self.PROPAGATING_ELEMENTS:
# Check if the element satisfies the pattern
check = [pattern.get(key) is None or pattern.get(key) == attributes.get(key) for key in keys_to_check]
if all(check):
return True
return False
@staticmethod
def serialize_tree(node: SimplifiedNode | None, include_attributes: list[str], depth: int = 0) -> str:
    """Serialize the optimized tree to string format.

    Produces the tab-indented, LLM-facing text representation: interactive
    elements get ``[backend_node_id]`` markers ('*'-prefixed when new),
    scrollable containers, iframes/frames and shadow roots get textual
    prefixes, SVG subtrees are collapsed, and visible text nodes are
    emitted verbatim.  Excluded / non-displayed nodes are skipped but their
    children are still rendered at the same depth.
    """
    if not node:
        return ''

    # Skip rendering excluded nodes, but process their children
    if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
        formatted_text = []
        for child in node.children:
            child_text = DOMTreeSerializer.serialize_tree(child, include_attributes, depth)
            if child_text:
                formatted_text.append(child_text)
        return '\n'.join(formatted_text)

    formatted_text = []
    depth_str = depth * '\t'
    next_depth = depth

    if node.original_node.node_type == NodeType.ELEMENT_NODE:
        # Skip displaying nodes marked as should_display=False
        if not node.should_display:
            for child in node.children:
                child_text = DOMTreeSerializer.serialize_tree(child, include_attributes, depth)
                if child_text:
                    formatted_text.append(child_text)
            return '\n'.join(formatted_text)

        # Special handling for SVG elements - show the tag but collapse children
        if node.original_node.tag_name.lower() == 'svg':
            shadow_prefix = ''
            if node.is_shadow_host:
                has_closed_shadow = any(
                    child.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE
                    and child.original_node.shadow_root_type
                    and child.original_node.shadow_root_type.lower() == 'closed'
                    for child in node.children
                )
                shadow_prefix = '|SHADOW(closed)|' if has_closed_shadow else '|SHADOW(open)|'

            line = f'{depth_str}{shadow_prefix}'
            # Add interactive marker if clickable
            if node.is_interactive:
                new_prefix = '*' if node.is_new else ''
                line += f'{new_prefix}[{node.original_node.backend_node_id}]'
            line += '<svg'
            attributes_html_str = DOMTreeSerializer._build_attributes_string(node.original_node, include_attributes, '')
            if attributes_html_str:
                line += f' {attributes_html_str}'
            line += ' /> <!-- SVG content collapsed -->'
            formatted_text.append(line)
            # Don't process children for SVG
            return '\n'.join(formatted_text)

        # Add element if clickable, scrollable, or iframe
        is_any_scrollable = node.original_node.is_actually_scrollable or node.original_node.is_scrollable
        should_show_scroll = node.original_node.should_show_scroll_info
        if (
            node.is_interactive
            or is_any_scrollable
            or node.original_node.tag_name.upper() == 'IFRAME'
            or node.original_node.tag_name.upper() == 'FRAME'
        ):
            next_depth += 1

            # Build attributes string with compound component info
            text_content = ''
            attributes_html_str = DOMTreeSerializer._build_attributes_string(
                node.original_node, include_attributes, text_content
            )

            # Add compound component information to attributes if present
            if node.original_node._compound_children:
                compound_info = []
                for child_info in node.original_node._compound_children:
                    parts = []
                    if child_info['name']:
                        parts.append(f'name={child_info["name"]}')
                    if child_info['role']:
                        parts.append(f'role={child_info["role"]}')
                    if child_info['valuemin'] is not None:
                        parts.append(f'min={child_info["valuemin"]}')
                    if child_info['valuemax'] is not None:
                        parts.append(f'max={child_info["valuemax"]}')
                    if child_info['valuenow'] is not None:
                        parts.append(f'current={child_info["valuenow"]}')

                    # Add select-specific information
                    if 'options_count' in child_info and child_info['options_count'] is not None:
                        parts.append(f'count={child_info["options_count"]}')
                    if 'first_options' in child_info and child_info['first_options']:
                        options_str = '|'.join(child_info['first_options'][:4])  # Limit to 4 options
                        parts.append(f'options={options_str}')
                    if 'format_hint' in child_info and child_info['format_hint']:
                        parts.append(f'format={child_info["format_hint"]}')

                    if parts:
                        compound_info.append(f'({",".join(parts)})')

                if compound_info:
                    compound_attr = f'compound_components={",".join(compound_info)}'
                    if attributes_html_str:
                        attributes_html_str += f' {compound_attr}'
                    else:
                        attributes_html_str = compound_attr

            # Build the line with shadow host indicator
            shadow_prefix = ''
            if node.is_shadow_host:
                # Check if any shadow children are closed
                has_closed_shadow = any(
                    child.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE
                    and child.original_node.shadow_root_type
                    and child.original_node.shadow_root_type.lower() == 'closed'
                    for child in node.children
                )
                shadow_prefix = '|SHADOW(closed)|' if has_closed_shadow else '|SHADOW(open)|'

            if should_show_scroll and not node.is_interactive:
                # Scrollable container but not clickable
                line = f'{depth_str}{shadow_prefix}|scroll element|<{node.original_node.tag_name}'
            elif node.is_interactive:
                # Clickable (and possibly scrollable) - show backend_node_id
                new_prefix = '*' if node.is_new else ''
                scroll_prefix = '|scroll element[' if should_show_scroll else '['
                line = f'{depth_str}{shadow_prefix}{new_prefix}{scroll_prefix}{node.original_node.backend_node_id}]<{node.original_node.tag_name}'
            elif node.original_node.tag_name.upper() == 'IFRAME':
                # Iframe element (not interactive)
                line = f'{depth_str}{shadow_prefix}|IFRAME|<{node.original_node.tag_name}'
            elif node.original_node.tag_name.upper() == 'FRAME':
                # Frame element (not interactive)
                line = f'{depth_str}{shadow_prefix}|FRAME|<{node.original_node.tag_name}'
            else:
                line = f'{depth_str}{shadow_prefix}<{node.original_node.tag_name}'

            if attributes_html_str:
                line += f' {attributes_html_str}'

            line += ' />'

            # Add scroll information only when we should show it
            if should_show_scroll:
                scroll_info_text = node.original_node.get_scroll_info_text()
                if scroll_info_text:
                    line += f' ({scroll_info_text})'

            formatted_text.append(line)
    elif node.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
        # Shadow DOM representation - show clearly to LLM
        if node.original_node.shadow_root_type and node.original_node.shadow_root_type.lower() == 'closed':
            formatted_text.append(f'{depth_str}Closed Shadow')
        else:
            formatted_text.append(f'{depth_str}Open Shadow')
        next_depth += 1

        # Process shadow DOM children
        for child in node.children:
            child_text = DOMTreeSerializer.serialize_tree(child, include_attributes, next_depth)
            if child_text:
                formatted_text.append(child_text)

        # Close shadow DOM indicator
        if node.children:  # Only show close if we had content
            formatted_text.append(f'{depth_str}Shadow End')
    elif node.original_node.node_type == NodeType.TEXT_NODE:
        # Include visible text (non-whitespace, longer than one character)
        is_visible = node.original_node.snapshot_node and node.original_node.is_visible
        if (
            is_visible
            and node.original_node.node_value
            and node.original_node.node_value.strip()
            and len(node.original_node.node_value.strip()) > 1
        ):
            clean_text = node.original_node.node_value.strip()
            formatted_text.append(f'{depth_str}{clean_text}')

    # Process children (for non-shadow elements)
    if node.original_node.node_type != NodeType.DOCUMENT_FRAGMENT_NODE:
        for child in node.children:
            child_text = DOMTreeSerializer.serialize_tree(child, include_attributes, next_depth)
            if child_text:
                formatted_text.append(child_text)

    # Add hidden content hint for iframes
    if (
        node.original_node.node_type == NodeType.ELEMENT_NODE
        and node.original_node.tag_name
        and node.original_node.tag_name.upper() in ('IFRAME', 'FRAME')
    ):
        if node.original_node.hidden_elements_info:
            # Show specific interactive elements with scroll distances
            hidden = node.original_node.hidden_elements_info
            hint_lines = [f'{depth_str}... ({len(hidden)} more elements below - scroll to reveal):']
            for elem in hidden:
                hint_lines.append(f'{depth_str}  <{elem["tag"]}> "{elem["text"]}" ~{elem["pages"]} pages down')
            formatted_text.extend(hint_lines)
        elif node.original_node.has_hidden_content:
            # Generic hint for non-interactive hidden content
            formatted_text.append(f'{depth_str}... (more content below viewport - scroll to reveal)')

    return '\n'.join(formatted_text)
	@staticmethod
	def _build_attributes_string(node: EnhancedDOMTreeNode, include_attributes: list[str], text: str) -> str:
		"""Build the serialized attributes string for an element.

		Pipeline: collect HTML attributes filtered by `include_attributes`, inject
		format/placeholder hints for date/time inputs, merge accessibility-tree
		properties, prefer the AX-tree current value for form controls, then prune
		duplicates/redundant entries and render as space-separated `key=value` pairs.

		Args:
			node: The DOM node whose attributes are being serialized.
			include_attributes: Whitelist (and ordering) of attribute keys to emit.
			text: The element's visible text, used to drop attributes that merely repeat it.

		Returns:
			A single-line attributes string, or '' when nothing survives filtering.
		"""
		attributes_to_include = {}
		# Include HTML attributes (whitelisted, non-empty after stripping)
		if node.attributes:
			attributes_to_include.update(
				{
					key: str(value).strip()
					for key, value in node.attributes.items()
					if key in include_attributes and str(value).strip() != ''
				}
			)
		# Add format hints for date/time inputs to help LLMs use the correct format
		# NOTE: These formats are standardized by HTML5 specification (ISO 8601), NOT locale-dependent
		# The browser may DISPLAY dates in locale format (MM/DD/YYYY in US, DD/MM/YYYY in EU),
		# but the .value attribute and programmatic setting ALWAYS uses these ISO formats:
		# - date: YYYY-MM-DD (e.g., "2024-03-15")
		# - time: HH:MM or HH:MM:SS (24-hour, e.g., "14:30")
		# - datetime-local: YYYY-MM-DDTHH:MM (e.g., "2024-03-15T14:30")
		# Reference: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/date
		if node.tag_name and node.tag_name.lower() == 'input' and node.attributes:
			input_type = node.attributes.get('type', '').lower()
			# For HTML5 date/time inputs, add a highly visible "format" attribute
			# This makes it IMPOSSIBLE for the model to miss the required format
			if input_type in ['date', 'time', 'datetime-local', 'month', 'week']:
				format_map = {
					'date': 'YYYY-MM-DD',
					'time': 'HH:MM',
					'datetime-local': 'YYYY-MM-DDTHH:MM',
					'month': 'YYYY-MM',
					'week': 'YYYY-W##',
				}
				# Add format as a special attribute that appears prominently
				# This appears BEFORE placeholder in the serialized output
				attributes_to_include['format'] = format_map[input_type]
			# Only add placeholder if it doesn't already exist
			if 'placeholder' in include_attributes and 'placeholder' not in attributes_to_include:
				# Native HTML5 date/time inputs - ISO format required
				if input_type == 'date':
					attributes_to_include['placeholder'] = 'YYYY-MM-DD'
				elif input_type == 'time':
					attributes_to_include['placeholder'] = 'HH:MM'
				elif input_type == 'datetime-local':
					attributes_to_include['placeholder'] = 'YYYY-MM-DDTHH:MM'
				elif input_type == 'month':
					attributes_to_include['placeholder'] = 'YYYY-MM'
				elif input_type == 'week':
					attributes_to_include['placeholder'] = 'YYYY-W##'
				# Tel - suggest format if no pattern attribute
				elif input_type == 'tel' and 'pattern' not in attributes_to_include:
					attributes_to_include['placeholder'] = '123-456-7890'
				# jQuery/Bootstrap/AngularJS datepickers (text inputs with datepicker classes/attributes)
				elif input_type in {'text', ''}:
					class_attr = node.attributes.get('class', '').lower()
					# Check for AngularJS UI Bootstrap datepicker (uib-datepicker-popup attribute)
					# This takes precedence as it's the most specific indicator
					if 'uib-datepicker-popup' in node.attributes:
						# Extract format from uib-datepicker-popup="MM/dd/yyyy"
						date_format = node.attributes.get('uib-datepicker-popup', '')
						if date_format:
							# Use 'expected_format' for clarity - this is the required input format
							attributes_to_include['expected_format'] = date_format
							# Also keep format for consistency with HTML5 date inputs
							attributes_to_include['format'] = date_format
					# Detect jQuery/Bootstrap datepickers by class names
					elif any(indicator in class_attr for indicator in ['datepicker', 'datetimepicker', 'daterangepicker']):
						# Try to get format from data-date-format attribute
						date_format = node.attributes.get('data-date-format', '')
						if date_format:
							attributes_to_include['placeholder'] = date_format
							attributes_to_include['format'] = date_format  # Also add format for jQuery datepickers
						else:
							# Default to common US format for jQuery datepickers
							attributes_to_include['placeholder'] = 'mm/dd/yyyy'
							attributes_to_include['format'] = 'mm/dd/yyyy'
					# Also detect by data-* attributes
					elif any(attr in node.attributes for attr in ['data-datepicker']):
						date_format = node.attributes.get('data-date-format', '')
						if date_format:
							attributes_to_include['placeholder'] = date_format
							attributes_to_include['format'] = date_format
						else:
							attributes_to_include['placeholder'] = 'mm/dd/yyyy'
							attributes_to_include['format'] = 'mm/dd/yyyy'
		# Include accessibility properties (whitelisted AX-tree properties)
		if node.ax_node and node.ax_node.properties:
			for prop in node.ax_node.properties:
				try:
					if prop.name in include_attributes and prop.value is not None:
						# Convert boolean to lowercase string, keep others as-is
						if isinstance(prop.value, bool):
							attributes_to_include[prop.name] = str(prop.value).lower()
						else:
							prop_value_str = str(prop.value).strip()
							if prop_value_str:
								attributes_to_include[prop.name] = prop_value_str
				except (AttributeError, ValueError):
					# Skip malformed AX properties rather than failing the whole element
					continue
		# Special handling for form elements - ensure current value is shown
		# For text inputs, textareas, and selects, prioritize showing the current value from AX tree
		if node.tag_name and node.tag_name.lower() in ['input', 'textarea', 'select']:
			# ALWAYS check AX tree - it reflects actual typed value, DOM attribute may not update
			if node.ax_node and node.ax_node.properties:
				for prop in node.ax_node.properties:
					# Try valuetext first (human-readable display value)
					if prop.name == 'valuetext' and prop.value:
						value_str = str(prop.value).strip()
						if value_str:
							attributes_to_include['value'] = value_str
							break
					# Also try 'value' property directly
					elif prop.name == 'value' and prop.value:
						value_str = str(prop.value).strip()
						if value_str:
							attributes_to_include['value'] = value_str
							break
		if not attributes_to_include:
			return ''
		# Remove duplicate values (only considered for values longer than 5 chars)
		ordered_keys = [key for key in include_attributes if key in attributes_to_include]
		if len(ordered_keys) > 1:
			keys_to_remove = set()
			seen_values = {}
			# Attributes that should never be removed as duplicates (they serve distinct purposes)
			protected_attrs = {'format', 'expected_format', 'placeholder', 'value', 'aria-label', 'title'}
			for key in ordered_keys:
				value = attributes_to_include[key]
				if len(value) > 5:
					if value in seen_values and key not in protected_attrs:
						keys_to_remove.add(key)
					else:
						seen_values[value] = key
			for key in keys_to_remove:
				del attributes_to_include[key]
		# Remove attributes that duplicate accessibility data
		role = node.ax_node.role if node.ax_node else None
		if role and node.node_name == role:
			attributes_to_include.pop('role', None)
		# Remove type attribute if it matches the tag name (e.g. <button type="button">)
		if 'type' in attributes_to_include and attributes_to_include['type'].lower() == node.node_name.lower():
			del attributes_to_include['type']
		# Remove invalid attribute if it's false (only show when true)
		if 'invalid' in attributes_to_include and attributes_to_include['invalid'].lower() == 'false':
			del attributes_to_include['invalid']
		# Drop boolean attributes whose value is effectively false
		boolean_attrs = {'required'}
		for attr in boolean_attrs:
			if attr in attributes_to_include and attributes_to_include[attr].lower() in {'false', '0', 'no'}:
				del attributes_to_include[attr]
		# Remove aria-expanded if we have expanded (prefer AX tree over HTML attribute)
		if 'expanded' in attributes_to_include and 'aria-expanded' in attributes_to_include:
			del attributes_to_include['aria-expanded']
		# Drop attributes that merely repeat the element's visible text
		attrs_to_remove_if_text_matches = ['aria-label', 'placeholder', 'title']
		for attr in attrs_to_remove_if_text_matches:
			if attributes_to_include.get(attr) and attributes_to_include.get(attr, '').strip().lower() == text.strip().lower():
				del attributes_to_include[attr]
		if attributes_to_include:
			# Format attributes, wrapping empty values in quotes for clarity
			formatted_attrs = []
			for key, value in attributes_to_include.items():
				capped_value = cap_text_length(value, 100)
				# Show empty values as key='' instead of key=
				if not capped_value:
					formatted_attrs.append(f"{key}=''")
				else:
					formatted_attrs.append(f'{key}={capped_value}')
			return ' '.join(formatted_attrs)
		return ''
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/serializer.py",
"license": "MIT License",
"lines": 1096,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/integrations/gmail/actions.py | """
Gmail Actions for Browser Use
Defines agent actions for Gmail integration including 2FA code retrieval,
email reading, and authentication management.
"""
import logging
from pydantic import BaseModel, Field
from browser_use.agent.views import ActionResult
from browser_use.tools.service import Tools
from .service import GmailService
logger = logging.getLogger(__name__)
# Global Gmail service instance - initialized when actions are registered.
# Shared by the `get_recent_emails` action closure registered in register_gmail_actions().
_gmail_service: GmailService | None = None
class GetRecentEmailsParams(BaseModel):
	"""Parameters for getting recent emails"""

	# Single Gmail search keyword; empty string means no keyword filter is applied.
	keyword: str = Field(default='', description='A single keyword for search, e.g. github, airbnb, etc.')
	# Validated by pydantic to the 1-50 range; defaults to 3.
	max_results: int = Field(default=3, ge=1, le=50, description='Maximum number of emails to retrieve (1-50, default: 3)')
def register_gmail_actions(tools: Tools, gmail_service: GmailService | None = None, access_token: str | None = None) -> Tools:
	"""
	Register Gmail actions with the provided tools.

	Initializes the module-level `_gmail_service` singleton (from the given
	service, a direct access token, or default file-based auth) and registers
	the `get_recent_emails` action on the tools registry.

	Args:
		tools: The browser-use tools to register actions with
		gmail_service: Optional pre-configured Gmail service instance
		access_token: Optional direct access token (alternative to file-based auth)

	Returns:
		The same tools instance, with the Gmail action registered.
	"""
	global _gmail_service
	# Use provided service or create a new one with access token if provided
	if gmail_service:
		_gmail_service = gmail_service
	elif access_token:
		_gmail_service = GmailService(access_token=access_token)
	else:
		_gmail_service = GmailService()

	@tools.registry.action(
		description='Get recent emails from the mailbox with a keyword to retrieve verification codes, OTP, 2FA tokens, magic links, or any recent email content. Keep your query a single keyword.',
		param_model=GetRecentEmailsParams,
	)
	async def get_recent_emails(params: GetRecentEmailsParams) -> ActionResult:
		"""Get recent emails from the last 5 minutes with full content"""
		try:
			if _gmail_service is None:
				raise RuntimeError('Gmail service not initialized')
			# Ensure authentication (lazy: triggers OAuth/token flow on first use)
			if not _gmail_service.is_authenticated():
				logger.info('📧 Gmail not authenticated, attempting authentication...')
				authenticated = await _gmail_service.authenticate()
				if not authenticated:
					return ActionResult(
						extracted_content='Failed to authenticate with Gmail. Please ensure Gmail credentials are set up properly.',
						long_term_memory='Gmail authentication failed',
					)
			# Use specified max_results (1-50, default 3), last 5 minutes
			max_results = params.max_results
			time_filter = '5m'
			# Build query with time filter and optional user query
			query_parts = [f'newer_than:{time_filter}']
			if params.keyword.strip():
				query_parts.append(params.keyword.strip())
			query = ' '.join(query_parts)
			logger.info(f'🔍 Gmail search query: {query}')
			# Get emails
			emails = await _gmail_service.get_recent_emails(max_results=max_results, query=query, time_filter=time_filter)
			if not emails:
				query_info = f" matching '{params.keyword}'" if params.keyword.strip() else ''
				memory = f'No recent emails found from last {time_filter}{query_info}'
				return ActionResult(
					extracted_content=memory,
					long_term_memory=memory,
				)
			# Format with full email content for large display
			content = f'Found {len(emails)} recent email{"s" if len(emails) > 1 else ""} from the last {time_filter}:\n\n'
			for i, email in enumerate(emails, 1):
				content += f'Email {i}:\n'
				content += f'From: {email["from"]}\n'
				content += f'Subject: {email["subject"]}\n'
				content += f'Date: {email["date"]}\n'
				content += f'Content:\n{email["body"]}\n'
				content += '-' * 50 + '\n\n'
			logger.info(f'📧 Retrieved {len(emails)} recent emails')
			return ActionResult(
				extracted_content=content,
				include_extracted_content_only_once=True,
				long_term_memory=f'Retrieved {len(emails)} recent emails from last {time_filter} for query {query}.',
			)
		except Exception as e:
			logger.error(f'Error getting recent emails: {e}')
			return ActionResult(
				error=f'Error getting recent emails: {str(e)}',
				long_term_memory='Failed to get recent emails due to error',
			)

	return tools
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/integrations/gmail/actions.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/integrations/gmail/service.py | """
Gmail API Service for Browser Use
Handles Gmail API authentication, email reading, and 2FA code extraction.
This service provides a clean interface for agents to interact with Gmail.
"""
import base64
import logging
import os
from pathlib import Path
from typing import Any
import anyio
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
class GmailService:
	"""
	Gmail API service for email reading.

	Provides functionality to:
	- Authenticate with Gmail API using OAuth2 (file-based flow or direct access token)
	- Read recent emails with filtering
	- Return full email content for agent analysis
	"""

	# Gmail API scopes (read-only access)
	SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']

	def __init__(
		self,
		credentials_file: str | None = None,
		token_file: str | None = None,
		config_dir: str | None = None,
		access_token: str | None = None,
	):
		"""
		Initialize Gmail Service.

		Args:
			credentials_file: Path to OAuth credentials JSON from Google Cloud Console
			token_file: Path to store/load access tokens
			config_dir: Directory to store config files (defaults to browser-use config directory)
			access_token: Direct access token (skips file-based auth if provided)
		"""
		# Set up configuration directory using browser-use's config system
		if config_dir is None:
			self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR
		else:
			self.config_dir = Path(config_dir).expanduser().resolve()
		# Ensure config directory exists (only if not using direct token)
		if access_token is None:
			self.config_dir.mkdir(parents=True, exist_ok=True)
		# Set up credential paths (explicit args win over config-dir defaults)
		self.credentials_file = credentials_file or self.config_dir / 'gmail_credentials.json'
		self.token_file = token_file or self.config_dir / 'gmail_token.json'
		# Direct access token support
		self.access_token = access_token
		# Populated lazily by authenticate(): the googleapiclient service handle
		# and the OAuth credentials object.
		self.service = None
		self.creds = None
		self._authenticated = False

	def is_authenticated(self) -> bool:
		"""Check if Gmail service is authenticated (authenticate() succeeded and a service handle exists)."""
		return self._authenticated and self.service is not None

	async def authenticate(self) -> bool:
		"""
		Handle OAuth authentication and token management.

		Uses the direct access token when one was provided; otherwise loads/refreshes
		tokens from token_file, falling back to an interactive local OAuth flow.

		Returns:
			bool: True if authentication successful, False otherwise
		"""
		try:
			logger.info('🔐 Authenticating with Gmail API...')
			# Check if using direct access token
			if self.access_token:
				logger.info('🔑 Using provided access token')
				# Create credentials from access token
				self.creds = Credentials(token=self.access_token, scopes=self.SCOPES)
				# Test token validity by building service
				self.service = build('gmail', 'v1', credentials=self.creds)
				self._authenticated = True
				logger.info('✅ Gmail API ready with access token!')
				return True
			# Original file-based authentication flow
			# Try to load existing tokens
			if os.path.exists(self.token_file):
				self.creds = Credentials.from_authorized_user_file(str(self.token_file), self.SCOPES)
				logger.debug('📁 Loaded existing tokens')
			# If no valid credentials, run OAuth flow
			if not self.creds or not self.creds.valid:
				if self.creds and self.creds.expired and self.creds.refresh_token:
					logger.info('🔄 Refreshing expired tokens...')
					# NOTE: synchronous call inside an async method — blocks the event loop
					# for the duration of the refresh request.
					self.creds.refresh(Request())
				else:
					logger.info('🌐 Starting OAuth flow...')
					if not os.path.exists(self.credentials_file):
						logger.error(
							f'❌ Gmail credentials file not found: {self.credentials_file}\n'
							'Please download it from Google Cloud Console:\n'
							'1. Go to https://console.cloud.google.com/\n'
							'2. APIs & Services > Credentials\n'
							'3. Download OAuth 2.0 Client JSON\n'
							f"4. Save as 'gmail_credentials.json' in {self.config_dir}/"
						)
						return False
					flow = InstalledAppFlow.from_client_secrets_file(str(self.credentials_file), self.SCOPES)
					# Use specific redirect URI to match OAuth credentials.
					# NOTE: run_local_server() is synchronous and blocks the event loop
					# until the user completes the browser-based consent flow.
					self.creds = flow.run_local_server(port=8080, open_browser=True)
				# Save tokens for next time (covers both refresh and fresh OAuth paths)
				await anyio.Path(self.token_file).write_text(self.creds.to_json())
				logger.info(f'💾 Tokens saved to {self.token_file}')
			# Build Gmail service
			self.service = build('gmail', 'v1', credentials=self.creds)
			self._authenticated = True
			logger.info('✅ Gmail API ready!')
			return True
		except Exception as e:
			logger.error(f'❌ Gmail authentication failed: {e}')
			return False

	async def get_recent_emails(self, max_results: int = 10, query: str = '', time_filter: str = '1h') -> list[dict[str, Any]]:
		"""
		Get recent emails with optional query filter.

		Args:
			max_results: Maximum number of emails to fetch
			query: Gmail search query (e.g., 'from:noreply@example.com')
			time_filter: Time filter (e.g., '5m', '1h', '1d')

		Returns:
			List of email dictionaries with parsed content (empty list on any failure).
		"""
		if not self.is_authenticated():
			logger.error('❌ Gmail service not authenticated. Call authenticate() first.')
			return []
		try:
			# Add time filter to query if provided (skipped when caller already set newer_than:)
			if time_filter and 'newer_than:' not in query:
				query = f'newer_than:{time_filter} {query}'.strip()
			logger.info(f'📧 Fetching {max_results} recent emails...')
			if query:
				logger.debug(f'🔍 Query: {query}')
			# Get message list
			assert self.service is not None
			results = self.service.users().messages().list(userId='me', maxResults=max_results, q=query).execute()
			messages = results.get('messages', [])
			if not messages:
				logger.info('📭 No messages found')
				return []
			logger.info(f'📨 Found {len(messages)} messages, fetching details...')
			# Get full message details (one API round-trip per message)
			emails = []
			for i, message in enumerate(messages, 1):
				logger.debug(f'📖 Reading email {i}/{len(messages)}...')
				full_message = self.service.users().messages().get(userId='me', id=message['id'], format='full').execute()
				email_data = self._parse_email(full_message)
				emails.append(email_data)
			return emails
		except HttpError as error:
			logger.error(f'❌ Gmail API error: {error}')
			return []
		except Exception as e:
			logger.error(f'❌ Unexpected error fetching emails: {e}')
			return []

	def _parse_email(self, message: dict[str, Any]) -> dict[str, Any]:
		"""Parse a raw Gmail API message into a flat dict (headers, timestamp, decoded body)."""
		headers = {h['name']: h['value'] for h in message['payload']['headers']}
		return {
			'id': message['id'],
			'thread_id': message['threadId'],
			'subject': headers.get('Subject', ''),
			'from': headers.get('From', ''),
			'to': headers.get('To', ''),
			'date': headers.get('Date', ''),
			'timestamp': int(message['internalDate']),
			'body': self._extract_body(message['payload']),
			'raw_message': message,
		}

	def _extract_body(self, payload: dict[str, Any]) -> str:
		"""Extract and base64-decode the email body from a message payload.

		Prefers text/plain parts, falling back to text/html when no plain text
		was found. NOTE: only scans the top-level 'parts' list — nested
		multipart structures (e.g. multipart/alternative inside multipart/mixed)
		are not descended into.
		"""
		body = ''
		if payload.get('body', {}).get('data'):
			# Simple (non-multipart) email body
			body = base64.urlsafe_b64decode(payload['body']['data']).decode('utf-8')
		elif payload.get('parts'):
			# Multi-part email
			for part in payload['parts']:
				if part['mimeType'] == 'text/plain' and part.get('body', {}).get('data'):
					part_body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
					body += part_body
				elif part['mimeType'] == 'text/html' and not body and part.get('body', {}).get('data'):
					# Fallback to HTML if no plain text
					body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
		return body
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/integrations/gmail/service.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/anthropic/chat.py | import json
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from anthropic import (
APIConnectionError,
APIStatusError,
AsyncAnthropic,
NotGiven,
RateLimitError,
omit,
)
from anthropic.types import CacheControlEphemeralParam, Message, ToolParam
from anthropic.types.model_param import ModelParam
from anthropic.types.text_block import TextBlock
from anthropic.types.tool_choice_tool_param import ToolChoiceToolParam
from httpx import Timeout
from pydantic import BaseModel
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAnthropic(BaseChatModel):
	"""
	A wrapper around Anthropic's chat model.

	Supports plain text completions and structured output via forced tool calling.
	Client construction parameters mirror the AsyncAnthropic constructor; unset
	values (None / NotGiven) are omitted so the SDK applies its own defaults.
	"""

	# Model configuration
	model: str | ModelParam
	max_tokens: int = 8192
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None
	# Client initialization parameters
	api_key: str | None = None
	auth_token: str | None = None
	base_url: str | httpx.URL | None = None
	timeout: float | Timeout | None | NotGiven = NotGiven()
	max_retries: int = 10
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None
	http_client: httpx.AsyncClient | None = None

	# Static
	@property
	def provider(self) -> str:
		return 'anthropic'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary, omitting unset (None / NotGiven) values."""
		# Define base client params
		base_params = {
			'api_key': self.api_key,
			'auth_token': self.auth_token,
			'base_url': self.base_url,
			'timeout': self.timeout,
			'max_retries': self.max_retries,
			'default_headers': self.default_headers,
			'default_query': self.default_query,
			'http_client': self.http_client,
		}
		# Create client_params dict with non-None and non-NotGiven values.
		# BUGFIX: the previous check `v is not NotGiven()` compared against a
		# freshly-constructed sentinel, which is never identical to any existing
		# value, so it was always True and the NotGiven timeout default leaked
		# into the AsyncAnthropic kwargs. isinstance filters every NotGiven
		# sentinel (including the SDK's NOT_GIVEN singleton).
		client_params = {}
		for k, v in base_params.items():
			if v is not None and not isinstance(v, NotGiven):
				client_params[k] = v
		return client_params

	def _get_client_params_for_invoke(self) -> dict[str, Any]:
		"""Prepare per-request sampling parameters, omitting unset values."""
		client_params = {}
		if self.temperature is not None:
			client_params['temperature'] = self.temperature
		if self.max_tokens is not None:
			client_params['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			client_params['top_p'] = self.top_p
		if self.seed is not None:
			# NOTE(review): 'seed' is not a documented Anthropic Messages API
			# parameter — confirm the SDK version in use accepts it.
			client_params['seed'] = self.seed
		return client_params

	def get_client(self) -> AsyncAnthropic:
		"""
		Returns an AsyncAnthropic client.

		Returns:
			AsyncAnthropic: An instance of the AsyncAnthropic client.
		"""
		client_params = self._get_client_params()
		return AsyncAnthropic(**client_params)

	@property
	def name(self) -> str:
		return str(self.model)

	def _get_usage(self, response: Message) -> ChatInvokeUsage | None:
		"""Map Anthropic usage accounting onto ChatInvokeUsage.

		Anthropic reports cache-read tokens separately from input_tokens, so they
		must be added back to get the true prompt total.
		"""
		usage = ChatInvokeUsage(
			prompt_tokens=response.usage.input_tokens + (response.usage.cache_read_input_tokens or 0),
			completion_tokens=response.usage.output_tokens,
			# NOTE(review): total_tokens excludes cache_read tokens while
			# prompt_tokens includes them — confirm this asymmetry is intended.
			total_tokens=response.usage.input_tokens + response.usage.output_tokens,
			prompt_cached_tokens=response.usage.cache_read_input_tokens,
			prompt_cache_creation_tokens=response.usage.cache_creation_input_tokens,
			prompt_image_tokens=None,
		)
		return usage

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke the model. With output_format=None returns plain text; otherwise
		forces a tool call matching the schema and returns the validated model.

		Raises:
			ModelProviderError / ModelRateLimitError: on API failures.
		"""
		anthropic_messages, system_prompt = AnthropicMessageSerializer.serialize_messages(messages)
		try:
			if output_format is None:
				# Normal completion without structured output
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					system=system_prompt or omit,
					**self._get_client_params_for_invoke(),
				)
				# Ensure we have a valid Message object before accessing attributes
				if not isinstance(response, Message):
					raise ModelProviderError(
						message=f'Unexpected response type from Anthropic API: {type(response).__name__}. Response: {str(response)[:200]}',
						status_code=502,
						model=self.name,
					)
				usage = self._get_usage(response)
				# Extract text from the first content block
				first_content = response.content[0]
				if isinstance(first_content, TextBlock):
					response_text = first_content.text
				else:
					# If it's not a text block, convert to string
					response_text = str(first_content)
				return ChatInvokeCompletion(
					completion=response_text,
					usage=usage,
					stop_reason=response.stop_reason,
				)
			else:
				# Use tool calling for structured output:
				# create a tool that represents the output format
				tool_name = output_format.__name__
				schema = SchemaOptimizer.create_optimized_json_schema(output_format)
				# Remove title from schema if present (Anthropic doesn't like it in parameters)
				if 'title' in schema:
					del schema['title']
				tool = ToolParam(
					name=tool_name,
					description=f'Extract information in the format of {tool_name}',
					input_schema=schema,
					cache_control=CacheControlEphemeralParam(type='ephemeral'),
				)
				# Force the model to use this tool
				tool_choice = ToolChoiceToolParam(type='tool', name=tool_name)
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					tools=[tool],
					system=system_prompt or omit,
					tool_choice=tool_choice,
					**self._get_client_params_for_invoke(),
				)
				# Ensure we have a valid Message object before accessing attributes
				if not isinstance(response, Message):
					raise ModelProviderError(
						message=f'Unexpected response type from Anthropic API: {type(response).__name__}. Response: {str(response)[:200]}',
						status_code=502,
						model=self.name,
					)
				usage = self._get_usage(response)
				# Extract the tool use block
				for content_block in response.content:
					if hasattr(content_block, 'type') and content_block.type == 'tool_use':
						# Parse the tool input as the structured output
						try:
							return ChatInvokeCompletion(
								completion=output_format.model_validate(content_block.input),
								usage=usage,
								stop_reason=response.stop_reason,
							)
						except Exception as e:
							# If validation fails, try to parse it as JSON first
							if isinstance(content_block.input, str):
								data = json.loads(content_block.input)
								return ChatInvokeCompletion(
									completion=output_format.model_validate(data),
									usage=usage,
									stop_reason=response.stop_reason,
								)
							raise e
				# If no tool use block found, raise an error
				raise ValueError('Expected tool use in response but none found')
		except APIConnectionError as e:
			raise ModelProviderError(message=e.message, model=self.name) from e
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/anthropic/chat.py",
"license": "MIT License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/anthropic/serializer.py | import json
from typing import overload
from anthropic.types import (
Base64ImageSourceParam,
CacheControlEphemeralParam,
ImageBlockParam,
MessageParam,
TextBlockParam,
ToolUseBlockParam,
URLImageSourceParam,
)
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SupportedImageMediaType,
SystemMessage,
UserMessage,
)
NonSystemMessage = UserMessage | AssistantMessage
class AnthropicMessageSerializer:
"""Serializer for converting between custom message types and Anthropic message param types."""
@staticmethod
def _is_base64_image(url: str) -> bool:
"""Check if the URL is a base64 encoded image."""
return url.startswith('data:image/')
@staticmethod
def _parse_base64_url(url: str) -> tuple[SupportedImageMediaType, str]:
"""Parse a base64 data URL to extract media type and data."""
# Format: data:image/jpeg;base64,<data>
if not url.startswith('data:'):
raise ValueError(f'Invalid base64 URL: {url}')
header, data = url.split(',', 1)
media_type = header.split(';')[0].replace('data:', '')
# Ensure it's a supported media type
supported_types = ['image/jpeg', 'image/png', 'image/gif', 'image/webp']
if media_type not in supported_types:
# Default to jpeg if not recognized
media_type = 'image/jpeg'
return media_type, data # type: ignore
@staticmethod
def _serialize_cache_control(use_cache: bool) -> CacheControlEphemeralParam | None:
"""Serialize cache control."""
if use_cache:
return CacheControlEphemeralParam(type='ephemeral')
return None
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam, use_cache: bool) -> TextBlockParam:
"""Convert a text content part to Anthropic's TextBlockParam."""
return TextBlockParam(
text=part.text, type='text', cache_control=AnthropicMessageSerializer._serialize_cache_control(use_cache)
)
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> ImageBlockParam:
"""Convert an image content part to Anthropic's ImageBlockParam."""
url = part.image_url.url
if AnthropicMessageSerializer._is_base64_image(url):
# Handle base64 encoded images
media_type, data = AnthropicMessageSerializer._parse_base64_url(url)
return ImageBlockParam(
source=Base64ImageSourceParam(
data=data,
media_type=media_type,
type='base64',
),
type='image',
)
else:
# Handle URL images
return ImageBlockParam(source=URLImageSourceParam(url=url, type='url'), type='image')
@staticmethod
def _serialize_content_to_str(
content: str | list[ContentPartTextParam], use_cache: bool = False
) -> list[TextBlockParam] | str:
"""Serialize content to a string."""
cache_control = AnthropicMessageSerializer._serialize_cache_control(use_cache)
if isinstance(content, str):
if cache_control:
return [TextBlockParam(text=content, type='text', cache_control=cache_control)]
else:
return content
serialized_blocks: list[TextBlockParam] = []
for i, part in enumerate(content):
is_last = i == len(content) - 1
if part.type == 'text':
serialized_blocks.append(
AnthropicMessageSerializer._serialize_content_part_text(part, use_cache=use_cache and is_last)
)
return serialized_blocks
@staticmethod
def _serialize_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
use_cache: bool = False,
) -> str | list[TextBlockParam | ImageBlockParam]:
"""Serialize content to Anthropic format."""
if isinstance(content, str):
if use_cache:
return [TextBlockParam(text=content, type='text', cache_control=CacheControlEphemeralParam(type='ephemeral'))]
else:
return content
serialized_blocks: list[TextBlockParam | ImageBlockParam] = []
for i, part in enumerate(content):
is_last = i == len(content) - 1
if part.type == 'text':
serialized_blocks.append(
AnthropicMessageSerializer._serialize_content_part_text(part, use_cache=use_cache and is_last)
)
elif part.type == 'image_url':
serialized_blocks.append(AnthropicMessageSerializer._serialize_content_part_image(part))
return serialized_blocks
@staticmethod
def _serialize_tool_calls_to_content(tool_calls, use_cache: bool = False) -> list[ToolUseBlockParam]:
"""Convert tool calls to Anthropic's ToolUseBlockParam format."""
blocks: list[ToolUseBlockParam] = []
for i, tool_call in enumerate(tool_calls):
# Parse the arguments JSON string to object
try:
input_obj = json.loads(tool_call.function.arguments)
except json.JSONDecodeError:
# If arguments aren't valid JSON, use as string
input_obj = {'arguments': tool_call.function.arguments}
is_last = i == len(tool_calls) - 1
blocks.append(
ToolUseBlockParam(
id=tool_call.id,
input=input_obj,
name=tool_call.function.name,
type='tool_use',
cache_control=AnthropicMessageSerializer._serialize_cache_control(use_cache and is_last),
)
)
return blocks
	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> MessageParam: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> SystemMessage: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> MessageParam: ...

	@staticmethod
	def serialize(message: BaseMessage) -> MessageParam | SystemMessage:
		"""Serialize a custom message to an Anthropic MessageParam.

		Note: Anthropic doesn't have a 'system' role. System messages are returned
		unchanged here; serialize_messages() extracts them so they can be passed as
		the top-level `system` parameter of the API call instead.

		Raises:
			ValueError: if the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			content = AnthropicMessageSerializer._serialize_content(message.content, use_cache=message.cache)
			return MessageParam(role='user', content=content)

		elif isinstance(message, SystemMessage):
			# Anthropic has no system role in the messages array; hand the
			# message back untouched so the caller can pass it separately.
			return message

		elif isinstance(message, AssistantMessage):
			# Handle content and tool calls
			blocks: list[TextBlockParam | ToolUseBlockParam] = []

			# Add content blocks if present
			if message.content is not None:
				if isinstance(message.content, str):
					# String content: only cache if it's the only/last block (no tool calls)
					blocks.append(
						TextBlockParam(
							text=message.content,
							type='text',
							cache_control=AnthropicMessageSerializer._serialize_cache_control(
								message.cache and not message.tool_calls
							),
						)
					)
				else:
					# Process content parts (text and refusal)
					for i, part in enumerate(message.content):
						# Only last content block gets cache if there are no tool calls
						is_last_content = (i == len(message.content) - 1) and not message.tool_calls
						if part.type == 'text':
							blocks.append(
								AnthropicMessageSerializer._serialize_content_part_text(
									part, use_cache=message.cache and is_last_content
								)
							)
						# NOTE: refusal parts are intentionally skipped — Anthropic has
						# no refusal block type.

			# Add tool use blocks if present; when present they come after the
			# content blocks, so cache eligibility moves to the final tool block.
			if message.tool_calls:
				tool_blocks = AnthropicMessageSerializer._serialize_tool_calls_to_content(
					message.tool_calls, use_cache=message.cache
				)
				blocks.extend(tool_blocks)

			# If no content or tool calls, add empty text block
			# (Anthropic requires at least one content block)
			if not blocks:
				blocks.append(
					TextBlockParam(
						text='', type='text', cache_control=AnthropicMessageSerializer._serialize_cache_control(message.cache)
					)
				)

			# If caching is enabled or we have multiple blocks, return blocks as-is
			# Otherwise, simplify single text blocks to plain string
			if message.cache or len(blocks) > 1:
				content = blocks
			else:
				# Only simplify when no caching and single block
				single_block = blocks[0]
				if single_block['type'] == 'text' and not single_block.get('cache_control'):
					content = single_block['text']
				else:
					content = blocks

			return MessageParam(
				role='assistant',
				content=content,
			)

		else:
			raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def _clean_cache_messages(messages: list[NonSystemMessage]) -> list[NonSystemMessage]:
"""Clean cache settings so only the last cache=True message remains cached.
Because of how Claude caching works, only the last cache message matters.
This method automatically removes cache=True from all messages except the last one.
Args:
messages: List of non-system messages to clean
Returns:
List of messages with cleaned cache settings
"""
if not messages:
return messages
# Create a copy to avoid modifying the original
cleaned_messages = [msg.model_copy(deep=True) for msg in messages]
# Find the last message with cache=True
last_cache_index = -1
for i in range(len(cleaned_messages) - 1, -1, -1):
if cleaned_messages[i].cache:
last_cache_index = i
break
# If we found a cached message, disable cache for all others
if last_cache_index != -1:
for i, msg in enumerate(cleaned_messages):
if i != last_cache_index and msg.cache:
# Set cache to False for all messages except the last cached one
msg.cache = False
return cleaned_messages
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> tuple[list[MessageParam], list[TextBlockParam] | str | None]:
"""Serialize a list of messages, extracting any system message.
Returns:
A tuple of (messages, system_message) where system_message is extracted
from any SystemMessage in the list.
"""
messages = [m.model_copy(deep=True) for m in messages]
# Separate system messages from normal messages
normal_messages: list[NonSystemMessage] = []
system_message: SystemMessage | None = None
for message in messages:
if isinstance(message, SystemMessage):
system_message = message
else:
normal_messages.append(message)
# Clean cache messages so only the last cache=True message remains cached
normal_messages = AnthropicMessageSerializer._clean_cache_messages(normal_messages)
# Serialize normal messages
serialized_messages: list[MessageParam] = []
for message in normal_messages:
serialized_messages.append(AnthropicMessageSerializer.serialize(message))
# Serialize system message
serialized_system_message: list[TextBlockParam] | str | None = None
if system_message:
serialized_system_message = AnthropicMessageSerializer._serialize_content_to_str(
system_message.content, use_cache=system_message.cache
)
return serialized_messages, serialized_system_message
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/anthropic/serializer.py",
"license": "MIT License",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/aws/chat_anthropic.py | import json
from collections.abc import Mapping
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, TypeVar, overload
from anthropic import (
APIConnectionError,
APIStatusError,
AsyncAnthropicBedrock,
RateLimitError,
omit,
)
from anthropic.types import CacheControlEphemeralParam, Message, ToolParam
from anthropic.types.text_block import TextBlock
from anthropic.types.tool_choice_tool_param import ToolChoiceToolParam
from pydantic import BaseModel
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer
from browser_use.llm.aws.chat_bedrock import ChatAWSBedrock
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
if TYPE_CHECKING:
from boto3.session import Session # pyright: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAnthropicBedrock(ChatAWSBedrock):
	"""
	AWS Bedrock Anthropic Claude chat model.

	This is a convenience class that provides Claude-specific defaults
	for the AWS Bedrock service. It inherits all functionality from
	ChatAWSBedrock but sets Anthropic Claude as the default model.

	Unlike the parent class (which uses boto3's Converse API), this class talks
	to Bedrock through the Anthropic SDK's AsyncAnthropicBedrock client.
	"""

	# Anthropic Claude specific defaults
	model: str = 'anthropic.claude-3-5-sonnet-20240620-v1:0'
	max_tokens: int = 8192
	temperature: float | None = None
	top_p: float | None = None
	top_k: int | None = None
	stop_sequences: list[str] | None = None
	seed: int | None = None

	# AWS credentials and configuration
	aws_access_key: str | None = None
	aws_secret_key: str | None = None
	aws_session_token: str | None = None
	aws_region: str | None = None
	session: 'Session | None' = None

	# Client initialization parameters
	max_retries: int = 10
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None

	@property
	def provider(self) -> str:
		"""Provider identifier used for logging/telemetry."""
		return 'anthropic_bedrock'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary for Bedrock.

		A boto3 Session, when provided, takes precedence over the individual
		credential fields; its credentials and region are forwarded verbatim.
		"""
		client_params: dict[str, Any] = {}

		if self.session:
			credentials = self.session.get_credentials()
			client_params.update(
				{
					'aws_access_key': credentials.access_key,
					'aws_secret_key': credentials.secret_key,
					'aws_session_token': credentials.token,
					'aws_region': self.session.region_name,
				}
			)
		else:
			# Use individual credentials
			if self.aws_access_key:
				client_params['aws_access_key'] = self.aws_access_key
			if self.aws_secret_key:
				client_params['aws_secret_key'] = self.aws_secret_key
			if self.aws_region:
				client_params['aws_region'] = self.aws_region
			if self.aws_session_token:
				client_params['aws_session_token'] = self.aws_session_token

		# Add optional parameters
		if self.max_retries:
			client_params['max_retries'] = self.max_retries
		if self.default_headers:
			client_params['default_headers'] = self.default_headers
		if self.default_query:
			client_params['default_query'] = self.default_query

		return client_params

	def _get_client_params_for_invoke(self) -> dict[str, Any]:
		"""Collect per-request sampling parameters, skipping unset (None) fields.

		NOTE(review): 'seed' is forwarded to messages.create below, but the
		Anthropic SDK does not document a seed parameter — confirm it is accepted.
		"""
		client_params = {}

		if self.temperature is not None:
			client_params['temperature'] = self.temperature
		if self.max_tokens is not None:
			client_params['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			client_params['top_p'] = self.top_p
		if self.top_k is not None:
			client_params['top_k'] = self.top_k
		if self.seed is not None:
			client_params['seed'] = self.seed
		if self.stop_sequences is not None:
			client_params['stop_sequences'] = self.stop_sequences

		return client_params

	def get_client(self) -> AsyncAnthropicBedrock:
		"""
		Returns an AsyncAnthropicBedrock client.

		A new client is constructed on every call (no caching here).

		Returns:
			AsyncAnthropicBedrock: An instance of the AsyncAnthropicBedrock client.
		"""
		client_params = self._get_client_params()
		return AsyncAnthropicBedrock(**client_params)

	@property
	def name(self) -> str:
		"""Model identifier reported in errors and usage tracking."""
		return str(self.model)

	def _get_usage(self, response: Message) -> ChatInvokeUsage | None:
		"""Extract usage information from the response.

		Anthropic reports cache-read tokens separately from input tokens, so they
		are added back into prompt_tokens to get the true prompt size.

		NOTE(review): total_tokens below does NOT include cache_read tokens even
		though prompt_tokens does — confirm whether that asymmetry is intended.
		"""
		usage = ChatInvokeUsage(
			prompt_tokens=response.usage.input_tokens
			+ (
				response.usage.cache_read_input_tokens or 0
			),  # Anthropic reports cached tokens separately; add them to get the full prompt size
			completion_tokens=response.usage.output_tokens,
			total_tokens=response.usage.input_tokens + response.usage.output_tokens,
			prompt_cached_tokens=response.usage.cache_read_input_tokens,
			prompt_cache_creation_tokens=response.usage.cache_creation_input_tokens,
			prompt_image_tokens=None,
		)
		return usage

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke Claude on Bedrock.

		Plain-text mode returns the first content block's text. Structured mode
		forces a tool call shaped like `output_format` and validates its input.

		Raises:
			ModelRateLimitError: on Anthropic rate limiting.
			ModelProviderError: on connection/API errors or missing tool use.
		"""
		anthropic_messages, system_prompt = AnthropicMessageSerializer.serialize_messages(messages)

		try:
			if output_format is None:
				# Normal completion without structured output
				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					system=system_prompt or omit,
					**self._get_client_params_for_invoke(),
				)

				usage = self._get_usage(response)

				# Extract text from the first content block
				first_content = response.content[0]
				if isinstance(first_content, TextBlock):
					response_text = first_content.text
				else:
					# If it's not a text block, convert to string
					response_text = str(first_content)

				return ChatInvokeCompletion(
					completion=response_text,
					usage=usage,
				)
			else:
				# Use tool calling for structured output
				# Create a tool that represents the output format
				tool_name = output_format.__name__
				schema = output_format.model_json_schema()

				# Remove title from schema if present (Anthropic doesn't like it in parameters)
				if 'title' in schema:
					del schema['title']

				tool = ToolParam(
					name=tool_name,
					description=f'Extract information in the format of {tool_name}',
					input_schema=schema,
					cache_control=CacheControlEphemeralParam(type='ephemeral'),
				)

				# Force the model to use this tool
				tool_choice = ToolChoiceToolParam(type='tool', name=tool_name)

				response = await self.get_client().messages.create(
					model=self.model,
					messages=anthropic_messages,
					tools=[tool],
					system=system_prompt or omit,
					tool_choice=tool_choice,
					**self._get_client_params_for_invoke(),
				)

				usage = self._get_usage(response)

				# Extract the tool use block
				for content_block in response.content:
					if hasattr(content_block, 'type') and content_block.type == 'tool_use':
						# Parse the tool input as the structured output
						try:
							return ChatInvokeCompletion(completion=output_format.model_validate(content_block.input), usage=usage)
						except Exception as e:
							# If validation fails, try to parse it as JSON first
							if isinstance(content_block.input, str):
								data = json.loads(content_block.input)
								return ChatInvokeCompletion(
									completion=output_format.model_validate(data),
									usage=usage,
								)
							raise e

				# If no tool use block found, raise an error
				raise ValueError('Expected tool use in response but none found')

		except APIConnectionError as e:
			raise ModelProviderError(message=e.message, model=self.name) from e
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/aws/chat_anthropic.py",
"license": "MIT License",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/aws/chat_bedrock.py | import json
from dataclasses import dataclass
from os import getenv
from typing import TYPE_CHECKING, Any, TypeVar, overload
from pydantic import BaseModel
from browser_use.llm.aws.serializer import AWSBedrockMessageSerializer
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
if TYPE_CHECKING:
from boto3 import client as AwsClient # type: ignore
from boto3.session import Session # type: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatAWSBedrock(BaseChatModel):
	"""
	AWS Bedrock chat model supporting multiple providers (Anthropic, Meta, etc.).

	This class provides access to various models via AWS Bedrock,
	supporting both text generation and structured output via tool calling.

	To use this model, you need to either:
	1. Set the following environment variables:
	   - AWS_ACCESS_KEY_ID
	   - AWS_SECRET_ACCESS_KEY
	   - AWS_SESSION_TOKEN (only required when using temporary credentials)
	   - AWS_REGION
	2. Or provide a boto3 Session object
	3. Or use AWS SSO authentication
	"""

	# Model configuration
	model: str = 'anthropic.claude-3-5-sonnet-20240620-v1:0'
	max_tokens: int | None = 4096
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None
	stop_sequences: list[str] | None = None

	# AWS credentials and configuration
	aws_access_key_id: str | None = None
	aws_secret_access_key: str | None = None
	aws_session_token: str | None = None
	aws_region: str | None = None
	aws_sso_auth: bool = False
	session: 'Session | None' = None

	# Request parameters
	request_params: dict[str, Any] | None = None

	# Static
	@property
	def provider(self) -> str:
		"""Provider identifier used for logging/telemetry."""
		return 'aws_bedrock'

	def _get_client(self) -> 'AwsClient':  # type: ignore
		"""Get the AWS Bedrock client.

		Credential resolution order: boto3 session > SSO default chain (when
		aws_sso_auth is set) > explicit fields / environment variables.

		Raises:
			ImportError: if boto3 is not installed.
			ModelProviderError: if no credentials can be resolved.
		"""
		try:
			from boto3 import client as AwsClient  # type: ignore
		except ImportError:
			raise ImportError(
				'`boto3` not installed. Please install using `pip install browser-use[aws] or pip install browser-use[all]`'
			)

		if self.session:
			return self.session.client('bedrock-runtime')

		# Get credentials from environment or instance parameters
		access_key = self.aws_access_key_id or getenv('AWS_ACCESS_KEY_ID')
		secret_key = self.aws_secret_access_key or getenv('AWS_SECRET_ACCESS_KEY')
		session_token = self.aws_session_token or getenv('AWS_SESSION_TOKEN')
		region = self.aws_region or getenv('AWS_REGION') or getenv('AWS_DEFAULT_REGION')

		if self.aws_sso_auth:
			# Let boto3's default credential chain (SSO profile) supply credentials
			return AwsClient(service_name='bedrock-runtime', region_name=region)
		else:
			if not access_key or not secret_key:
				raise ModelProviderError(
					message='AWS credentials not found. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables (and AWS_SESSION_TOKEN if using temporary credentials) or provide a boto3 session.',
					model=self.name,
				)

			return AwsClient(
				service_name='bedrock-runtime',
				region_name=region,
				aws_access_key_id=access_key,
				aws_secret_access_key=secret_key,
				aws_session_token=session_token,
			)

	@property
	def name(self) -> str:
		"""Model identifier reported in errors and usage tracking."""
		return str(self.model)

	def _get_inference_config(self) -> dict[str, Any]:
		"""Get the inference configuration for the request.

		NOTE(review): 'seed' is not a documented Converse API inferenceConfig
		field — confirm Bedrock accepts it for the models in use.
		"""
		config = {}
		if self.max_tokens is not None:
			config['maxTokens'] = self.max_tokens
		if self.temperature is not None:
			config['temperature'] = self.temperature
		if self.top_p is not None:
			config['topP'] = self.top_p
		if self.stop_sequences is not None:
			config['stopSequences'] = self.stop_sequences
		if self.seed is not None:
			config['seed'] = self.seed
		return config

	def _format_tools_for_request(self, output_format: type[BaseModel]) -> list[dict[str, Any]]:
		"""Format a Pydantic model as a tool for structured output.

		NOTE(review): this only copies each property's top-level 'type' and
		'description', so nested models, arrays-of-objects and $defs/anyOf in the
		Pydantic schema are flattened away — confirm this is sufficient for the
		schemas passed in.
		"""
		schema = output_format.model_json_schema()

		# Convert Pydantic schema to Bedrock tool format
		properties = {}
		required = []

		for prop_name, prop_info in schema.get('properties', {}).items():
			properties[prop_name] = {
				'type': prop_info.get('type', 'string'),
				'description': prop_info.get('description', ''),
			}

		# Add required fields
		required = schema.get('required', [])

		return [
			{
				'toolSpec': {
					'name': f'extract_{output_format.__name__.lower()}',
					'description': f'Extract information in the format of {output_format.__name__}',
					'inputSchema': {'json': {'type': 'object', 'properties': properties, 'required': required}},
				}
			}
		]

	def _get_usage(self, response: dict[str, Any]) -> ChatInvokeUsage | None:
		"""Extract usage information from the Converse API response, if present."""
		if 'usage' not in response:
			return None

		usage_data = response['usage']
		return ChatInvokeUsage(
			prompt_tokens=usage_data.get('inputTokens', 0),
			completion_tokens=usage_data.get('outputTokens', 0),
			total_tokens=usage_data.get('totalTokens', 0),
			prompt_cached_tokens=None,  # Bedrock doesn't provide this
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
		)

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the AWS Bedrock model with the given messages.

		NOTE(review): client.converse is a synchronous boto3 call made inside an
		async method, so it blocks the event loop for the duration of the request
		— confirm whether this should be offloaded to a thread.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelRateLimitError: on Bedrock throttling.
			ModelProviderError: on other client/validation failures.
		"""
		try:
			from botocore.exceptions import ClientError  # type: ignore
		except ImportError:
			raise ImportError(
				'`boto3` not installed. Please install using `pip install browser-use[aws] or pip install browser-use[all]`'
			)

		bedrock_messages, system_message = AWSBedrockMessageSerializer.serialize_messages(messages)

		try:
			# Prepare the request body
			body: dict[str, Any] = {}

			if system_message:
				body['system'] = system_message

			inference_config = self._get_inference_config()
			if inference_config:
				body['inferenceConfig'] = inference_config

			# Handle structured output via tool calling
			if output_format is not None:
				tools = self._format_tools_for_request(output_format)
				body['toolConfig'] = {'tools': tools}

			# Add any additional request parameters
			if self.request_params:
				body.update(self.request_params)

			# Filter out None values
			body = {k: v for k, v in body.items() if v is not None}

			# Make the API call
			client = self._get_client()
			response = client.converse(modelId=self.model, messages=bedrock_messages, **body)

			usage = self._get_usage(response)

			# Extract the response content
			if 'output' in response and 'message' in response['output']:
				message = response['output']['message']
				content = message.get('content', [])

				if output_format is None:
					# Return text response: join every text block with newlines
					text_content = []
					for item in content:
						if 'text' in item:
							text_content.append(item['text'])

					response_text = '\n'.join(text_content) if text_content else ''
					return ChatInvokeCompletion(
						completion=response_text,
						usage=usage,
					)
				else:
					# Handle structured output from tool calls
					for item in content:
						if 'toolUse' in item:
							tool_use = item['toolUse']
							tool_input = tool_use.get('input', {})

							try:
								# Validate and return the structured output
								return ChatInvokeCompletion(
									completion=output_format.model_validate(tool_input),
									usage=usage,
								)
							except Exception as e:
								# If validation fails, try to parse as JSON first
								if isinstance(tool_input, str):
									try:
										data = json.loads(tool_input)
										return ChatInvokeCompletion(
											completion=output_format.model_validate(data),
											usage=usage,
										)
									except json.JSONDecodeError:
										pass
								raise ModelProviderError(
									message=f'Failed to validate structured output: {str(e)}',
									model=self.name,
								) from e

					# If no tool use found but output_format was requested
					raise ModelProviderError(
						message='Expected structured output but no tool use found in response',
						model=self.name,
					)

			# If no valid content found
			if output_format is None:
				return ChatInvokeCompletion(
					completion='',
					usage=usage,
				)
			else:
				raise ModelProviderError(
					message='No valid content found in response',
					model=self.name,
				)

		except ClientError as e:
			error_code = e.response.get('Error', {}).get('Code', 'Unknown')
			error_message = e.response.get('Error', {}).get('Message', str(e))

			if error_code in ['ThrottlingException', 'TooManyRequestsException']:
				raise ModelRateLimitError(message=error_message, model=self.name) from e
			else:
				raise ModelProviderError(message=error_message, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/aws/chat_bedrock.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/aws/serializer.py | import base64
import json
import re
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class AWSBedrockMessageSerializer:
	"""Serializer for converting between custom message types and AWS Bedrock message format."""

	@staticmethod
	def _is_base64_image(url: str) -> bool:
		"""Check if the URL is a base64 encoded image (data:image/... URI)."""
		return url.startswith('data:image/')

	@staticmethod
	def _is_url_image(url: str) -> bool:
		"""Check if the URL is a regular HTTP/HTTPS image URL.

		NOTE(review): '.bmp' is accepted here but the download path below labels
		unknown formats as 'jpeg' — confirm how Bedrock handles that combination.
		"""
		return url.startswith(('http://', 'https://')) and any(
			url.lower().endswith(ext) for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp']
		)

	@staticmethod
	def _parse_base64_url(url: str) -> tuple[str, bytes]:
		"""Parse a base64 data URL to extract format and raw bytes.

		Returns:
			(image_format, image_bytes) where image_format is one of
			jpeg/png/gif/webp (unknown mime subtypes fall back to 'jpeg').

		Raises:
			ValueError: if the URL is not a data URL or the payload is not
				valid base64.
		"""
		# Format: data:image/jpeg;base64,<data>
		if not url.startswith('data:'):
			raise ValueError(f'Invalid base64 URL: {url}')

		header, data = url.split(',', 1)

		# Extract format from mime type
		mime_match = re.search(r'image/(\w+)', header)
		if mime_match:
			format_name = mime_match.group(1).lower()
			# Map common formats
			format_mapping = {'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png', 'gif': 'gif', 'webp': 'webp'}
			image_format = format_mapping.get(format_name, 'jpeg')
		else:
			image_format = 'jpeg'  # Default format

		# Decode base64 data
		try:
			image_bytes = base64.b64decode(data)
		except Exception as e:
			raise ValueError(f'Failed to decode base64 image data: {e}')

		return image_format, image_bytes

	@staticmethod
	def _download_and_convert_image(url: str) -> tuple[str, bytes]:
		"""Download an image from URL and return (format, raw bytes).

		NOTE(review): this performs a blocking httpx.get (30s timeout); if the
		serializer is used on an event loop, confirm whether this should be async.
		"""
		try:
			import httpx
		except ImportError:
			raise ImportError('httpx not available. Please install it to use URL images with AWS Bedrock.')

		try:
			response = httpx.get(url, timeout=30)
			response.raise_for_status()

			# Detect format from content type or URL; unknowns fall back to jpeg
			content_type = response.headers.get('content-type', '').lower()
			if 'jpeg' in content_type or url.lower().endswith(('.jpg', '.jpeg')):
				image_format = 'jpeg'
			elif 'png' in content_type or url.lower().endswith('.png'):
				image_format = 'png'
			elif 'gif' in content_type or url.lower().endswith('.gif'):
				image_format = 'gif'
			elif 'webp' in content_type or url.lower().endswith('.webp'):
				image_format = 'webp'
			else:
				image_format = 'jpeg'  # Default format

			return image_format, response.content
		except Exception as e:
			raise ValueError(f'Failed to download image from {url}: {e}')

	@staticmethod
	def _serialize_content_part_text(part: ContentPartTextParam) -> dict[str, Any]:
		"""Convert a text content part to AWS Bedrock format."""
		return {'text': part.text}

	@staticmethod
	def _serialize_content_part_image(part: ContentPartImageParam) -> dict[str, Any]:
		"""Convert an image content part to AWS Bedrock format.

		Raises:
			ValueError: if the URL is neither a data URL nor a recognised
				HTTP(S) image URL.
		"""
		url = part.image_url.url

		if AWSBedrockMessageSerializer._is_base64_image(url):
			# Handle base64 encoded images
			image_format, image_bytes = AWSBedrockMessageSerializer._parse_base64_url(url)
		elif AWSBedrockMessageSerializer._is_url_image(url):
			# Download and convert URL images
			image_format, image_bytes = AWSBedrockMessageSerializer._download_and_convert_image(url)
		else:
			raise ValueError(f'Unsupported image URL format: {url}')

		return {
			'image': {
				'format': image_format,
				'source': {
					'bytes': image_bytes,
				},
			}
		}

	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> list[dict[str, Any]]:
		"""Serialize content for user messages (text and image parts)."""
		if isinstance(content, str):
			return [{'text': content}]

		content_blocks: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))
			elif part.type == 'image_url':
				content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_image(part))

		return content_blocks

	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> list[dict[str, Any]]:
		"""Serialize content for system messages (text parts only)."""
		if isinstance(content, str):
			return [{'text': content}]

		content_blocks: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))

		return content_blocks

	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> list[dict[str, Any]]:
		"""Serialize content for assistant messages (refusal parts are dropped)."""
		if content is None:
			return []

		if isinstance(content, str):
			return [{'text': content}]

		content_blocks: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				content_blocks.append(AWSBedrockMessageSerializer._serialize_content_part_text(part))
			# Skip refusal content parts - AWS Bedrock doesn't need them

		return content_blocks

	@staticmethod
	def _serialize_tool_call(tool_call: ToolCall) -> dict[str, Any]:
		"""Convert a tool call to AWS Bedrock 'toolUse' format.

		Arguments that are not valid JSON are preserved by wrapping the raw
		string under an 'arguments' key.
		"""
		try:
			arguments = json.loads(tool_call.function.arguments)
		except json.JSONDecodeError:
			# If arguments aren't valid JSON, wrap them
			arguments = {'arguments': tool_call.function.arguments}

		return {
			'toolUse': {
				'toolUseId': tool_call.id,
				'name': tool_call.function.name,
				'input': arguments,
			}
		}

	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> dict[str, Any]: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> SystemMessage: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> dict[str, Any]: ...

	@staticmethod
	def serialize(message: BaseMessage) -> dict[str, Any] | SystemMessage:
		"""Serialize a custom message to AWS Bedrock format.

		SystemMessages are returned unchanged — serialize_messages() extracts
		them for the Converse API's separate `system` parameter.

		Raises:
			ValueError: if the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			return {
				'role': 'user',
				'content': AWSBedrockMessageSerializer._serialize_user_content(message.content),
			}

		elif isinstance(message, SystemMessage):
			# System messages are handled separately in AWS Bedrock
			return message

		elif isinstance(message, AssistantMessage):
			content_blocks: list[dict[str, Any]] = []

			# Add content blocks if present
			if message.content is not None:
				content_blocks.extend(AWSBedrockMessageSerializer._serialize_assistant_content(message.content))

			# Add tool use blocks if present
			if message.tool_calls:
				for tool_call in message.tool_calls:
					content_blocks.append(AWSBedrockMessageSerializer._serialize_tool_call(tool_call))

			# AWS Bedrock requires at least one content block
			if not content_blocks:
				content_blocks = [{'text': ''}]

			return {
				'role': 'assistant',
				'content': content_blocks,
			}

		else:
			raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]:
		"""
		Serialize a list of messages, extracting any system message.

		Returns:
			Tuple of (bedrock_messages, system_message) where system_message is
			built from the last SystemMessage in the list (earlier ones are
			overwritten).
		"""
		bedrock_messages: list[dict[str, Any]] = []
		system_message: list[dict[str, Any]] | None = None

		for message in messages:
			if isinstance(message, SystemMessage):
				# Extract system message content
				system_message = AWSBedrockMessageSerializer._serialize_system_content(message.content)
			else:
				# Serialize and add to regular messages
				serialized = AWSBedrockMessageSerializer.serialize(message)
				bedrock_messages.append(serialized)

		return bedrock_messages, system_message
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/aws/serializer.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/azure/chat.py | import os
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, RateLimitError
from openai import AsyncAzureOpenAI as AsyncAzureOpenAIClient
from openai.types.responses import Response
from openai.types.shared import ChatModel
from pydantic import BaseModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.like import ChatOpenAILike
from browser_use.llm.openai.responses_serializer import ResponsesAPIMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
# Generic type for structured-output Pydantic models
T = TypeVar('T', bound=BaseModel)

# List of models that only support the Responses API
# (substring-matched against the configured model name; these cannot be
# called through the Chat Completions endpoint on Azure OpenAI)
RESPONSES_API_ONLY_MODELS: list[str] = [
	'gpt-5.1-codex',
	'gpt-5.1-codex-mini',
	'gpt-5.1-codex-max',
	'gpt-5-codex',
	'codex-mini-latest',
	'computer-use-preview',
]
@dataclass
class ChatAzureOpenAI(ChatOpenAILike):
	"""
	A client for Azure OpenAI deployments, speaking the OpenAI API schema.

	Routes requests either through the Chat Completions API (inherited from
	ChatOpenAILike) or through the Responses API for models that require it.

	Args:
		model (str): The name of the OpenAI model to use. Defaults to "not-provided".
		api_key (Optional[str]): The API key to use. Defaults to "not-provided".
		use_responses_api (bool): If True, use the Responses API instead of Chat Completions API.
			This is required for certain models like gpt-5.1-codex-mini on Azure OpenAI with
			api_version >= 2025-03-01-preview. Set to 'auto' to automatically detect based on model.
	"""

	# Model configuration
	model: str | ChatModel

	# Client initialization parameters
	api_key: str | None = None
	api_version: str | None = '2024-12-01-preview'
	azure_endpoint: str | None = None
	azure_deployment: str | None = None
	base_url: str | None = None
	azure_ad_token: str | None = None
	azure_ad_token_provider: Any | None = None
	default_headers: dict[str, str] | None = None
	default_query: dict[str, Any] | None = None

	# Responses API support
	use_responses_api: bool | str = 'auto'  # True, False, or 'auto'

	# Cached SDK client; built lazily by get_client().
	client: AsyncAzureOpenAIClient | None = None

	@property
	def provider(self) -> str:
		"""Provider identifier for this chat model."""
		return 'azure'

	def _get_client_params(self) -> dict[str, Any]:
		"""Assemble constructor kwargs for AsyncAzureOpenAI, falling back to env vars."""
		_client_params: dict[str, Any] = {}
		# Standard Azure environment-variable fallbacks for unset fields.
		self.api_key = self.api_key or os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
		self.azure_endpoint = self.azure_endpoint or os.getenv('AZURE_OPENAI_ENDPOINT')
		self.azure_deployment = self.azure_deployment or os.getenv('AZURE_OPENAI_DEPLOYMENT')
		# NOTE(review): `organization` and `http_client` come from the ChatOpenAILike base class.
		params_mapping = {
			'api_key': self.api_key,
			'api_version': self.api_version,
			'organization': self.organization,
			'azure_endpoint': self.azure_endpoint,
			'azure_deployment': self.azure_deployment,
			'base_url': self.base_url,
			'azure_ad_token': self.azure_ad_token,
			'azure_ad_token_provider': self.azure_ad_token_provider,
			'http_client': self.http_client,
		}
		if self.default_headers is not None:
			_client_params['default_headers'] = self.default_headers
		if self.default_query is not None:
			_client_params['default_query'] = self.default_query
		# Drop unset values so the SDK applies its own defaults.
		_client_params.update({k: v for k, v in params_mapping.items() if v is not None})
		return _client_params

	def get_client(self) -> AsyncAzureOpenAIClient:
		"""
		Returns an asynchronous OpenAI client.

		The client is cached on the instance, so repeated calls reuse one
		client and its connection pool.

		Returns:
			AsyncAzureOpenAIClient: An instance of the asynchronous OpenAI client.
		"""
		if self.client:
			return self.client
		_client_params: dict[str, Any] = self._get_client_params()
		if self.http_client:
			_client_params['http_client'] = self.http_client
		else:
			# Create a new async HTTP client with custom limits
			_client_params['http_client'] = httpx.AsyncClient(
				limits=httpx.Limits(max_connections=20, max_keepalive_connections=6)
			)
		self.client = AsyncAzureOpenAIClient(**_client_params)
		return self.client

	def _should_use_responses_api(self) -> bool:
		"""Determine if the Responses API should be used based on model and settings."""
		# Explicit True/False always wins over auto-detection.
		if isinstance(self.use_responses_api, bool):
			return self.use_responses_api
		# Auto-detect: use Responses API for models that require it
		model_lower = str(self.model).lower()
		for responses_only_model in RESPONSES_API_ONLY_MODELS:
			if responses_only_model.lower() in model_lower:
				return True
		return False

	def _get_usage_from_responses(self, response: Response) -> ChatInvokeUsage | None:
		"""Extract usage information from a Responses API response (None if absent)."""
		if response.usage is None:
			return None
		# Get cached tokens from input_tokens_details if available
		cached_tokens = None
		if response.usage.input_tokens_details is not None:
			cached_tokens = getattr(response.usage.input_tokens_details, 'cached_tokens', None)
		return ChatInvokeUsage(
			prompt_tokens=response.usage.input_tokens,
			prompt_cached_tokens=cached_tokens,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			completion_tokens=response.usage.output_tokens,
			total_tokens=response.usage.total_tokens,
		)

	async def _ainvoke_responses_api(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the model using the Responses API.

		This is used for models that require the Responses API (e.g., gpt-5.1-codex-mini)
		or when use_responses_api is explicitly set to True.
		"""
		# Serialize messages to Responses API input format
		input_messages = ResponsesAPIMessageSerializer.serialize_messages(messages)
		try:
			model_params: dict[str, Any] = {
				'model': self.model,
				'input': input_messages,
			}
			# Optional sampling/limit parameters — inherited from ChatOpenAILike.
			if self.temperature is not None:
				model_params['temperature'] = self.temperature
			if self.max_completion_tokens is not None:
				model_params['max_output_tokens'] = self.max_completion_tokens
			if self.top_p is not None:
				model_params['top_p'] = self.top_p
			if self.service_tier is not None:
				model_params['service_tier'] = self.service_tier
			# Handle reasoning models
			if self.reasoning_models and any(str(m).lower() in str(self.model).lower() for m in self.reasoning_models):
				# For reasoning models, use reasoning parameter instead of reasoning_effort
				model_params['reasoning'] = {'effort': self.reasoning_effort}
				# Reasoning models do not take a temperature parameter.
				model_params.pop('temperature', None)
			if output_format is None:
				# Return string response
				response = await self.get_client().responses.create(**model_params)
				usage = self._get_usage_from_responses(response)
				return ChatInvokeCompletion(
					completion=response.output_text or '',
					usage=usage,
					stop_reason=response.status if response.status else None,
				)
			else:
				# For structured output, use the text.format parameter
				json_schema = SchemaOptimizer.create_optimized_json_schema(
					output_format,
					remove_min_items=self.remove_min_items_from_schema,
					remove_defaults=self.remove_defaults_from_schema,
				)
				model_params['text'] = {
					'format': {
						'type': 'json_schema',
						'name': 'agent_output',
						'strict': True,
						'schema': json_schema,
					}
				}
				# Add JSON schema to system prompt if requested
				if self.add_schema_to_system_prompt and input_messages and input_messages[0].get('role') == 'system':
					schema_text = f'\n<json_schema>\n{json_schema}\n</json_schema>'
					content = input_messages[0].get('content', '')
					if isinstance(content, str):
						input_messages[0]['content'] = content + schema_text
					elif isinstance(content, list):
						input_messages[0]['content'] = list(content) + [{'type': 'input_text', 'text': schema_text}]
					model_params['input'] = input_messages
				if self.dont_force_structured_output:
					# Remove the text format parameter if not forcing structured output
					model_params.pop('text', None)
				response = await self.get_client().responses.create(**model_params)
				if not response.output_text:
					raise ModelProviderError(
						message='Failed to parse structured output from model response',
						status_code=500,
						model=self.name,
					)
				usage = self._get_usage_from_responses(response)
				parsed = output_format.model_validate_json(response.output_text)
				return ChatInvokeCompletion(
					completion=parsed,
					usage=usage,
					stop_reason=response.status if response.status else None,
				)
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIConnectionError as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the model with the given messages.

		This method routes to either the Responses API or the Chat Completions API
		based on the model and settings.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format
		"""
		if self._should_use_responses_api():
			return await self._ainvoke_responses_api(messages, output_format, **kwargs)
		else:
			# Use the parent class implementation (Chat Completions API)
			return await super().ainvoke(messages, output_format, **kwargs)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/azure/chat.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/base.py | """
We have switched all of our code from langchain to openai.types.chat.chat_completion_message_param.
For easier transition we have
"""
from typing import Any, Protocol, TypeVar, overload, runtime_checkable
from pydantic import BaseModel
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@runtime_checkable
class BaseChatModel(Protocol):
	"""Structural interface implemented by every chat-model wrapper in browser_use.

	Runtime-checkable, so `isinstance(obj, BaseChatModel)` works for any object
	that provides these attributes and methods.
	"""

	# Set by implementations once API credentials have been verified.
	_verified_api_keys: bool = False
	# Provider-specific model identifier string.
	model: str

	@property
	def provider(self) -> str: ...

	@property
	def name(self) -> str: ...

	@property
	def model_name(self) -> str:
		# for legacy support
		return self.model

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]: ...

	@classmethod
	def __get_pydantic_core_schema__(
		cls,
		source_type: type,
		handler: Any,
	) -> Any:
		"""
		Allow this Protocol to be used in Pydantic models -> very useful to typesafe the agent settings for example.

		Returns a schema that allows any object (since this is a Protocol).
		"""
		from pydantic_core import core_schema

		# Return a schema that accepts any object for Protocol types
		return core_schema.any_schema()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/base.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/llm/deepseek/chat.py | from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import (
APIConnectionError,
APIError,
APIStatusError,
APITimeoutError,
AsyncOpenAI,
RateLimitError,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.deepseek.serializer import DeepSeekMessageSerializer
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatDeepSeek(BaseChatModel):
	"""DeepSeek /chat/completions wrapper (OpenAI-compatible).

	Supported invocation modes (see ainvoke):
	1. Plain text / multi-turn conversation
	2. Function calling via `tools` (optionally forced for structured output)
	3. Conversation prefix continuation (beta endpoint, `prefix` + `stop`)
	"""

	model: str = 'deepseek-chat'

	# Generation parameters
	max_tokens: int | None = None
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None

	# Connection parameters
	api_key: str | None = None
	base_url: str | httpx.URL | None = 'https://api.deepseek.com/v1'
	timeout: float | httpx.Timeout | None = None
	client_params: dict[str, Any] | None = None

	@property
	def provider(self) -> str:
		"""Provider identifier for this chat model."""
		return 'deepseek'

	def _client(self) -> AsyncOpenAI:
		"""Build an AsyncOpenAI client pointed at the DeepSeek endpoint."""
		return AsyncOpenAI(
			api_key=self.api_key,
			base_url=self.base_url,
			timeout=self.timeout,
			**(self.client_params or {}),
		)

	@property
	def name(self) -> str:
		"""Model identifier, used in error reporting."""
		return self.model

	def _get_usage(self, resp: Any) -> ChatInvokeUsage | None:
		"""Map an OpenAI-style `usage` object onto ChatInvokeUsage (None when absent)."""
		usage = getattr(resp, 'usage', None)
		if usage is None:
			return None
		details = getattr(usage, 'prompt_tokens_details', None)
		cached_tokens = getattr(details, 'cached_tokens', None) if details is not None else None
		return ChatInvokeUsage(
			prompt_tokens=usage.prompt_tokens or 0,
			prompt_cached_tokens=cached_tokens,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			completion_tokens=usage.completion_tokens or 0,
			total_tokens=usage.total_tokens or 0,
		)

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: None = None,
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T],
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T] | None = None,
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		DeepSeek ainvoke supports:
		1. Regular text/multi-turn conversation
		2. Function Calling (with `tools`, or forced from `output_format`'s schema)
		3. Conversation prefix continuation (beta, prefix, stop)

		NOTE: an earlier revision also had a `response_format={'type': 'json_object'}`
		path; it was unreachable because the function-calling branch already handles
		every `output_format` that defines `model_json_schema` (and always returns
		or raises). The dead path was removed.
		"""
		client = self._client()
		ds_messages = DeepSeekMessageSerializer.serialize_messages(messages)

		# Sampling / generation parameters shared by all request shapes.
		common: dict[str, Any] = {}
		if self.temperature is not None:
			common['temperature'] = self.temperature
		if self.max_tokens is not None:
			common['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			common['top_p'] = self.top_p
		if self.seed is not None:
			common['seed'] = self.seed

		# Beta conversation prefix continuation (see official documentation)
		if self.base_url and str(self.base_url).endswith('/beta'):
			# The last assistant message must have prefix
			if ds_messages and isinstance(ds_messages[-1], dict) and ds_messages[-1].get('role') == 'assistant':
				ds_messages[-1]['prefix'] = True
			if stop:
				common['stop'] = stop

		# ① Regular multi-turn conversation/text output
		if output_format is None and not tools:
			try:
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=ds_messages,  # type: ignore
					**common,
				)
				return ChatInvokeCompletion(
					completion=resp.choices[0].message.content or '',
					usage=self._get_usage(resp),
				)
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e

		# ② Function Calling path (with tools or output_format)
		if tools or (output_format is not None and hasattr(output_format, 'model_json_schema')):
			try:
				call_tools = tools
				tool_choice = None
				if output_format is not None and hasattr(output_format, 'model_json_schema'):
					# Synthesize one tool from the Pydantic schema and force the model to call it.
					tool_name = output_format.__name__
					schema = SchemaOptimizer.create_optimized_json_schema(output_format)
					schema.pop('title', None)
					call_tools = [
						{
							'type': 'function',
							'function': {
								'name': tool_name,
								'description': f'Return a JSON object of type {tool_name}',
								'parameters': schema,
							},
						}
					]
					tool_choice = {'type': 'function', 'function': {'name': tool_name}}
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=ds_messages,  # type: ignore
					tools=call_tools,  # type: ignore
					tool_choice=tool_choice,  # type: ignore
					**common,
				)
				msg = resp.choices[0].message
				if not msg.tool_calls:
					raise ValueError('Expected tool_calls in response but got none')
				raw_args = msg.tool_calls[0].function.arguments
				parsed = json.loads(raw_args) if isinstance(raw_args, str) else raw_args
				# Only validate against output_format when one was provided.
				if output_format is not None:
					return ChatInvokeCompletion(
						completion=output_format.model_validate(parsed),
						usage=self._get_usage(resp),
					)
				else:
					# If no output_format, return dict directly
					return ChatInvokeCompletion(
						completion=parsed,
						usage=self._get_usage(resp),
					)
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e

		# Only reachable if output_format lacks model_json_schema (defensive guard).
		raise ModelProviderError('No valid ainvoke execution path for DeepSeek LLM', model=self.name)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/deepseek/chat.py",
"license": "MIT License",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/deepseek/serializer.py | from __future__ import annotations
import json
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
MessageDict = dict[str, Any]
class DeepSeekMessageSerializer:
	"""Serializer for converting browser-use messages to DeepSeek messages."""

	# -------- Content handling ----------------------------------------------

	@staticmethod
	def _serialize_text_part(part: ContentPartTextParam) -> str:
		"""Return the raw text of a text content part."""
		return part.text

	@staticmethod
	def _serialize_image_part(part: ContentPartImageParam) -> dict[str, Any]:
		"""Serialize an image part.

		Data URLs and remote URLs are sent in the same shape, so no branching
		is needed (an earlier revision had two byte-identical branches here).
		"""
		url = part.image_url.url
		return {'type': 'image_url', 'image_url': {'url': url}}

	@staticmethod
	def _serialize_content(content: Any) -> str | list[dict[str, Any]]:
		"""Normalize message content: None -> '', str unchanged, parts -> dict list."""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		serialized: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				serialized.append({'type': 'text', 'text': DeepSeekMessageSerializer._serialize_text_part(part)})
			elif part.type == 'image_url':
				serialized.append(DeepSeekMessageSerializer._serialize_image_part(part))
			elif part.type == 'refusal':
				# Refusals are surfaced as plain text so the model can see them.
				serialized.append({'type': 'text', 'text': f'[Refusal] {part.refusal}'})
		return serialized

	# -------- Tool-call handling --------------------------------------------

	@staticmethod
	def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[dict[str, Any]]:
		"""Serialize assistant tool calls to DeepSeek dicts.

		NOTE(review): `function.arguments` is emitted as a parsed dict, while the
		OpenAI chat-completions schema specifies a JSON *string*. Confirm DeepSeek
		accepts the dict form before reusing this against a strict endpoint.
		"""
		deepseek_tool_calls: list[dict[str, Any]] = []
		for tc in tool_calls:
			try:
				arguments = json.loads(tc.function.arguments)
			except json.JSONDecodeError:
				# Not valid JSON: wrap the raw string so nothing is lost.
				arguments = {'arguments': tc.function.arguments}
			deepseek_tool_calls.append(
				{
					'id': tc.id,
					'type': 'function',
					'function': {
						'name': tc.function.name,
						'arguments': arguments,
					},
				}
			)
		return deepseek_tool_calls

	# -------- Single-message serialization ----------------------------------

	@overload
	@staticmethod
	def serialize(message: UserMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> MessageDict: ...

	@staticmethod
	def serialize(message: BaseMessage) -> MessageDict:
		"""Convert one browser-use message into a DeepSeek-compatible dict."""
		if isinstance(message, UserMessage):
			return {
				'role': 'user',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, SystemMessage):
			return {
				'role': 'system',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, AssistantMessage):
			msg: MessageDict = {
				'role': 'assistant',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
			if message.tool_calls:
				msg['tool_calls'] = DeepSeekMessageSerializer._serialize_tool_calls(message.tool_calls)
			return msg
		raise ValueError(f'Unknown message type: {type(message)}')

	# -------- List serialization --------------------------------------------

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[MessageDict]:
		"""Serialize a list of messages, preserving order."""
		return [DeepSeekMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/deepseek/serializer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/exceptions.py | class ModelError(Exception):
pass
class ModelProviderError(ModelError):
"""Exception raised when a model provider returns an error."""
def __init__(
self,
message: str,
status_code: int = 502,
model: str | None = None,
):
super().__init__(message)
self.message = message
self.status_code = status_code
self.model = model
class ModelRateLimitError(ModelProviderError):
"""Exception raised when a model provider returns a rate limit error."""
def __init__(
self,
message: str,
status_code: int = 429,
model: str | None = None,
):
super().__init__(message, status_code, model)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/exceptions.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/llm/google/chat.py | import asyncio
import json
import logging
import random
import time
from dataclasses import dataclass, field
from typing import Any, Literal, TypeVar, overload
from google import genai
from google.auth.credentials import Credentials
from google.genai import types
from google.genai.types import MediaModality
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.google.serializer import GoogleMessageSerializer
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
# Model names verified to work with this wrapper. Other Gemini model strings
# are still accepted (ChatGoogle.model is typed `VerifiedGeminiModels | str`).
VerifiedGeminiModels = Literal[
	'gemini-2.0-flash',
	'gemini-2.0-flash-exp',
	'gemini-2.0-flash-lite-preview-02-05',
	'Gemini-2.0-exp',
	'gemini-2.5-flash',
	'gemini-2.5-flash-lite',
	'gemini-flash-latest',
	'gemini-flash-lite-latest',
	'gemini-2.5-pro',
	'gemini-3-pro-preview',
	'gemini-3-flash-preview',
	'gemma-3-27b-it',
	'gemma-3-4b',
	'gemma-3-12b',
	'gemma-3n-e2b',
	'gemma-3n-e4b',
]
@dataclass
class ChatGoogle(BaseChatModel):
"""
A wrapper around Google's Gemini chat model using the genai client.
This class accepts all genai.Client parameters while adding model,
temperature, and config parameters for the LLM interface.
Args:
model: The Gemini model to use
temperature: Temperature for response generation
config: Additional configuration parameters to pass to generate_content
(e.g., tools, safety_settings, etc.).
api_key: Google API key
vertexai: Whether to use Vertex AI
credentials: Google credentials object
project: Google Cloud project ID
location: Google Cloud location
http_options: HTTP options for the client
include_system_in_user: If True, system messages are included in the first user message
supports_structured_output: If True, uses native JSON mode; if False, uses prompt-based fallback
max_retries: Number of retries for retryable errors (default: 5)
retryable_status_codes: List of HTTP status codes to retry on (default: [429, 500, 502, 503, 504])
retry_base_delay: Base delay in seconds for exponential backoff (default: 1.0)
retry_max_delay: Maximum delay in seconds between retries (default: 60.0)
Example:
from google.genai import types
llm = ChatGoogle(
model='gemini-2.0-flash-exp',
config={
'tools': [types.Tool(code_execution=types.ToolCodeExecution())]
},
max_retries=5,
retryable_status_codes=[429, 500, 502, 503, 504],
retry_base_delay=1.0,
retry_max_delay=60.0,
)
"""
# Model configuration
model: VerifiedGeminiModels | str
temperature: float | None = 0.5
top_p: float | None = None
seed: int | None = None
thinking_budget: int | None = None # for Gemini 2.5: -1 for dynamic (default), 0 disables, or token count
thinking_level: Literal['minimal', 'low', 'medium', 'high'] | None = (
None # for Gemini 3: Pro supports low/high, Flash supports all levels
)
max_output_tokens: int | None = 8096
config: types.GenerateContentConfigDict | None = None
include_system_in_user: bool = False
supports_structured_output: bool = True # New flag
max_retries: int = 5 # Number of retries for retryable errors
retryable_status_codes: list[int] = field(default_factory=lambda: [429, 500, 502, 503, 504]) # Status codes to retry on
retry_base_delay: float = 1.0 # Base delay in seconds for exponential backoff
retry_max_delay: float = 60.0 # Maximum delay in seconds between retries
# Client initialization parameters
api_key: str | None = None
vertexai: bool | None = None
credentials: Credentials | None = None
project: str | None = None
location: str | None = None
http_options: types.HttpOptions | types.HttpOptionsDict | None = None
# Internal client cache to prevent connection issues
_client: genai.Client | None = None
# Static
	@property
	def provider(self) -> str:
		"""Provider identifier for this chat model."""
		return 'google'
	@property
	def logger(self) -> logging.Logger:
		"""Get logger for this chat instance"""
		# Scoped by model name so log lines identify which Gemini model emitted them.
		return logging.getLogger(f'browser_use.llm.google.{self.model}')
	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary."""
		# Define base client params
		base_params = {
			'api_key': self.api_key,
			'vertexai': self.vertexai,
			'credentials': self.credentials,
			'project': self.project,
			'location': self.location,
			'http_options': self.http_options,
		}
		# Create client_params dict with non-None values, so genai.Client
		# falls back to its own defaults for anything unset here.
		client_params = {k: v for k, v in base_params.items() if v is not None}
		return client_params
	def get_client(self) -> genai.Client:
		"""
		Returns a genai.Client instance.

		The client is cached on the instance, so repeated calls reuse one
		client (and its underlying connections).

		Returns:
			genai.Client: An instance of the Google genai client.
		"""
		if self._client is not None:
			return self._client
		client_params = self._get_client_params()
		self._client = genai.Client(**client_params)
		return self._client
	@property
	def name(self) -> str:
		"""The configured model identifier as a plain string."""
		return str(self.model)
	def _get_stop_reason(self, response: types.GenerateContentResponse) -> str | None:
		"""Extract stop_reason from Google response."""
		# Only the first candidate is consulted; None when candidates/finish_reason are absent.
		if hasattr(response, 'candidates') and response.candidates:
			return str(response.candidates[0].finish_reason) if hasattr(response.candidates[0], 'finish_reason') else None
		return None
	def _get_usage(self, response: types.GenerateContentResponse) -> ChatInvokeUsage | None:
		"""Map Gemini usage_metadata onto ChatInvokeUsage (None when absent)."""
		usage: ChatInvokeUsage | None = None
		if response.usage_metadata is not None:
			image_tokens = 0
			if response.usage_metadata.prompt_tokens_details is not None:
				# Sum only the prompt tokens attributed to the image modality.
				image_tokens = sum(
					detail.token_count or 0
					for detail in response.usage_metadata.prompt_tokens_details
					if detail.modality == MediaModality.IMAGE
				)
			usage = ChatInvokeUsage(
				prompt_tokens=response.usage_metadata.prompt_token_count or 0,
				# Thinking tokens are folded into the completion count.
				completion_tokens=(response.usage_metadata.candidates_token_count or 0)
				+ (response.usage_metadata.thoughts_token_count or 0),
				total_tokens=response.usage_metadata.total_token_count or 0,
				prompt_cached_tokens=response.usage_metadata.cached_content_token_count,
				prompt_cache_creation_tokens=None,
				prompt_image_tokens=image_tokens,
			)
		return usage
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
"""
Invoke the model with the given messages.
Args:
messages: List of chat messages
output_format: Optional Pydantic model class for structured output
Returns:
Either a string response or an instance of output_format
"""
# Serialize messages to Google format with the include_system_in_user flag
contents, system_instruction = GoogleMessageSerializer.serialize_messages(
messages, include_system_in_user=self.include_system_in_user
)
# Build config dictionary starting with user-provided config
config: types.GenerateContentConfigDict = {}
if self.config:
config = self.config.copy()
# Apply model-specific configuration (these can override config)
if self.temperature is not None:
config['temperature'] = self.temperature
# Add system instruction if present
if system_instruction:
config['system_instruction'] = system_instruction
if self.top_p is not None:
config['top_p'] = self.top_p
if self.seed is not None:
config['seed'] = self.seed
# Configure thinking based on model version
# Gemini 3 Pro: uses thinking_level only
# Gemini 3 Flash: supports both, defaults to thinking_budget=-1
# Gemini 2.5: uses thinking_budget only
is_gemini_3_pro = 'gemini-3-pro' in self.model
is_gemini_3_flash = 'gemini-3-flash' in self.model
if is_gemini_3_pro:
# Validate: thinking_budget should not be set for Gemini 3 Pro
if self.thinking_budget is not None:
self.logger.warning(
f'thinking_budget={self.thinking_budget} is deprecated for Gemini 3 Pro and may cause '
f'suboptimal performance. Use thinking_level instead.'
)
# Validate: minimal/medium only supported on Flash, not Pro
if self.thinking_level in ('minimal', 'medium'):
self.logger.warning(
f'thinking_level="{self.thinking_level}" is not supported for Gemini 3 Pro. '
f'Only "low" and "high" are valid. Falling back to "low".'
)
self.thinking_level = 'low'
# Default to 'low' for Gemini 3 Pro
if self.thinking_level is None:
self.thinking_level = 'low'
# Map to ThinkingLevel enum (SDK accepts string values)
level = types.ThinkingLevel(self.thinking_level.upper())
config['thinking_config'] = types.ThinkingConfigDict(thinking_level=level)
elif is_gemini_3_flash:
# Gemini 3 Flash supports both thinking_level and thinking_budget
# If user set thinking_level, use that; otherwise default to thinking_budget=-1
if self.thinking_level is not None:
level = types.ThinkingLevel(self.thinking_level.upper())
config['thinking_config'] = types.ThinkingConfigDict(thinking_level=level)
else:
if self.thinking_budget is None:
self.thinking_budget = -1
config['thinking_config'] = types.ThinkingConfigDict(thinking_budget=self.thinking_budget)
else:
# Gemini 2.5 and earlier: use thinking_budget only
if self.thinking_level is not None:
self.logger.warning(
f'thinking_level="{self.thinking_level}" is not supported for this model. '
f'Use thinking_budget instead (0 to disable, -1 for dynamic, or token count).'
)
# Default to -1 for dynamic/auto on 2.5 models
if self.thinking_budget is None and ('gemini-2.5' in self.model or 'gemini-flash' in self.model):
self.thinking_budget = -1
if self.thinking_budget is not None:
config['thinking_config'] = types.ThinkingConfigDict(thinking_budget=self.thinking_budget)
if self.max_output_tokens is not None:
config['max_output_tokens'] = self.max_output_tokens
async def _make_api_call():
start_time = time.time()
self.logger.debug(f'🚀 Starting API call to {self.model}')
try:
if output_format is None:
# Return string response
self.logger.debug('📄 Requesting text response')
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=contents, # type: ignore
config=config,
)
elapsed = time.time() - start_time
self.logger.debug(f'✅ Got text response in {elapsed:.2f}s')
# Handle case where response.text might be None
text = response.text or ''
if not text:
self.logger.warning('⚠️ Empty text response received')
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=text,
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# Handle structured output
if self.supports_structured_output:
# Use native JSON mode
self.logger.debug(f'🔧 Requesting structured output for {output_format.__name__}')
config['response_mime_type'] = 'application/json'
# Convert Pydantic model to Gemini-compatible schema
optimized_schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
gemini_schema = self._fix_gemini_schema(optimized_schema)
config['response_schema'] = gemini_schema
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=contents,
config=config,
)
elapsed = time.time() - start_time
self.logger.debug(f'✅ Got structured response in {elapsed:.2f}s')
usage = self._get_usage(response)
# Handle case where response.parsed might be None
if response.parsed is None:
self.logger.debug('📝 Parsing JSON from text response')
# When using response_schema, Gemini returns JSON as text
if response.text:
try:
# Handle JSON wrapped in markdown code blocks (common Gemini behavior)
text = response.text.strip()
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
self.logger.debug('🔧 Stripped ```json``` wrapper from response')
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
self.logger.debug('🔧 Stripped ``` wrapper from response')
# Parse the JSON text and validate with the Pydantic model
parsed_data = json.loads(text)
return ChatInvokeCompletion(
completion=output_format.model_validate(parsed_data),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
except (json.JSONDecodeError, ValueError) as e:
self.logger.error(f'❌ Failed to parse JSON response: {str(e)}')
self.logger.debug(f'Raw response text: {response.text[:200]}...')
raise ModelProviderError(
message=f'Failed to parse or validate response {response}: {str(e)}',
status_code=500,
model=self.model,
) from e
else:
self.logger.error('❌ No response text received')
raise ModelProviderError(
message=f'No response from model {response}',
status_code=500,
model=self.model,
)
# Ensure we return the correct type
if isinstance(response.parsed, output_format):
return ChatInvokeCompletion(
completion=response.parsed,
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# If it's not the expected type, try to validate it
return ChatInvokeCompletion(
completion=output_format.model_validate(response.parsed),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
else:
# Fallback: Request JSON in the prompt for models without native JSON mode
self.logger.debug(f'🔄 Using fallback JSON mode for {output_format.__name__}')
# Create a copy of messages to modify
modified_messages = [m.model_copy(deep=True) for m in messages]
# Add JSON instruction to the last message
if modified_messages and isinstance(modified_messages[-1].content, str):
json_instruction = f'\n\nPlease respond with a valid JSON object that matches this schema: {SchemaOptimizer.create_optimized_json_schema(output_format)}'
modified_messages[-1].content += json_instruction
# Re-serialize with modified messages
fallback_contents, fallback_system = GoogleMessageSerializer.serialize_messages(
modified_messages, include_system_in_user=self.include_system_in_user
)
# Update config with fallback system instruction if present
fallback_config = config.copy()
if fallback_system:
fallback_config['system_instruction'] = fallback_system
response = await self.get_client().aio.models.generate_content(
model=self.model,
contents=fallback_contents, # type: ignore
config=fallback_config,
)
elapsed = time.time() - start_time
self.logger.debug(f'✅ Got fallback response in {elapsed:.2f}s')
usage = self._get_usage(response)
# Try to extract JSON from the text response
if response.text:
try:
# Try to find JSON in the response
text = response.text.strip()
# Common patterns: JSON wrapped in markdown code blocks
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
# Parse and validate
parsed_data = json.loads(text)
return ChatInvokeCompletion(
completion=output_format.model_validate(parsed_data),
usage=usage,
stop_reason=self._get_stop_reason(response),
)
except (json.JSONDecodeError, ValueError) as e:
self.logger.error(f'❌ Failed to parse fallback JSON: {str(e)}')
self.logger.debug(f'Raw response text: {response.text[:200]}...')
raise ModelProviderError(
message=f'Model does not support JSON mode and failed to parse JSON from text response: {str(e)}',
status_code=500,
model=self.model,
) from e
else:
self.logger.error('❌ No response text in fallback mode')
raise ModelProviderError(
message='No response from model',
status_code=500,
model=self.model,
)
except Exception as e:
elapsed = time.time() - start_time
self.logger.error(f'💥 API call failed after {elapsed:.2f}s: {type(e).__name__}: {e}')
# Re-raise the exception
raise
# Retry logic for certain errors with exponential backoff
assert self.max_retries >= 1, 'max_retries must be at least 1'
for attempt in range(self.max_retries):
try:
return await _make_api_call()
except ModelProviderError as e:
# Retry if status code is in retryable list and we have attempts left
if e.status_code in self.retryable_status_codes and attempt < self.max_retries - 1:
# Exponential backoff with jitter: base_delay * 2^attempt + random jitter
delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
jitter = random.uniform(0, delay * 0.1) # 10% jitter
total_delay = delay + jitter
self.logger.warning(
f'⚠️ Got {e.status_code} error, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
)
await asyncio.sleep(total_delay)
continue
# Otherwise raise
raise
except Exception as e:
# For non-ModelProviderError, wrap and raise
error_message = str(e)
status_code: int | None = None
# Try to extract status code if available
if hasattr(e, 'response'):
response_obj = getattr(e, 'response', None)
if response_obj and hasattr(response_obj, 'status_code'):
status_code = getattr(response_obj, 'status_code', None)
# Enhanced timeout error handling
if 'timeout' in error_message.lower() or 'cancelled' in error_message.lower():
if isinstance(e, asyncio.CancelledError) or 'CancelledError' in str(type(e)):
error_message = 'Gemini API request was cancelled (likely timeout). Consider: 1) Reducing input size, 2) Using a different model, 3) Checking network connectivity.'
status_code = 504
else:
status_code = 408
elif any(indicator in error_message.lower() for indicator in ['forbidden', '403']):
status_code = 403
elif any(
indicator in error_message.lower()
for indicator in ['rate limit', 'resource exhausted', 'quota exceeded', 'too many requests', '429']
):
status_code = 429
elif any(
indicator in error_message.lower()
for indicator in ['service unavailable', 'internal server error', 'bad gateway', '503', '502', '500']
):
status_code = 503
raise ModelProviderError(
message=error_message,
status_code=status_code or 502,
model=self.name,
) from e
raise RuntimeError('Retry loop completed without return or exception')
	def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
		"""
		Convert a Pydantic-generated JSON schema to a Gemini-compatible schema.

		Gemini's structured-output API does not accept several standard JSON Schema
		features, so this method:
		1. Inlines all `$ref` references (Gemini has no `$defs` support).
		2. Strips unsupported keys (`additionalProperties`, `default`, and metadata
		   `title` fields - but not properties that happen to be *named* 'title').
		3. Replaces empty OBJECT `properties` with a `_placeholder` string property,
		   because Gemini rejects object types that declare no properties.

		Args:
			schema: JSON schema dict (as produced by the schema optimizer).

		Returns:
			A new schema dict safe to pass as Gemini's `response_schema`.
		"""
		# Handle $defs and $ref resolution
		if '$defs' in schema:
			defs = schema.pop('$defs')

			def resolve_refs(obj: Any) -> Any:
				# Recursively replace {'$ref': '#/$defs/Name'} nodes with a copy of
				# the referenced definition, merging any sibling keys on the ref node.
				if isinstance(obj, dict):
					if '$ref' in obj:
						ref = obj.pop('$ref')
						ref_name = ref.split('/')[-1]
						if ref_name in defs:
							# Replace the reference with the actual definition
							resolved = defs[ref_name].copy()
							# Merge any additional properties from the reference
							for key, value in obj.items():
								if key != '$ref':
									resolved[key] = value
							# Resolve refs nested inside the inlined definition too
							return resolve_refs(resolved)
						# Unknown ref target: leave the node as-is (minus '$ref')
						return obj
					else:
						# Recursively process all dictionary values
						return {k: resolve_refs(v) for k, v in obj.items()}
				elif isinstance(obj, list):
					return [resolve_refs(item) for item in obj]
				return obj

			schema = resolve_refs(schema)

		# Remove unsupported properties
		def clean_schema(obj: Any, parent_key: str | None = None) -> Any:
			# parent_key is the key under which `obj` was found, used to tell a
			# metadata 'title' field apart from a property literally named 'title'.
			if isinstance(obj, dict):
				# Remove unsupported properties
				cleaned = {}
				for key, value in obj.items():
					# Only strip 'title' when it's a JSON Schema metadata field (not inside 'properties')
					# 'title' as a metadata field appears at schema level, not as a property name
					is_metadata_title = key == 'title' and parent_key != 'properties'
					if key not in ['additionalProperties', 'default'] and not is_metadata_title:
						cleaned_value = clean_schema(value, parent_key=key)
						# Handle empty object properties - Gemini doesn't allow empty OBJECT types
						if (
							key == 'properties'
							and isinstance(cleaned_value, dict)
							and len(cleaned_value) == 0
							and isinstance(obj.get('type', ''), str)
							and obj.get('type', '').upper() == 'OBJECT'
						):
							# Convert empty object to have at least one property
							cleaned['properties'] = {'_placeholder': {'type': 'string'}}
						else:
							cleaned[key] = cleaned_value

				# If this is an object type with empty properties, add a placeholder
				# (covers objects whose 'properties' only became empty after cleaning)
				if (
					isinstance(cleaned.get('type', ''), str)
					and cleaned.get('type', '').upper() == 'OBJECT'
					and 'properties' in cleaned
					and isinstance(cleaned['properties'], dict)
					and len(cleaned['properties']) == 0
				):
					cleaned['properties'] = {'_placeholder': {'type': 'string'}}

				return cleaned
			elif isinstance(obj, list):
				return [clean_schema(item, parent_key=parent_key) for item in obj]
			return obj

		return clean_schema(schema)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/google/chat.py",
"license": "MIT License",
"lines": 519,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/google/serializer.py | import base64
from google.genai.types import Content, ContentListUnion, Part
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
UserMessage,
)
class GoogleMessageSerializer:
	"""Serializer for converting messages to Google Gemini format."""

	@staticmethod
	def serialize_messages(
		messages: list[BaseMessage], include_system_in_user: bool = False
	) -> tuple[ContentListUnion, str | None]:
		"""
		Convert a list of BaseMessages to Google format, extracting system message.

		Google handles system instructions separately from the conversation, so we need to:
		1. Extract any system messages and return them separately as a string (or include in first user message if flag is set)
		2. Convert the remaining messages to Content objects

		Args:
			messages: List of messages to convert
			include_system_in_user: If True, system/developer messages are prepended to the first user message

		Returns:
			A tuple of (formatted_messages, system_message) where:
			- formatted_messages: List of Content objects for the conversation
			- system_message: System instruction string or None
		"""
		# Deep-copy so nothing below can mutate the caller's message objects
		messages = [m.model_copy(deep=True) for m in messages]

		formatted_messages: ContentListUnion = []
		system_message: str | None = None
		# Collected system/developer texts when include_system_in_user is set;
		# flushed into the first user message and then cleared.
		system_parts: list[str] = []

		for i, message in enumerate(messages):
			role = message.role if hasattr(message, 'role') else None

			# Handle system/developer messages
			if isinstance(message, SystemMessage) or role in ['system', 'developer']:
				# Extract system message content as string
				if isinstance(message.content, str):
					if include_system_in_user:
						system_parts.append(message.content)
					else:
						system_message = message.content
				elif message.content is not None:
					# Handle Iterable of content parts (only text parts are kept)
					parts = []
					for part in message.content:
						if part.type == 'text':
							parts.append(part.text)
					combined_text = '\n'.join(parts)
					if include_system_in_user:
						system_parts.append(combined_text)
					else:
						system_message = combined_text
				# System messages never become Content entries in the conversation
				continue

			# Determine the role for non-system messages
			if isinstance(message, UserMessage):
				role = 'user'
			elif isinstance(message, AssistantMessage):
				role = 'model'
			else:
				# Default to user for any unknown message types
				role = 'user'

			# Initialize message parts
			message_parts: list[Part] = []

			# If this is the first user message and we have system parts, prepend them
			if include_system_in_user and system_parts and role == 'user' and not formatted_messages:
				system_text = '\n\n'.join(system_parts)
				if isinstance(message.content, str):
					message_parts.append(Part.from_text(text=f'{system_text}\n\n{message.content}'))
				else:
					# Add system text as the first part
					# NOTE(review): in this branch only the system text is appended -
					# the first user message's own non-string parts are not serialized.
					# Looks intentional-but-lossy; confirm against callers.
					message_parts.append(Part.from_text(text=system_text))
				system_parts = []  # Clear after using
			else:
				# Extract content and create parts normally
				if isinstance(message.content, str):
					# Regular text content
					message_parts = [Part.from_text(text=message.content)]
				elif message.content is not None:
					# Handle Iterable of content parts
					for part in message.content:
						if part.type == 'text':
							message_parts.append(Part.from_text(text=part.text))
						elif part.type == 'refusal':
							message_parts.append(Part.from_text(text=f'[Refusal] {part.refusal}'))
						elif part.type == 'image_url':
							# Handle images
							url = part.image_url.url

							# Format: data:image/jpeg;base64,<data>
							header, data = url.split(',', 1)

							# Decode base64 to bytes
							image_bytes = base64.b64decode(data)

							# Use the media_type from ImageURL, which correctly identifies the image format
							mime_type = part.image_url.media_type

							# Add image part
							image_part = Part.from_bytes(data=image_bytes, mime_type=mime_type)
							message_parts.append(image_part)

			# Create the Content object (skip messages that produced no parts)
			if message_parts:
				final_message = Content(role=role, parts=message_parts)
				# for some reason, the type checker is not able to infer the type of formatted_messages
				formatted_messages.append(final_message)  # type: ignore

		return formatted_messages, system_message
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/google/serializer.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/groq/chat.py | import logging
from dataclasses import dataclass
from typing import Any, Literal, TypeVar, overload
from groq import (
APIError,
APIResponseValidationError,
APIStatusError,
AsyncGroq,
NotGiven,
RateLimitError,
Timeout,
)
from groq.types.chat import ChatCompletion, ChatCompletionToolChoiceOptionParam, ChatCompletionToolParam
from groq.types.chat.completion_create_params import (
ResponseFormatResponseFormatJsonSchema,
ResponseFormatResponseFormatJsonSchemaJsonSchema,
)
from httpx import URL
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel, ChatInvokeCompletion
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.groq.parser import try_parse_groq_failed_generation
from browser_use.llm.groq.serializer import GroqMessageSerializer
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeUsage
# Model IDs that have been verified to work with Browser Use via Groq.
GroqVerifiedModels = Literal[
	'meta-llama/llama-4-maverick-17b-128e-instruct',
	'meta-llama/llama-4-scout-17b-16e-instruct',
	'qwen/qwen3-32b',
	'moonshotai/kimi-k2-instruct',
	'openai/gpt-oss-20b',
	'openai/gpt-oss-120b',
]

# Models whose structured output is requested via response_format=json_schema.
JsonSchemaModels = [
	'meta-llama/llama-4-maverick-17b-128e-instruct',
	'meta-llama/llama-4-scout-17b-16e-instruct',
	'openai/gpt-oss-20b',
	'openai/gpt-oss-120b',
]

# Models where structured output must go through forced tool calling instead.
ToolCallingModels = [
	'moonshotai/kimi-k2-instruct',
]

# Pydantic model type produced by structured-output invocations.
T = TypeVar('T', bound=BaseModel)

logger = logging.getLogger(__name__)
@dataclass
class ChatGroq(BaseChatModel):
	"""
	A wrapper around AsyncGroq that implements the BaseLLM protocol.

	Structured output is produced either via a native JSON-schema response format
	or via forced tool calling, depending on the model (see JsonSchemaModels /
	ToolCallingModels). When Groq rejects a generation with an APIStatusError, a
	best-effort manual parse of the failed generation is attempted before giving up.
	"""

	# Model configuration
	model: GroqVerifiedModels | str

	# Model params
	temperature: float | None = None
	service_tier: Literal['auto', 'on_demand', 'flex'] | None = None
	top_p: float | None = None
	seed: int | None = None

	# Client initialization parameters
	api_key: str | None = None
	base_url: str | URL | None = None
	timeout: float | Timeout | NotGiven | None = None
	max_retries: int = 10  # Increase default retries for automation reliability

	def get_client(self) -> AsyncGroq:
		"""Build a fresh AsyncGroq client from the stored connection settings."""
		return AsyncGroq(api_key=self.api_key, base_url=self.base_url, timeout=self.timeout, max_retries=self.max_retries)

	@property
	def provider(self) -> str:
		return 'groq'

	@property
	def name(self) -> str:
		return str(self.model)

	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
		"""Map Groq usage stats onto ChatInvokeUsage; None if the response carried no usage."""
		usage = (
			ChatInvokeUsage(
				prompt_tokens=response.usage.prompt_tokens,
				completion_tokens=response.usage.completion_tokens,
				total_tokens=response.usage.total_tokens,
				prompt_cached_tokens=None,  # Groq doesn't support cached tokens
				prompt_cache_creation_tokens=None,
				prompt_image_tokens=None,
			)
			if response.usage is not None
			else None
		)
		return usage

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke the model, optionally validating the output against a Pydantic schema.

		Raises:
			ModelRateLimitError: When Groq reports rate limiting.
			ModelProviderError: For all other provider failures.
		"""
		groq_messages = GroqMessageSerializer.serialize_messages(messages)

		try:
			if output_format is None:
				return await self._invoke_regular_completion(groq_messages)
			else:
				return await self._invoke_structured_output(groq_messages, output_format)

		except RateLimitError as e:
			raise ModelRateLimitError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
		except APIResponseValidationError as e:
			raise ModelProviderError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
		except APIStatusError as e:
			if output_format is None:
				raise ModelProviderError(message=e.response.text, status_code=e.response.status_code, model=self.name) from e
			else:
				# Groq sometimes rejects generations its own validator dislikes even
				# though the raw text is salvageable - try a manual parse first.
				try:
					logger.debug(f'Groq failed generation: {e.response.text}; fallback to manual parsing')
					parsed_response = try_parse_groq_failed_generation(e, output_format)
					logger.debug('Manual error parsing successful ✅')
					return ChatInvokeCompletion(
						completion=parsed_response,
						usage=None,  # because this is a hacky way to get the outputs
						# TODO: @groq needs to fix their parsers and validators
					)
				except Exception as _:
					raise ModelProviderError(message=str(e), status_code=e.response.status_code, model=self.name) from e
		except APIError as e:
			raise ModelProviderError(message=e.message, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e

	async def _invoke_regular_completion(self, groq_messages) -> ChatInvokeCompletion[str]:
		"""Handle regular completion without structured output."""
		chat_completion = await self.get_client().chat.completions.create(
			messages=groq_messages,
			model=self.model,
			service_tier=self.service_tier,
			temperature=self.temperature,
			top_p=self.top_p,
			seed=self.seed,
		)

		usage = self._get_usage(chat_completion)
		return ChatInvokeCompletion(
			completion=chat_completion.choices[0].message.content or '',
			usage=usage,
		)

	async def _invoke_structured_output(self, groq_messages, output_format: type[T]) -> ChatInvokeCompletion[T]:
		"""Handle structured output using either tool calling or JSON schema.

		Raises:
			ModelProviderError: If the response carries no message content to parse.
		"""
		schema = SchemaOptimizer.create_optimized_json_schema(output_format)

		# Dispatch on model capability: kimi needs tool calling, the rest take json_schema
		if self.model in ToolCallingModels:
			response = await self._invoke_with_tool_calling(groq_messages, output_format, schema)
		else:
			response = await self._invoke_with_json_schema(groq_messages, output_format, schema)

		if not response.choices[0].message.content:
			raise ModelProviderError(
				message='No content in response',
				status_code=500,
				model=self.name,
			)

		parsed_response = output_format.model_validate_json(response.choices[0].message.content)

		usage = self._get_usage(response)
		return ChatInvokeCompletion(
			completion=parsed_response,
			usage=usage,
		)

	async def _invoke_with_tool_calling(self, groq_messages, output_format: type[T], schema) -> ChatCompletion:
		"""Handle structured output using tool calling."""
		# A single 'required' tool forces the model to emit arguments matching the schema
		tool = ChatCompletionToolParam(
			function={
				'name': output_format.__name__,
				'description': f'Extract information in the format of {output_format.__name__}',
				'parameters': schema,
			},
			type='function',
		)
		tool_choice: ChatCompletionToolChoiceOptionParam = 'required'

		return await self.get_client().chat.completions.create(
			model=self.model,
			messages=groq_messages,
			temperature=self.temperature,
			top_p=self.top_p,
			seed=self.seed,
			tools=[tool],
			tool_choice=tool_choice,
			service_tier=self.service_tier,
		)

	async def _invoke_with_json_schema(self, groq_messages, output_format: type[T], schema) -> ChatCompletion:
		"""Handle structured output using JSON schema."""
		return await self.get_client().chat.completions.create(
			model=self.model,
			messages=groq_messages,
			temperature=self.temperature,
			top_p=self.top_p,
			seed=self.seed,
			response_format=ResponseFormatResponseFormatJsonSchema(
				json_schema=ResponseFormatResponseFormatJsonSchemaJsonSchema(
					name=output_format.__name__,
					description='Model output schema',
					schema=schema,
				),
				type='json_schema',
			),
			service_tier=self.service_tier,
		)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/groq/chat.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/groq/parser.py | import json
import logging
import re
from typing import TypeVar
from groq import APIStatusError
from pydantic import BaseModel
logger = logging.getLogger(__name__)

# Pydantic model type the caller expects the failed generation to parse into.
T = TypeVar('T', bound=BaseModel)


class ParseFailedGenerationError(Exception):
	"""Raised when a failed Groq generation payload is missing or unrecoverable."""

	pass
def try_parse_groq_failed_generation(
	error: APIStatusError,
	output_format: type[T],
) -> T:
	"""Recover structured output from a Groq `failed_generation` error payload.

	When Groq's own validation rejects a generation, the raw model output is still
	present in ``error.body['error']['failed_generation']``. This function extracts
	that text, strips markdown code fences and HTML-like wrapper tags, trims
	trailing junk after the balanced JSON object, escapes stray control characters,
	and validates the result against ``output_format``.

	Args:
		error: The APIStatusError raised by the Groq SDK.
		output_format: Pydantic model class to validate the recovered JSON against.

	Returns:
		A validated instance of ``output_format``.

	Raises:
		ParseFailedGenerationError: If the payload is missing or unrecoverable.
		ValueError: If the cleaned content still is not valid JSON.
	"""
	try:
		content = error.body['error']['failed_generation']  # type: ignore

		# If content is wrapped in code blocks, extract just the JSON part
		if '```' in content:
			# Find the JSON content between code blocks
			content = content.split('```')[1]
			# Remove language identifier if present (e.g., 'json\n')
			if '\n' in content:
				content = content.split('\n', 1)[1]

		# remove html-like tags before the first { and after the last }
		# This handles cases like <|header_start|>assistant<|header_end|> and <function=AgentOutput>
		# Only remove content before { if content doesn't already start with {
		if not content.strip().startswith('{'):
			content = re.sub(r'^.*?(?=\{)', '', content, flags=re.DOTALL)

		# Remove common HTML-like tags and patterns at the end, but be more conservative
		# Look for patterns like </function>, <|header_start|>, etc. after the JSON
		content = re.sub(r'\}(\s*<[^>]*>.*?$)', '}', content, flags=re.DOTALL)
		content = re.sub(r'\}(\s*<\|[^|]*\|>.*?$)', '}', content, flags=re.DOTALL)

		# Handle extra characters after the JSON, including stray braces
		# Find the position of the last } that would close the main JSON object
		content = content.strip()
		if content.endswith('}'):
			# Try to parse and see if we get valid JSON
			try:
				json.loads(content)
			except json.JSONDecodeError:
				# If parsing fails, try to find the correct end of the JSON
				# by counting braces and removing anything after the balanced JSON
				brace_count = 0
				last_valid_pos = -1
				for i, char in enumerate(content):
					if char == '{':
						brace_count += 1
					elif char == '}':
						brace_count -= 1
						if brace_count == 0:
							last_valid_pos = i + 1
							break

				if last_valid_pos > 0:
					content = content[:last_valid_pos]

		# Fix control characters in JSON strings before parsing
		# This handles cases where literal control characters appear in JSON values
		content = _fix_control_characters_in_json(content)

		# Parse the cleaned content
		result_dict = json.loads(content)

		# some models occasionally respond with a list containing one dict: https://github.com/browser-use/browser-use/issues/1458
		if isinstance(result_dict, list) and len(result_dict) == 1 and isinstance(result_dict[0], dict):
			result_dict = result_dict[0]

		logger.debug(f'Successfully parsed model output: {result_dict}')
		return output_format.model_validate(result_dict)

	except KeyError as e:
		# The error payload carried no 'failed_generation' field - nothing to recover
		raise ParseFailedGenerationError(e) from e

	except json.JSONDecodeError as e:
		logger.warning(f'Failed to parse model output: {content} {str(e)}')
		# Fix: chain the decode error (previously raised without `from e`, which
		# dropped the root cause from the traceback - ruff B904)
		raise ValueError(f'Could not parse response. {str(e)}') from e

	except Exception as e:
		raise ParseFailedGenerationError(error.response.text) from e
def _fix_control_characters_in_json(content: str) -> str:
	"""Escape raw control characters inside JSON string values so the text parses.

	Already-valid JSON is returned untouched. Otherwise the text is scanned with a
	small string-aware state machine: characters outside string literals pass
	through unchanged, while unescaped control characters found inside string
	literals are rewritten as their JSON escape sequences (named escapes for
	\\n, \\r, \\t, \\b, \\f; \\uXXXX for everything else below 0x20).
	"""
	try:
		# Fast path: nothing to fix if the content already parses
		json.loads(content)
		return content
	except json.JSONDecodeError:
		pass

	# Named JSON escapes for the common whitespace control characters
	named_escapes = {'\n': '\\n', '\r': '\\r', '\t': '\\t', '\b': '\\b', '\f': '\\f'}

	out: list[str] = []
	inside_string = False
	pending_escape = False  # True when the previous in-string char was a backslash

	for ch in content:
		if not inside_string:
			# Structural JSON (braces, colons, numbers, whitespace): copy verbatim,
			# toggling into string mode on an opening quote.
			if ch == '"':
				inside_string = True
			out.append(ch)
		elif pending_escape:
			# This character is already escaped - keep it as written
			out.append(ch)
			pending_escape = False
		elif ch == '\\':
			out.append(ch)
			pending_escape = True
		elif ch == '"':
			# Closing quote ends the string literal
			out.append(ch)
			inside_string = False
		elif ch in named_escapes:
			out.append(named_escapes[ch])
		elif ord(ch) < 32:
			# Any other control character becomes a unicode escape
			out.append(f'\\u{ord(ch):04x}')
		else:
			out.append(ch)

	return ''.join(out)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/groq/parser.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/groq/serializer.py | from typing import overload
from groq.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageParam,
ChatCompletionMessageToolCallParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from groq.types.chat.chat_completion_content_part_image_param import ImageURL
from groq.types.chat.chat_completion_message_tool_call_param import Function
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class GroqMessageSerializer:
	"""Serializer for converting between custom message types and OpenAI message param types."""

	# region - Serialize content parts

	@staticmethod
	def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
		"""Convert a text content part to the Groq/OpenAI text param shape."""
		return ChatCompletionContentPartTextParam(text=part.text, type='text')

	@staticmethod
	def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam:
		"""Convert an image content part (URL or data URI) to the Groq/OpenAI image param shape."""
		return ChatCompletionContentPartImageParam(
			image_url=ImageURL(url=part.image_url.url, detail=part.image_url.detail),
			type='image_url',
		)

	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
		"""Serialize content for user messages (text and images allowed)."""
		if isinstance(content, str):
			return content

		serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part))
			elif part.type == 'image_url':
				serialized_parts.append(GroqMessageSerializer._serialize_content_part_image(part))
		return serialized_parts

	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str:
		"""Serialize content for system messages (text only).

		Groq takes system content as a single string, so text parts are joined with newlines.
		"""
		if isinstance(content, str):
			return content

		serialized_parts: list[str] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part)['text'])
		return '\n'.join(serialized_parts)

	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str | None:
		"""Serialize content for assistant messages (text and refusal allowed).

		Refusal parts are silently dropped here; only text parts are joined.
		"""
		if content is None:
			return None
		if isinstance(content, str):
			return content

		serialized_parts: list[str] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(GroqMessageSerializer._serialize_content_part_text(part)['text'])
		return '\n'.join(serialized_parts)

	@staticmethod
	def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageToolCallParam:
		"""Convert an internal ToolCall to the Groq/OpenAI tool-call param shape."""
		return ChatCompletionMessageToolCallParam(
			id=tool_call.id,
			function=Function(name=tool_call.function.name, arguments=tool_call.function.arguments),
			type='function',
		)

	# endregion

	# region - Serialize overloads

	@overload
	@staticmethod
	def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...

	@staticmethod
	def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
		"""Serialize a custom message to an OpenAI message param.

		Raises:
			ValueError: If the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			user_result: ChatCompletionUserMessageParam = {
				'role': 'user',
				'content': GroqMessageSerializer._serialize_user_content(message.content),
			}
			if message.name is not None:
				user_result['name'] = message.name
			return user_result

		elif isinstance(message, SystemMessage):
			system_result: ChatCompletionSystemMessageParam = {
				'role': 'system',
				'content': GroqMessageSerializer._serialize_system_content(message.content),
			}
			if message.name is not None:
				system_result['name'] = message.name
			return system_result

		elif isinstance(message, AssistantMessage):
			# Handle content serialization
			content = None
			if message.content is not None:
				content = GroqMessageSerializer._serialize_assistant_content(message.content)

			assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}

			# Only add content if it's not None
			if content is not None:
				assistant_result['content'] = content

			if message.name is not None:
				assistant_result['name'] = message.name

			if message.tool_calls:
				assistant_result['tool_calls'] = [GroqMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]

			return assistant_result

		else:
			raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
		"""Serialize a whole conversation to a list of Groq/OpenAI message params."""
		return [GroqMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/groq/serializer.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/messages.py | """
This implementation is based on the OpenAI types, while removing all the parts that are not needed for Browser Use.
"""
# region - Content parts
from typing import Literal, Union
from pydantic import BaseModel
def _truncate(text: str, max_length: int = 50) -> str:
"""Truncate text to max_length characters, adding ellipsis if truncated."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + '...'
def _format_image_url(url: str, max_length: int = 50) -> str:
"""Format image URL for display, truncating if necessary."""
if url.startswith('data:'):
# Base64 image
media_type = url.split(';')[0].split(':')[1] if ';' in url else 'image'
return f'<base64 {media_type}>'
else:
# Regular URL
return _truncate(url, max_length)
class ContentPartTextParam(BaseModel):
	text: str
	type: Literal['text'] = 'text'

	def __str__(self) -> str:
		preview = _truncate(self.text)
		return f'Text: {preview}'

	def __repr__(self) -> str:
		preview = _truncate(self.text)
		return f'ContentPartTextParam(text={preview})'
class ContentPartRefusalParam(BaseModel):
	refusal: str
	type: Literal['refusal'] = 'refusal'

	def __str__(self) -> str:
		preview = _truncate(self.refusal)
		return f'Refusal: {preview}'

	def __repr__(self) -> str:
		preview = _truncate(repr(self.refusal), 50)
		return f'ContentPartRefusalParam(refusal={preview})'
SupportedImageMediaType = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
class ImageURL(BaseModel):
	url: str
	"""Either a URL of the image or the base64 encoded image data."""

	detail: Literal['auto', 'low', 'high'] = 'auto'
	"""Specifies the detail level of the image.

	Learn more in the
	[Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
	"""

	# needed for Anthropic
	media_type: SupportedImageMediaType = 'image/png'

	def __str__(self) -> str:
		shown_url = _format_image_url(self.url)
		return f'🖼️ Image[{self.media_type}, detail={self.detail}]: {shown_url}'

	def __repr__(self) -> str:
		shown_url = _format_image_url(self.url, 30)
		return f'ImageURL(url={repr(shown_url)}, detail={repr(self.detail)}, media_type={repr(self.media_type)})'
class ContentPartImageParam(BaseModel):
	image_url: ImageURL
	type: Literal['image_url'] = 'image_url'

	def __str__(self) -> str:
		# Delegate display to the nested ImageURL.
		return f'{self.image_url}'

	def __repr__(self) -> str:
		return f'ContentPartImageParam(image_url={self.image_url!r})'
class Function(BaseModel):
	arguments: str
	"""
	The arguments to call the function with, as generated by the model in JSON
	format. Note that the model does not always generate valid JSON, and may
	hallucinate parameters not defined by your function schema. Validate the
	arguments in your code before calling your function.
	"""

	name: str
	"""The name of the function to call."""

	def __str__(self) -> str:
		preview = _truncate(self.arguments, 80)
		return f'{self.name}({preview})'

	def __repr__(self) -> str:
		preview = _truncate(repr(self.arguments), 50)
		return f'Function(name={self.name!r}, arguments={preview})'
class ToolCall(BaseModel):
	id: str
	"""The ID of the tool call."""

	function: Function
	"""The function that the model called."""

	type: Literal['function'] = 'function'
	"""The type of the tool. Currently, only `function` is supported."""

	def __str__(self) -> str:
		rendered_function = str(self.function)
		return f'ToolCall[{self.id}]: {rendered_function}'

	def __repr__(self) -> str:
		return f'ToolCall(id={self.id!r}, function={self.function!r})'
# endregion
# region - Message types
class _MessageBase(BaseModel):
	"""Base class for all message types"""

	# Narrowed to a single literal value by each concrete subclass below.
	role: Literal['user', 'system', 'assistant']

	cache: bool = False
	"""Whether to cache this message. This is only applicable when using Anthropic models.
	"""
class UserMessage(_MessageBase):
	role: Literal['user'] = 'user'
	"""The role of the messages author, in this case `user`."""

	content: str | list[ContentPartTextParam | ContentPartImageParam]
	"""The contents of the user message."""

	name: str | None = None
	"""An optional name for the participant.

	Provides the model information to differentiate between participants of the same
	role.
	"""

	@property
	def text(self) -> str:
		"""
		Automatically parse the text inside content, whether it's a string or a list of content parts.
		"""
		if isinstance(self.content, str):
			return self.content
		if isinstance(self.content, list):
			text_parts = [part.text for part in self.content if part.type == 'text']
			return '\n'.join(text_parts)
		return ''

	def __str__(self) -> str:
		return f'UserMessage(content={self.text})'

	def __repr__(self) -> str:
		return f'UserMessage(content={repr(self.text)})'
class SystemMessage(_MessageBase):
	role: Literal['system'] = 'system'
	"""The role of the messages author, in this case `system`."""

	content: str | list[ContentPartTextParam]
	"""The contents of the system message."""

	name: str | None = None

	@property
	def text(self) -> str:
		"""
		Automatically parse the text inside content, whether it's a string or a list of content parts.
		"""
		if isinstance(self.content, str):
			return self.content
		if isinstance(self.content, list):
			text_parts = [part.text for part in self.content if part.type == 'text']
			return '\n'.join(text_parts)
		return ''

	def __str__(self) -> str:
		return f'SystemMessage(content={self.text})'

	def __repr__(self) -> str:
		return f'SystemMessage(content={repr(self.text)})'
class AssistantMessage(_MessageBase):
	role: Literal['assistant'] = 'assistant'
	"""The role of the messages author, in this case `assistant`."""

	content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None
	"""The contents of the assistant message."""

	name: str | None = None

	refusal: str | None = None
	"""The refusal message by the assistant."""

	tool_calls: list[ToolCall] = []
	"""The tool calls generated by the model, such as function calls."""

	@property
	def text(self) -> str:
		"""
		Automatically parse the text inside content, whether it's a string or a list of content parts.
		"""
		if isinstance(self.content, str):
			return self.content
		if not isinstance(self.content, list):
			return ''
		pieces: list[str] = []
		for part in self.content:
			if part.type == 'text':
				pieces.append(part.text)
			elif part.type == 'refusal':
				pieces.append(f'[Refusal] {part.refusal}')
		return ''.join(pieces)

	def __str__(self) -> str:
		return f'AssistantMessage(content={self.text})'

	def __repr__(self) -> str:
		return f'AssistantMessage(content={repr(self.text)})'
BaseMessage = Union[UserMessage, SystemMessage, AssistantMessage]
# endregion
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/messages.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/models.py | """
Convenient access to LLM models.
Usage:
from browser_use import llm
# Simple model access
model = llm.azure_gpt_4_1_mini
model = llm.openai_gpt_4o
model = llm.google_gemini_2_5_pro
model = llm.bu_latest # or bu_1_0, bu_2_0
"""
import os
from typing import TYPE_CHECKING
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.cerebras.chat import ChatCerebras
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.mistral.chat import ChatMistral
from browser_use.llm.openai.chat import ChatOpenAI
# Optional OCI import
try:
from browser_use.llm.oci_raw.chat import ChatOCIRaw
OCI_AVAILABLE = True
except ImportError:
ChatOCIRaw = None
OCI_AVAILABLE = False
if TYPE_CHECKING:
from browser_use.llm.base import BaseChatModel
# Type stubs for IDE autocomplete
openai_gpt_4o: 'BaseChatModel'
openai_gpt_4o_mini: 'BaseChatModel'
openai_gpt_4_1_mini: 'BaseChatModel'
openai_o1: 'BaseChatModel'
openai_o1_mini: 'BaseChatModel'
openai_o1_pro: 'BaseChatModel'
openai_o3: 'BaseChatModel'
openai_o3_mini: 'BaseChatModel'
openai_o3_pro: 'BaseChatModel'
openai_o4_mini: 'BaseChatModel'
openai_gpt_5: 'BaseChatModel'
openai_gpt_5_mini: 'BaseChatModel'
openai_gpt_5_nano: 'BaseChatModel'
azure_gpt_4o: 'BaseChatModel'
azure_gpt_4o_mini: 'BaseChatModel'
azure_gpt_4_1_mini: 'BaseChatModel'
azure_o1: 'BaseChatModel'
azure_o1_mini: 'BaseChatModel'
azure_o1_pro: 'BaseChatModel'
azure_o3: 'BaseChatModel'
azure_o3_mini: 'BaseChatModel'
azure_o3_pro: 'BaseChatModel'
azure_gpt_5: 'BaseChatModel'
azure_gpt_5_mini: 'BaseChatModel'
google_gemini_2_0_flash: 'BaseChatModel'
google_gemini_2_0_pro: 'BaseChatModel'
google_gemini_2_5_pro: 'BaseChatModel'
google_gemini_2_5_flash: 'BaseChatModel'
google_gemini_2_5_flash_lite: 'BaseChatModel'
mistral_large: 'BaseChatModel'
mistral_medium: 'BaseChatModel'
mistral_small: 'BaseChatModel'
codestral: 'BaseChatModel'
pixtral_large: 'BaseChatModel'
cerebras_llama3_1_8b: 'BaseChatModel'
cerebras_llama3_3_70b: 'BaseChatModel'
cerebras_gpt_oss_120b: 'BaseChatModel'
cerebras_llama_4_scout_17b_16e_instruct: 'BaseChatModel'
cerebras_llama_4_maverick_17b_128e_instruct: 'BaseChatModel'
cerebras_qwen_3_32b: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_instruct_2507: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_thinking_2507: 'BaseChatModel'
cerebras_qwen_3_coder_480b: 'BaseChatModel'
bu_latest: 'BaseChatModel'
bu_1_0: 'BaseChatModel'
bu_2_0: 'BaseChatModel'
def get_llm_by_name(model_name: str):
	"""
	Factory function to create LLM instances from string names with API keys from environment.

	Args:
		model_name: String name like 'azure_gpt_4_1_mini', 'openai_gpt_4o', etc.
			The part before the first underscore is the provider; the rest is the model.

	Returns:
		LLM instance with API keys from environment variables

	Raises:
		ValueError: If model_name is empty, has no provider prefix, or names an unknown provider
	"""
	if not model_name:
		raise ValueError('Model name cannot be empty')

	# Handle top-level Mistral aliases without provider prefix
	mistral_aliases = {
		'mistral_large': 'mistral-large-latest',
		'mistral_medium': 'mistral-medium-latest',
		'mistral_small': 'mistral-small-latest',
		'codestral': 'codestral-latest',
		'pixtral_large': 'pixtral-large-latest',
	}
	if model_name in mistral_aliases:
		api_key = os.getenv('MISTRAL_API_KEY')
		base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
		return ChatMistral(model=mistral_aliases[model_name], api_key=api_key, base_url=base_url)

	# Parse model name: 'provider_model_name'
	parts = model_name.split('_', 1)
	if len(parts) < 2:
		raise ValueError(f"Invalid model name format: '{model_name}'. Expected format: 'provider_model_name'")
	provider = parts[0]
	model_part = parts[1]

	# Convert underscores back to dots/dashes for actual model names
	if 'gpt_4_1_mini' in model_part:
		model = model_part.replace('gpt_4_1_mini', 'gpt-4.1-mini')
	elif 'gpt_4o_mini' in model_part:
		model = model_part.replace('gpt_4o_mini', 'gpt-4o-mini')
	elif 'gpt_4o' in model_part:
		model = model_part.replace('gpt_4o', 'gpt-4o')
	elif 'gemini_2_0' in model_part:
		model = model_part.replace('gemini_2_0', 'gemini-2.0').replace('_', '-')
	elif 'gemini_2_5' in model_part:
		model = model_part.replace('gemini_2_5', 'gemini-2.5').replace('_', '-')
	elif 'llama3_1' in model_part:
		model = model_part.replace('llama3_1', 'llama3.1').replace('_', '-')
	elif 'llama3_3' in model_part:
		model = model_part.replace('llama3_3', 'llama-3.3').replace('_', '-')
	elif 'llama_4_scout' in model_part:
		model = model_part.replace('llama_4_scout', 'llama-4-scout').replace('_', '-')
	elif 'llama_4_maverick' in model_part:
		model = model_part.replace('llama_4_maverick', 'llama-4-maverick').replace('_', '-')
	elif 'gpt_oss_120b' in model_part:
		model = model_part.replace('gpt_oss_120b', 'gpt-oss-120b')
	elif 'qwen_3_32b' in model_part:
		model = model_part.replace('qwen_3_32b', 'qwen-3-32b')
	elif 'qwen_3_235b_a22b_instruct' in model_part:
		# Names without an explicit date suffix resolve to the 2507 snapshot.
		if model_part.endswith('_2507'):
			model = model_part.replace('qwen_3_235b_a22b_instruct_2507', 'qwen-3-235b-a22b-instruct-2507')
		else:
			model = model_part.replace('qwen_3_235b_a22b_instruct', 'qwen-3-235b-a22b-instruct-2507')
	elif 'qwen_3_235b_a22b_thinking' in model_part:
		if model_part.endswith('_2507'):
			model = model_part.replace('qwen_3_235b_a22b_thinking_2507', 'qwen-3-235b-a22b-thinking-2507')
		else:
			model = model_part.replace('qwen_3_235b_a22b_thinking', 'qwen-3-235b-a22b-thinking-2507')
	elif 'qwen_3_coder_480b' in model_part:
		model = model_part.replace('qwen_3_coder_480b', 'qwen-3-coder-480b')
	else:
		model = model_part.replace('_', '-')

	# OpenAI Models
	if provider == 'openai':
		api_key = os.getenv('OPENAI_API_KEY')
		return ChatOpenAI(model=model, api_key=api_key)
	# Azure OpenAI Models
	elif provider == 'azure':
		api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
		azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
		return ChatAzureOpenAI(model=model, api_key=api_key, azure_endpoint=azure_endpoint)
	# Google Models
	elif provider == 'google':
		api_key = os.getenv('GOOGLE_API_KEY')
		return ChatGoogle(model=model, api_key=api_key)
	# Mistral Models
	elif provider == 'mistral':
		api_key = os.getenv('MISTRAL_API_KEY')
		base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
		mistral_map = {
			'large': 'mistral-large-latest',
			'medium': 'mistral-medium-latest',
			'small': 'mistral-small-latest',
			'codestral': 'codestral-latest',
			'pixtral-large': 'pixtral-large-latest',
		}
		normalized_model_part = model_part.replace('_', '-')
		resolved_model = mistral_map.get(normalized_model_part, model.replace('_', '-'))
		return ChatMistral(model=resolved_model, api_key=api_key, base_url=base_url)
	# OCI Models
	elif provider == 'oci':
		# OCI requires more complex configuration that can't be easily inferred from env vars
		# Users should use ChatOCIRaw directly with proper configuration
		raise ValueError('OCI models require manual configuration. Use ChatOCIRaw directly with your OCI credentials.')
	# Cerebras Models
	elif provider == 'cerebras':
		api_key = os.getenv('CEREBRAS_API_KEY')
		return ChatCerebras(model=model, api_key=api_key)
	# Browser Use Models
	elif provider == 'bu':
		# Handle bu_latest -> bu-latest conversion (need to prepend 'bu-' back)
		model = f'bu-{model_part.replace("_", "-")}'
		api_key = os.getenv('BROWSER_USE_API_KEY')
		return ChatBrowserUse(model=model, api_key=api_key)
	else:
		# Bug fix: 'mistral' was previously missing from this list even though the
		# mistral provider branch above is fully supported.
		available_providers = ['openai', 'azure', 'google', 'mistral', 'oci', 'cerebras', 'bu']
		raise ValueError(f"Unknown provider: '{provider}'. Available providers: {', '.join(available_providers)}")
# Pre-configured model instances (lazy loaded via __getattr__)
def __getattr__(name: str) -> 'BaseChatModel':
	"""Create model instances on demand with API keys from environment."""
	# Chat classes are exposed directly by name; ChatOCIRaw needs an availability check.
	if name == 'ChatOCIRaw':
		if not OCI_AVAILABLE:
			raise ImportError('OCI integration not available. Install with: pip install "browser-use[oci]"')
		return ChatOCIRaw  # type: ignore
	chat_classes = {
		'ChatOpenAI': ChatOpenAI,
		'ChatAzureOpenAI': ChatAzureOpenAI,
		'ChatGoogle': ChatGoogle,
		'ChatMistral': ChatMistral,
		'ChatCerebras': ChatCerebras,
		'ChatBrowserUse': ChatBrowserUse,
	}
	if name in chat_classes:
		return chat_classes[name]  # type: ignore
	# Anything else is treated as a preconfigured model name, e.g. 'openai_gpt_4o'.
	try:
		return get_llm_by_name(name)
	except ValueError:
		raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
# Export all classes and preconfigured instances, conditionally including ChatOCIRaw
__all__ = [
'ChatOpenAI',
'ChatAzureOpenAI',
'ChatGoogle',
'ChatMistral',
'ChatCerebras',
'ChatBrowserUse',
]
if OCI_AVAILABLE:
__all__.append('ChatOCIRaw')
__all__ += [
'get_llm_by_name',
# OpenAI instances - created on demand
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4_1_mini',
'openai_o1',
'openai_o1_mini',
'openai_o1_pro',
'openai_o3',
'openai_o3_mini',
'openai_o3_pro',
'openai_o4_mini',
'openai_gpt_5',
'openai_gpt_5_mini',
'openai_gpt_5_nano',
# Azure instances - created on demand
'azure_gpt_4o',
'azure_gpt_4o_mini',
'azure_gpt_4_1_mini',
'azure_o1',
'azure_o1_mini',
'azure_o1_pro',
'azure_o3',
'azure_o3_mini',
'azure_o3_pro',
'azure_gpt_5',
'azure_gpt_5_mini',
# Google instances - created on demand
'google_gemini_2_0_flash',
'google_gemini_2_0_pro',
'google_gemini_2_5_pro',
'google_gemini_2_5_flash',
'google_gemini_2_5_flash_lite',
# Mistral instances - created on demand
'mistral_large',
'mistral_medium',
'mistral_small',
'codestral',
'pixtral_large',
# Cerebras instances - created on demand
'cerebras_llama3_1_8b',
'cerebras_llama3_3_70b',
'cerebras_gpt_oss_120b',
'cerebras_llama_4_scout_17b_16e_instruct',
'cerebras_llama_4_maverick_17b_128e_instruct',
'cerebras_qwen_3_32b',
'cerebras_qwen_3_235b_a22b_instruct_2507',
'cerebras_qwen_3_235b_a22b_thinking_2507',
'cerebras_qwen_3_coder_480b',
# Browser Use instances - created on demand
'bu_latest',
'bu_1_0',
'bu_2_0',
]
# NOTE: OCI backend is optional. The try/except ImportError and conditional __all__ are required
# so this module can be imported without browser-use[oci] installed.
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/models.py",
"license": "MIT License",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/ollama/chat.py | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from ollama import AsyncClient as OllamaAsyncClient
from ollama import Options
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.ollama.serializer import OllamaMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOllama(BaseChatModel):
	"""
	A wrapper around Ollama's chat model.
	"""

	# Ollama model tag to run, e.g. 'llama3.1'.
	model: str

	# # Model params
	# TODO (matic): Why is this commented out?
	# temperature: float | None = None

	# Client initialization parameters
	host: str | None = None
	timeout: float | httpx.Timeout | None = None
	client_params: dict[str, Any] | None = None
	ollama_options: Mapping[str, Any] | Options | None = None

	# Static
	@property
	def provider(self) -> str:
		return 'ollama'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare client parameters dictionary."""
		return {
			'host': self.host,
			'timeout': self.timeout,
			'client_params': self.client_params,
		}

	def get_client(self) -> OllamaAsyncClient:
		"""
		Returns an OllamaAsyncClient client.
		"""
		return OllamaAsyncClient(host=self.host, timeout=self.timeout, **self.client_params or {})

	@property
	def name(self) -> str:
		return self.model

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Send messages to Ollama, optionally requesting structured output.

		Args:
			messages: Conversation to send.
			output_format: Optional Pydantic model class; when given, Ollama is asked
				to constrain generation to its JSON schema and the raw response is
				validated into an instance of it.

		Returns:
			ChatInvokeCompletion wrapping either the raw string or the parsed model.

		Raises:
			ModelProviderError: On any client or validation failure.
		"""
		ollama_messages = OllamaMessageSerializer.serialize_messages(messages)

		try:
			# Build the request once; structured output only adds the `format` key.
			# (This also removes a redundant `output_format is not None` re-check
			# that previously lived inside the structured-output branch.)
			chat_kwargs: dict[str, Any] = {
				'model': self.model,
				'messages': ollama_messages,
				'options': self.ollama_options,
			}
			if output_format is not None:
				chat_kwargs['format'] = output_format.model_json_schema()

			response = await self.get_client().chat(**chat_kwargs)
			raw_content = response.message.content or ''

			if output_format is None:
				return ChatInvokeCompletion(completion=raw_content, usage=None)
			return ChatInvokeCompletion(completion=output_format.model_validate_json(raw_content), usage=None)
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/ollama/chat.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/llm/ollama/serializer.py | import base64
import json
from typing import Any, overload
from ollama._types import Image, Message
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
ToolCall,
UserMessage,
)
class OllamaMessageSerializer:
	"""Serializer for converting between custom message types and Ollama message types."""

	@staticmethod
	def _extract_text_content(content: Any) -> str:
		"""Extract text content from message content, ignoring images."""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		pieces: list[str] = []
		for part in content:
			part_type = getattr(part, 'type', None)
			if part_type == 'text':
				pieces.append(part.text)
			elif part_type == 'refusal':
				pieces.append(f'[Refusal] {part.refusal}')
			# Image parts are handled separately by _extract_images.
		return '\n'.join(pieces)

	@staticmethod
	def _extract_images(content: Any) -> list[Image]:
		"""Extract images from message content."""
		if content is None or isinstance(content, str):
			return []
		images: list[Image] = []
		for part in content:
			if getattr(part, 'type', None) != 'image_url':
				continue
			url = part.image_url.url
			if url.startswith('data:'):
				# data:image/jpeg;base64,<payload> — decode the payload to raw bytes.
				_, payload = url.split(',', 1)
				images.append(Image(value=base64.b64decode(payload)))
			else:
				# Plain URL; Ollama downloads it server-side.
				images.append(Image(value=url))
		return images

	@staticmethod
	def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[Message.ToolCall]:
		"""Convert browser-use ToolCalls to Ollama ToolCalls."""
		converted: list[Message.ToolCall] = []
		for call in tool_calls:
			try:
				arguments = json.loads(call.function.arguments)
			except json.JSONDecodeError:
				# Arguments were not valid JSON — pass the raw string through under a fixed key.
				arguments = {'arguments': call.function.arguments}
			converted.append(
				Message.ToolCall(function=Message.ToolCall.Function(name=call.function.name, arguments=arguments))
			)
		return converted

	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> Message: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> Message: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> Message: ...

	@staticmethod
	def serialize(message: BaseMessage) -> Message:
		"""Serialize a custom message to an Ollama Message."""
		if isinstance(message, UserMessage):
			text = OllamaMessageSerializer._extract_text_content(message.content)
			serialized = Message(role='user', content=text or None)
			images = OllamaMessageSerializer._extract_images(message.content)
			if images:
				serialized.images = images
			return serialized

		if isinstance(message, SystemMessage):
			text = OllamaMessageSerializer._extract_text_content(message.content)
			return Message(role='system', content=text or None)

		if isinstance(message, AssistantMessage):
			text = None
			if message.content is not None:
				text = OllamaMessageSerializer._extract_text_content(message.content)
			serialized = Message(role='assistant', content=text or None)
			if message.tool_calls:
				serialized.tool_calls = OllamaMessageSerializer._serialize_tool_calls(message.tool_calls)
			return serialized

		raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
		"""Serialize a list of browser_use messages to Ollama Messages."""
		return [OllamaMessageSerializer.serialize(message) for message in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/ollama/serializer.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/openai/chat.py | from collections.abc import Iterable, Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat import ChatCompletionContentPartTextParam
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared.chat_model import ChatModel
from openai.types.shared_params.reasoning_effort import ReasoningEffort
from openai.types.shared_params.response_format_json_schema import JSONSchema, ResponseFormatJSONSchema
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenAI(BaseChatModel):
	"""
	A wrapper around AsyncOpenAI that implements the BaseLLM protocol.

	This class accepts all AsyncOpenAI parameters while adding model
	and temperature parameters for the LLM interface (if temperature is not `None`).
	"""

	# Model configuration
	model: ChatModel | str

	# Model params
	temperature: float | None = 0.2
	frequency_penalty: float | None = 0.3  # this avoids infinite generation of \t for models like 4.1-mini
	reasoning_effort: ReasoningEffort = 'low'
	seed: int | None = None
	service_tier: Literal['auto', 'default', 'flex', 'priority', 'scale'] | None = None
	top_p: float | None = None
	add_schema_to_system_prompt: bool = False  # Add JSON schema to system prompt instead of using response_format
	dont_force_structured_output: bool = False  # If True, the model will not be forced to output a structured output
	remove_min_items_from_schema: bool = (
		False  # If True, remove minItems from JSON schema (for compatibility with some providers)
	)
	remove_defaults_from_schema: bool = (
		False  # If True, remove default values from JSON schema (for compatibility with some providers)
	)

	# Client initialization parameters (passed through to AsyncOpenAI; see _get_client_params)
	api_key: str | None = None
	organization: str | None = None
	project: str | None = None
	base_url: str | httpx.URL | None = None
	websocket_base_url: str | httpx.URL | None = None
	timeout: float | httpx.Timeout | None = None
	max_retries: int = 5  # Increase default retries for automation reliability
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None
	http_client: httpx.AsyncClient | None = None
	_strict_response_validation: bool = False
	max_completion_tokens: int | None = 4096

	# Models matched against self.model in ainvoke: for these, `reasoning_effort` is
	# sent and temperature/frequency_penalty are stripped from the request params.
	reasoning_models: list[ChatModel | str] | None = field(
		default_factory=lambda: [
			'o4-mini',
			'o3',
			'o3-mini',
			'o1',
			'o1-pro',
			'o3-pro',
			'gpt-5',
			'gpt-5-mini',
			'gpt-5-nano',
		]
	)
# Static
	@property
	def provider(self) -> str:
		"""Constant provider identifier for this chat model implementation."""
		return 'openai'
def _get_client_params(self) -> dict[str, Any]:
"""Prepare client parameters dictionary."""
# Define base client params
base_params = {
'api_key': self.api_key,
'organization': self.organization,
'project': self.project,
'base_url': self.base_url,
'websocket_base_url': self.websocket_base_url,
'timeout': self.timeout,
'max_retries': self.max_retries,
'default_headers': self.default_headers,
'default_query': self.default_query,
'_strict_response_validation': self._strict_response_validation,
}
# Create client_params dict with non-None values
client_params = {k: v for k, v in base_params.items() if v is not None}
# Add http_client if provided
if self.http_client is not None:
client_params['http_client'] = self.http_client
return client_params
def get_client(self) -> AsyncOpenAI:
"""
Returns an AsyncOpenAI client.
Returns:
AsyncOpenAI: An instance of the AsyncOpenAI client.
"""
client_params = self._get_client_params()
return AsyncOpenAI(**client_params)
	@property
	def name(self) -> str:
		"""Return the configured model identifier as a string."""
		return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
if response.usage is not None:
# Note: completion_tokens already includes reasoning_tokens per OpenAI API docs.
# Unlike Google Gemini where thinking_tokens are reported separately,
# OpenAI's reasoning_tokens are a subset of completion_tokens.
usage = ChatInvokeUsage(
prompt_tokens=response.usage.prompt_tokens,
prompt_cached_tokens=response.usage.prompt_tokens_details.cached_tokens
if response.usage.prompt_tokens_details is not None
else None,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
# Completion
completion_tokens=response.usage.completion_tokens,
total_tokens=response.usage.total_tokens,
)
else:
usage = None
return usage
	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...
	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the model with the given messages.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelProviderError: On invalid/empty responses, connection or API errors.
			ModelRateLimitError: When the provider reports rate limiting.
		"""
		openai_messages = OpenAIMessageSerializer.serialize_messages(messages)
		try:
			# Collect only explicitly-configured sampling params so SDK defaults apply otherwise.
			model_params: dict[str, Any] = {}
			if self.temperature is not None:
				model_params['temperature'] = self.temperature
			if self.frequency_penalty is not None:
				model_params['frequency_penalty'] = self.frequency_penalty
			if self.max_completion_tokens is not None:
				model_params['max_completion_tokens'] = self.max_completion_tokens
			if self.top_p is not None:
				model_params['top_p'] = self.top_p
			if self.seed is not None:
				model_params['seed'] = self.seed
			if self.service_tier is not None:
				model_params['service_tier'] = self.service_tier
			# Reasoning-model path (substring match against self.reasoning_models):
			# send reasoning_effort and drop temperature/frequency_penalty,
			# which these models presumably reject — see OpenAI reasoning docs.
			if self.reasoning_models and any(str(m).lower() in str(self.model).lower() for m in self.reasoning_models):
				model_params['reasoning_effort'] = self.reasoning_effort
				model_params.pop('temperature', None)
				model_params.pop('frequency_penalty', None)
			if output_format is None:
				# Return string response
				response = await self.get_client().chat.completions.create(
					model=self.model,
					messages=openai_messages,
					**model_params,
				)
				# Guard against proxies that return a missing/empty `choices` list.
				choice = response.choices[0] if response.choices else None
				if choice is None:
					base_url = str(self.base_url) if self.base_url is not None else None
					hint = f' (base_url={base_url})' if base_url is not None else ''
					raise ModelProviderError(
						message=(
							'Invalid OpenAI chat completion response: missing or empty `choices`.'
							' If you are using a proxy via `base_url`, ensure it implements the OpenAI'
							' `/v1/chat/completions` schema and returns `choices` as a non-empty list.'
							f'{hint}'
						),
						status_code=502,
						model=self.name,
					)
				usage = self._get_usage(response)
				return ChatInvokeCompletion(
					completion=choice.message.content or '',
					usage=usage,
					stop_reason=choice.finish_reason,
				)
			else:
				# Describe the expected structured output for OpenAI strict JSON-schema mode.
				response_format: JSONSchema = {
					'name': 'agent_output',
					'strict': True,
					'schema': SchemaOptimizer.create_optimized_json_schema(
						output_format,
						remove_min_items=self.remove_min_items_from_schema,
						remove_defaults=self.remove_defaults_from_schema,
					),
				}
				# Add JSON schema to system prompt if requested
				# (the dict's repr is embedded; appended as text or as an extra text part).
				if self.add_schema_to_system_prompt and openai_messages and openai_messages[0]['role'] == 'system':
					schema_text = f'\n<json_schema>\n{response_format}\n</json_schema>'
					if isinstance(openai_messages[0]['content'], str):
						openai_messages[0]['content'] += schema_text
					elif isinstance(openai_messages[0]['content'], Iterable):
						openai_messages[0]['content'] = list(openai_messages[0]['content']) + [
							ChatCompletionContentPartTextParam(text=schema_text, type='text')
						]
				if self.dont_force_structured_output:
					# Caller opted out of `response_format`; rely on prompting alone.
					response = await self.get_client().chat.completions.create(
						model=self.model,
						messages=openai_messages,
						**model_params,
					)
				else:
					# Return structured response enforced via response_format
					response = await self.get_client().chat.completions.create(
						model=self.model,
						messages=openai_messages,
						response_format=ResponseFormatJSONSchema(json_schema=response_format, type='json_schema'),
						**model_params,
					)
				# Same empty-`choices` guard as the plain-text path above.
				choice = response.choices[0] if response.choices else None
				if choice is None:
					base_url = str(self.base_url) if self.base_url is not None else None
					hint = f' (base_url={base_url})' if base_url is not None else ''
					raise ModelProviderError(
						message=(
							'Invalid OpenAI chat completion response: missing or empty `choices`.'
							' If you are using a proxy via `base_url`, ensure it implements the OpenAI'
							' `/v1/chat/completions` schema and returns `choices` as a non-empty list.'
							f'{hint}'
						),
						status_code=502,
						model=self.name,
					)
				if choice.message.content is None:
					raise ModelProviderError(
						message='Failed to parse structured output from model response',
						status_code=500,
						model=self.name,
					)
				usage = self._get_usage(response)
				# Validate the JSON payload into the requested Pydantic model.
				parsed = output_format.model_validate_json(choice.message.content)
				return ChatInvokeCompletion(
					completion=parsed,
					usage=usage,
					stop_reason=choice.finish_reason,
				)
		except ModelProviderError:
			# Preserve status_code and message from validation errors
			raise
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIConnectionError as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			# Last-resort wrapper so callers only ever see browser_use exceptions.
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openai/chat.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/openai/like.py | from dataclasses import dataclass
from browser_use.llm.openai.chat import ChatOpenAI
@dataclass
class ChatOpenAILike(ChatOpenAI):
	"""
	A class to interact with any provider that exposes an OpenAI-compatible API schema.

	Inherits all behavior from ChatOpenAI; only the model name is required.

	Args:
		model (str): The name of the model to use with the OpenAI-compatible endpoint.
	"""
	# Required model identifier passed through to the OpenAI-compatible endpoint.
	model: str
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openai/like.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/llm/openai/serializer.py | from typing import overload
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartRefusalParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageFunctionToolCallParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_content_part_image_param import ImageURL
from openai.types.chat.chat_completion_message_function_tool_call_param import Function
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class OpenAIMessageSerializer:
	"""Serializer for converting between custom message types and OpenAI message param types."""
	@staticmethod
	def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
		# Plain text part -> OpenAI text content part.
		return ChatCompletionContentPartTextParam(text=part.text, type='text')
	@staticmethod
	def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam:
		# Image part -> OpenAI image_url content part (URL and detail are passed through unchanged).
		return ChatCompletionContentPartImageParam(
			image_url=ImageURL(url=part.image_url.url, detail=part.image_url.detail),
			type='image_url',
		)
	@staticmethod
	def _serialize_content_part_refusal(part: ContentPartRefusalParam) -> ChatCompletionContentPartRefusalParam:
		# Refusal part -> OpenAI refusal content part (assistant messages only).
		return ChatCompletionContentPartRefusalParam(refusal=part.refusal, type='refusal')
	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
		"""Serialize content for user messages (text and images allowed).

		Plain strings pass through unchanged; any part with an unrecognized
		``type`` is silently dropped (no else branch below).
		"""
		if isinstance(content, str):
			return content
		serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
			elif part.type == 'image_url':
				serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_image(part))
		return serialized_parts
	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str | list[ChatCompletionContentPartTextParam]:
		"""Serialize content for system messages (text only); non-text parts are dropped."""
		if isinstance(content, str):
			return content
		serialized_parts: list[ChatCompletionContentPartTextParam] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
		return serialized_parts
	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None:
		"""Serialize content for assistant messages (text and refusal allowed).

		None is preserved so callers can distinguish "no content" from empty content.
		"""
		if content is None:
			return None
		if isinstance(content, str):
			return content
		serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
			elif part.type == 'refusal':
				serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_refusal(part))
		return serialized_parts
	@staticmethod
	def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageFunctionToolCallParam:
		# Tool call -> OpenAI function tool-call param; arguments stay a raw JSON string.
		return ChatCompletionMessageFunctionToolCallParam(
			id=tool_call.id,
			function=Function(name=tool_call.function.name, arguments=tool_call.function.arguments),
			type='function',
		)
	# endregion
	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...
	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...
	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...
	@staticmethod
	def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
		"""Serialize a custom message to an OpenAI message param.

		Optional fields (``name``, ``refusal``, ``tool_calls``, assistant
		``content``) are OMITTED from the TypedDict when unset rather than set
		to None — key absence matters to the OpenAI API.

		Raises:
			ValueError: If the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			user_result: ChatCompletionUserMessageParam = {
				'role': 'user',
				'content': OpenAIMessageSerializer._serialize_user_content(message.content),
			}
			if message.name is not None:
				user_result['name'] = message.name
			return user_result
		elif isinstance(message, SystemMessage):
			system_result: ChatCompletionSystemMessageParam = {
				'role': 'system',
				'content': OpenAIMessageSerializer._serialize_system_content(message.content),
			}
			if message.name is not None:
				system_result['name'] = message.name
			return system_result
		elif isinstance(message, AssistantMessage):
			# Handle content serialization
			content = None
			if message.content is not None:
				content = OpenAIMessageSerializer._serialize_assistant_content(message.content)
			assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}
			# Only add content if it's not None
			if content is not None:
				assistant_result['content'] = content
			if message.name is not None:
				assistant_result['name'] = message.name
			if message.refusal is not None:
				assistant_result['refusal'] = message.refusal
			if message.tool_calls:
				assistant_result['tool_calls'] = [OpenAIMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]
			return assistant_result
		else:
			raise ValueError(f'Unknown message type: {type(message)}')
	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
		"""Serialize a list of messages, preserving their order."""
		return [OpenAIMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openai/serializer.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/openrouter/chat.py | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openrouter.serializer import OpenRouterMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenRouter(BaseChatModel):
	"""
	A wrapper around OpenRouter's chat API, which provides access to various LLM models
	through a unified OpenAI-compatible interface.

	This class implements the BaseChatModel protocol for OpenRouter's API.
	"""

	# Model configuration
	model: str

	# Model params (request-level sampling options, sent with each completion call)
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None

	# Client initialization parameters
	api_key: str | None = None
	http_referer: str | None = None  # OpenRouter specific parameter for tracking
	base_url: str | httpx.URL = 'https://openrouter.ai/api/v1'
	timeout: float | httpx.Timeout | None = None
	max_retries: int = 10
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None
	http_client: httpx.AsyncClient | None = None
	_strict_response_validation: bool = False
	extra_body: dict[str, Any] | None = None

	# Static
	@property
	def provider(self) -> str:
		return 'openrouter'

	def _get_client_params(self) -> dict[str, Any]:
		"""Prepare the constructor kwargs for the AsyncOpenAI client.

		Only client-level settings belong here. Note: ``top_p`` and ``seed``
		are request-time sampling params and are passed per call in
		``ainvoke``; passing them to the AsyncOpenAI constructor raises
		``TypeError: unexpected keyword argument`` whenever either is set,
		which is why they are deliberately NOT included below.
		"""
		base_params = {
			'api_key': self.api_key,
			'base_url': self.base_url,
			'timeout': self.timeout,
			'max_retries': self.max_retries,
			'default_headers': self.default_headers,
			'default_query': self.default_query,
			'_strict_response_validation': self._strict_response_validation,
		}
		# Drop unset values so the OpenAI SDK applies its own defaults
		client_params = {k: v for k, v in base_params.items() if v is not None}
		# Add http_client if provided
		if self.http_client is not None:
			client_params['http_client'] = self.http_client
		return client_params

	def get_client(self) -> AsyncOpenAI:
		"""
		Returns an AsyncOpenAI client configured for OpenRouter.

		The client is created lazily on first use and cached on the instance.

		Returns:
			AsyncOpenAI: An instance of the AsyncOpenAI client with OpenRouter base URL.
		"""
		if not hasattr(self, '_client'):
			client_params = self._get_client_params()
			self._client = AsyncOpenAI(**client_params)
		return self._client

	@property
	def name(self) -> str:
		"""Model identifier used in logs and error reporting."""
		return str(self.model)

	def _first_choice(self, response: ChatCompletion):
		"""Return the first completion choice, or raise a descriptive 502 error.

		Guards against upstream providers that return a missing/empty `choices`
		list, which would otherwise surface as an opaque IndexError
		(consistent with ChatOpenAI's handling).
		"""
		if response.choices:
			return response.choices[0]
		raise ModelProviderError(
			message=(
				'Invalid OpenRouter chat completion response: missing or empty `choices`.'
				' Ensure the selected model/provider returns OpenAI-compatible output.'
			),
			status_code=502,
			model=self.name,
		)

	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
		"""Extract usage information from the OpenRouter response, if reported."""
		if response.usage is None:
			return None
		# Some providers omit prompt_tokens_details entirely, hence getattr.
		prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
		cached_tokens = prompt_details.cached_tokens if prompt_details else None
		return ChatInvokeUsage(
			prompt_tokens=response.usage.prompt_tokens,
			prompt_cached_tokens=cached_tokens,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			# Completion
			completion_tokens=response.usage.completion_tokens,
			total_tokens=response.usage.total_tokens,
		)

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the model with the given messages through OpenRouter.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelProviderError: On invalid/empty responses, connection or API errors.
			ModelRateLimitError: When OpenRouter reports rate limiting.
		"""
		openrouter_messages = OpenRouterMessageSerializer.serialize_messages(messages)
		# Set up extra headers for OpenRouter (app attribution/tracking)
		extra_headers = {}
		if self.http_referer:
			extra_headers['HTTP-Referer'] = self.http_referer
		try:
			if output_format is None:
				# Return string response
				response = await self.get_client().chat.completions.create(
					model=self.model,
					messages=openrouter_messages,
					temperature=self.temperature,
					top_p=self.top_p,
					seed=self.seed,
					extra_headers=extra_headers,
					**(self.extra_body or {}),
				)
				choice = self._first_choice(response)
				usage = self._get_usage(response)
				return ChatInvokeCompletion(
					completion=choice.message.content or '',
					usage=usage,
				)
			else:
				# Create a JSON schema for structured output
				schema = SchemaOptimizer.create_optimized_json_schema(output_format)
				response_format_schema: JSONSchema = {
					'name': 'agent_output',
					'strict': True,
					'schema': schema,
				}
				# Return structured response
				response = await self.get_client().chat.completions.create(
					model=self.model,
					messages=openrouter_messages,
					temperature=self.temperature,
					top_p=self.top_p,
					seed=self.seed,
					response_format=ResponseFormatJSONSchema(
						json_schema=response_format_schema,
						type='json_schema',
					),
					extra_headers=extra_headers,
					**(self.extra_body or {}),
				)
				choice = self._first_choice(response)
				if choice.message.content is None:
					raise ModelProviderError(
						message='Failed to parse structured output from model response',
						status_code=500,
						model=self.name,
					)
				usage = self._get_usage(response)
				parsed = output_format.model_validate_json(choice.message.content)
				return ChatInvokeCompletion(
					completion=parsed,
					usage=usage,
				)
		except ModelProviderError:
			# Preserve status_code/message raised by the guards above
			# (consistent with ChatOpenAI's exception handling).
			raise
		except RateLimitError as e:
			raise ModelRateLimitError(message=e.message, model=self.name) from e
		except APIConnectionError as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
		except APIStatusError as e:
			raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
		except Exception as e:
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openrouter/chat.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/openrouter/serializer.py | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class OpenRouterMessageSerializer:
	"""Message serializer for OpenRouter.

	OpenRouter exposes an OpenAI-compatible chat API, so serialization simply
	delegates to the OpenAI serializer — there is no OpenRouter-specific
	message format.
	"""

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
		"""Convert browser_use messages into the OpenAI-compatible wire format.

		Args:
			messages: List of browser_use messages.

		Returns:
			Messages in the format OpenRouter accepts (identical to OpenAI's).
		"""
		return OpenAIMessageSerializer.serialize_messages(messages)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openrouter/serializer.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/llm/schema.py | """
Utilities for creating optimized Pydantic schemas for LLM usage.
"""
from typing import Any
from pydantic import BaseModel
class SchemaOptimizer:
	"""Static helpers that flatten and sanitize Pydantic JSON schemas for LLM structured output."""
	@staticmethod
	def create_optimized_json_schema(
		model: type[BaseModel],
		*,
		remove_min_items: bool = False,
		remove_defaults: bool = False,
	) -> dict[str, Any]:
		"""
		Create the most optimized schema by flattening all $ref/$defs while preserving
		FULL descriptions and ALL action definitions. Also ensures OpenAI strict mode compatibility.

		Args:
			model: The Pydantic model to optimize
			remove_min_items: If True, remove minItems from the schema
			remove_defaults: If True, remove default values from the schema

		Returns:
			Optimized schema with all $refs resolved and strict mode compatibility

		Raises:
			ValueError: If the optimized schema root is not a dict (should not happen
				for a valid Pydantic model schema).
		"""
		# Generate original schema
		original_schema = model.model_json_schema()
		# Extract $defs for reference resolution, then flatten everything
		defs_lookup = original_schema.get('$defs', {})
		# Create optimized schema with flattening
		# Pass flags to optimize_schema via closure
		# (remove_min_items / remove_defaults are captured from the enclosing scope)
		def optimize_schema(obj: Any, defs_lookup: dict[str, Any] | None = None, *, in_properties: bool = False) -> Any:
			"""Apply all optimization techniques including flattening all $ref/$defs"""
			if isinstance(obj, dict):
				optimized: dict[str, Any] = {}
				flattened_ref: dict[str, Any] | None = None
				# Skip unnecessary fields AND $defs (we'll inline everything)
				skip_fields = ['additionalProperties', '$defs']
				# NOTE: branch order below matters — earlier branches shadow later ones
				# (e.g. 'type', 'minItems' and 'default' never reach the "essential
				# validation fields" list further down).
				for key, value in obj.items():
					if key in skip_fields:
						continue
					# Skip metadata "title" unless we're iterating inside an actual `properties` map
					# (inside `properties`, "title" may be a real property name, not metadata)
					if key == 'title' and not in_properties:
						continue
					# Preserve FULL descriptions without truncation, skip empty ones
					elif key == 'description':
						if value:  # Only include non-empty descriptions
							optimized[key] = value
					# Handle type field - must recursively process in case value contains $ref
					elif key == 'type':
						optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
					# FLATTEN: Resolve $ref by inlining the actual definition
					elif key == '$ref' and defs_lookup:
						ref_path = value.split('/')[-1]  # Get the definition name from "#/$defs/SomeName"
						if ref_path in defs_lookup:
							# Get the referenced definition and flatten it
							referenced_def = defs_lookup[ref_path]
							flattened_ref = optimize_schema(referenced_def, defs_lookup)
					# Skip minItems/min_items and default if requested (check BEFORE processing)
					elif key in ('minItems', 'min_items') and remove_min_items:
						continue  # Skip minItems/min_items
					elif key == 'default' and remove_defaults:
						continue  # Skip default values
					# Keep all anyOf structures (action unions) and resolve any $refs within
					elif key == 'anyOf' and isinstance(value, list):
						optimized[key] = [optimize_schema(item, defs_lookup) for item in value]
					# Recursively optimize nested structures
					elif key in ['properties', 'items']:
						optimized[key] = optimize_schema(
							value,
							defs_lookup,
							in_properties=(key == 'properties'),
						)
					# Keep essential validation fields
					elif key in [
						'type',
						'required',
						'minimum',
						'maximum',
						'minItems',
						'min_items',
						'maxItems',
						'pattern',
						'default',
					]:
						optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
					# Recursively process all other fields
					else:
						optimized[key] = optimize_schema(value, defs_lookup) if isinstance(value, (dict, list)) else value
				# If we have a flattened reference, merge it with the optimized properties
				if flattened_ref is not None and isinstance(flattened_ref, dict):
					# Start with the flattened reference as the base
					result = flattened_ref.copy()
					# Merge in any sibling properties that were processed
					for key, value in optimized.items():
						# Preserve descriptions from the original object if they exist
						if key == 'description' and 'description' not in result:
							result[key] = value
						elif key != 'description':  # Don't overwrite description from flattened ref
							result[key] = value
					return result
				else:
					# No $ref, just return the optimized object
					# CRITICAL: Add additionalProperties: false to ALL objects for OpenAI strict mode
					if optimized.get('type') == 'object':
						optimized['additionalProperties'] = False
					return optimized
			elif isinstance(obj, list):
				return [optimize_schema(item, defs_lookup, in_properties=in_properties) for item in obj]
			return obj
		optimized_result = optimize_schema(original_schema, defs_lookup)
		# Ensure we have a dictionary (should always be the case for schema root)
		if not isinstance(optimized_result, dict):
			raise ValueError('Optimized schema result is not a dictionary')
		optimized_schema: dict[str, Any] = optimized_result
		# Additional pass to ensure ALL objects have additionalProperties: false
		# (covers objects produced by the flattened-$ref merge path above)
		def ensure_additional_properties_false(obj: Any) -> None:
			"""Ensure all objects have additionalProperties: false"""
			if isinstance(obj, dict):
				# If it's an object type, ensure additionalProperties is false
				if obj.get('type') == 'object':
					obj['additionalProperties'] = False
				# Recursively apply to all values
				for value in obj.values():
					if isinstance(value, (dict, list)):
						ensure_additional_properties_false(value)
			elif isinstance(obj, list):
				for item in obj:
					if isinstance(item, (dict, list)):
						ensure_additional_properties_false(item)
		ensure_additional_properties_false(optimized_schema)
		SchemaOptimizer._make_strict_compatible(optimized_schema)
		# Final pass to remove minItems/min_items and default values if requested
		# (the closure above skips these while copying; this pass mutates in place
		# to catch any that survived via the $ref-merge path)
		if remove_min_items or remove_defaults:
			def remove_forbidden_fields(obj: Any) -> None:
				"""Recursively remove minItems/min_items and default values"""
				if isinstance(obj, dict):
					# Remove forbidden keys
					if remove_min_items:
						obj.pop('minItems', None)
						obj.pop('min_items', None)
					if remove_defaults:
						obj.pop('default', None)
					# Recursively process all values
					for value in obj.values():
						if isinstance(value, (dict, list)):
							remove_forbidden_fields(value)
				elif isinstance(obj, list):
					for item in obj:
						if isinstance(item, (dict, list)):
							remove_forbidden_fields(item)
			remove_forbidden_fields(optimized_schema)
		return optimized_schema
	@staticmethod
	def _make_strict_compatible(schema: dict[str, Any] | list[Any]) -> None:
		"""Ensure all properties are required for OpenAI strict mode (mutates in place)."""
		if isinstance(schema, dict):
			# First recursively apply to nested objects
			# ('required' itself is skipped — it is a list of strings, not a sub-schema)
			for key, value in schema.items():
				if isinstance(value, (dict, list)) and key != 'required':
					SchemaOptimizer._make_strict_compatible(value)
			# Then update required for this level
			if 'properties' in schema and 'type' in schema and schema['type'] == 'object':
				# Add all properties to required array
				all_props = list(schema['properties'].keys())
				schema['required'] = all_props  # Set all properties as required
		elif isinstance(schema, list):
			for item in schema:
				SchemaOptimizer._make_strict_compatible(item)
	@staticmethod
	def create_gemini_optimized_schema(model: type[BaseModel]) -> dict[str, Any]:
		"""
		Create Gemini-optimized schema, preserving explicit `required` arrays so Gemini
		respects mandatory fields defined by the caller.

		Currently delegates to create_optimized_json_schema with default flags.

		Args:
			model: The Pydantic model to optimize
		Returns:
			Optimized schema suitable for Gemini structured output
		"""
		return SchemaOptimizer.create_optimized_json_schema(model)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/schema.py",
"license": "MIT License",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/tests/test_anthropic_cache.py | import logging
from typing import cast
from browser_use.agent.service import Agent
from browser_use.llm.anthropic.chat import ChatAnthropic
from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer, NonSystemMessage
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
Function,
ImageURL,
SystemMessage,
ToolCall,
UserMessage,
)
logger = logging.getLogger(__name__)
class TestAnthropicCache:
"""Comprehensive test for Anthropic cache serialization."""
	def test_cache_basic_functionality(self):
		"""Test basic cache functionality for all message types.

		Cached messages serialize their content as a list of blocks (where a
		cache_control marker can live); uncached content stays a plain string.
		"""
		# Test cache with different message types
		messages: list[BaseMessage] = [
			SystemMessage(content='System message!', cache=True),
			UserMessage(content='User message!', cache=True),
			AssistantMessage(content='Assistant message!', cache=False),
		]
		anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages)
		# System messages are returned separately (Anthropic's top-level `system` param)
		assert len(anthropic_messages) == 2
		assert isinstance(system_message, list)
		assert isinstance(anthropic_messages[0]['content'], list)
		assert isinstance(anthropic_messages[1]['content'], str)
		# Test cache with assistant message
		agent_messages: list[BaseMessage] = [
			SystemMessage(content='System message!'),
			UserMessage(content='User message!'),
			AssistantMessage(content='Assistant message!', cache=True),
		]
		anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(agent_messages)
		# Uncached system message stays a plain string
		assert isinstance(system_message, str)
		assert isinstance(anthropic_messages[0]['content'], str)
		assert isinstance(anthropic_messages[1]['content'], list)
	def test_cache_with_tool_calls(self):
		"""Test cache functionality with tool calls.

		A cached assistant message carrying tool calls serializes to a block
		list containing at least a text block plus one tool_use block.
		"""
		tool_call = ToolCall(id='test_id', function=Function(name='test_function', arguments='{"arg": "value"}'))
		# Assistant with tool calls and cache
		assistant_with_tools = AssistantMessage(content='Assistant with tools', tool_calls=[tool_call], cache=True)
		messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_with_tools])
		assert len(messages) == 1
		assert isinstance(messages[0]['content'], list)
		# Should have both text and tool_use blocks
		assert len(messages[0]['content']) >= 2
	def test_cache_with_images(self):
		"""Test cache functionality with image content (URL-based image part)."""
		user_with_image = UserMessage(
			content=[
				ContentPartTextParam(text='Here is an image:', type='text'),
				ContentPartImageParam(image_url=ImageURL(url='https://example.com/image.jpg'), type='image_url'),
			],
			cache=True,
		)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_with_image])
		assert len(messages) == 1
		assert isinstance(messages[0]['content'], list)
		# Both the text part and the image part survive serialization
		assert len(messages[0]['content']) == 2
	def test_cache_with_base64_images(self):
		"""Test cache functionality with base64 data-URL images (1x1 PNG fixture)."""
		base64_url = 'data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='
		user_with_base64 = UserMessage(
			content=[
				ContentPartTextParam(text='Base64 image:', type='text'),
				ContentPartImageParam(image_url=ImageURL(url=base64_url), type='image_url'),
			],
			cache=True,
		)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_with_base64])
		assert len(messages) == 1
		assert isinstance(messages[0]['content'], list)
	def test_cache_content_types(self):
		"""Test different content types with cache.

		cache=True forces string content into a block list (so a cache_control
		marker can be attached); list content stays a list either way.
		"""
		# String content with cache should become list
		user_string_cached = UserMessage(content='String message', cache=True)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_string_cached])
		assert isinstance(messages[0]['content'], list)
		# String content without cache should remain string
		user_string_no_cache = UserMessage(content='String message', cache=False)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_string_no_cache])
		assert isinstance(messages[0]['content'], str)
		# List content maintains list format regardless of cache
		user_list_cached = UserMessage(content=[ContentPartTextParam(text='List message', type='text')], cache=True)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_list_cached])
		assert isinstance(messages[0]['content'], list)
		user_list_no_cache = UserMessage(content=[ContentPartTextParam(text='List message', type='text')], cache=False)
		messages, _ = AnthropicMessageSerializer.serialize_messages([user_list_no_cache])
		assert isinstance(messages[0]['content'], list)
	def test_assistant_cache_empty_content(self):
		"""Test AssistantMessage with empty (None) content and cache.

		Even with no content, the serializer emits one message whose content
		shape (list vs str) still follows the cache flag.
		"""
		# With cache
		assistant_empty_cached = AssistantMessage(content=None, cache=True)
		messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_empty_cached])
		assert len(messages) == 1
		assert isinstance(messages[0]['content'], list)
		# Without cache
		assistant_empty_no_cache = AssistantMessage(content=None, cache=False)
		messages, _ = AnthropicMessageSerializer.serialize_messages([assistant_empty_no_cache])
		assert len(messages) == 1
		assert isinstance(messages[0]['content'], str)
def test_mixed_cache_scenarios(self):
    """Test various combinations of cached and non-cached messages."""
    conversation: list[BaseMessage] = [
        SystemMessage(content='System with cache', cache=True),
        UserMessage(content='User with cache', cache=True),
        AssistantMessage(content='Assistant without cache', cache=False),
        UserMessage(content='User without cache', cache=False),
        AssistantMessage(content='Assistant with cache', cache=True),
    ]
    serialized, system = AnthropicMessageSerializer.serialize_messages(conversation)

    # A cached system prompt is emitted in list form.
    assert isinstance(system, list)
    assert len(serialized) == 4

    # _clean_cache_messages keeps only the LAST cache=True message cached, so the
    # first three normal messages serialize to strings and the final one to a list.
    for msg, expected_type in zip(serialized, (str, str, str, list)):
        assert isinstance(msg['content'], expected_type)
def test_system_message_cache_behavior(self):
    """Test SystemMessage specific cache behavior."""
    cases = (
        ('System message with cache', True, list),
        ('System message without cache', False, str),
    )
    for text, use_cache, expected_type in cases:
        serialized = AnthropicMessageSerializer.serialize(SystemMessage(content=text, cache=use_cache))
        # serialize() passes SystemMessage through unchanged.
        assert isinstance(serialized, SystemMessage)
        # The string serializer decides list-vs-string based on use_cache.
        content = AnthropicMessageSerializer._serialize_content_to_str(serialized.content, use_cache=use_cache)
        assert isinstance(content, expected_type)
def test_agent_messages_integration(self):
    """Test integration with actual agent messages."""
    # Build a real agent and run its message history through the serializer.
    history = Agent(task='Hello, world!', llm=ChatAnthropic('')).message_manager.get_messages()
    _, system_message = AnthropicMessageSerializer.serialize_messages(history)
    # The agent always supplies a system prompt.
    assert system_message is not None
def test_cache_cleaning_last_message_only(self):
    """Test that only the last cache=True message remains cached."""
    conversation: list[BaseMessage] = [
        UserMessage(content='First user message', cache=True),
        AssistantMessage(content='First assistant message', cache=True),
        UserMessage(content='Second user message', cache=True),
        AssistantMessage(content='Second assistant message', cache=False),
        UserMessage(content='Third user message', cache=True),  # This should be the only one cached
    ]

    # Exercise the cleaning helper directly (it only accepts non-system messages).
    non_system = cast(list[NonSystemMessage], [m for m in conversation if not isinstance(m, SystemMessage)])
    cleaned = AnthropicMessageSerializer._clean_cache_messages(non_system)

    # Every cache flag except the final one must have been cleared.
    assert [bool(m.cache) for m in cleaned] == [False, False, False, False, True]

    # End-to-end: exactly one serialized message carries cached (list) content,
    # and it is the last one.
    serialized, _ = AnthropicMessageSerializer.serialize_messages(conversation)
    cached_total = sum(isinstance(m['content'], list) for m in serialized)
    assert cached_total == 1
    assert isinstance(serialized[-1]['content'], list)
def test_cache_cleaning_with_system_message(self):
    """Test that system messages are not affected by cache cleaning logic."""
    conversation: list[BaseMessage] = [
        SystemMessage(content='System message', cache=True),  # System messages are handled separately
        UserMessage(content='First user message', cache=True),
        AssistantMessage(content='Assistant message', cache=True),  # This should be the only normal message cached
    ]
    serialized, system = AnthropicMessageSerializer.serialize_messages(conversation)

    # The system prompt keeps its cache marker (list form) ...
    assert isinstance(system, list)
    # ... while among normal messages only the last cached one survives cleaning.
    assert sum(isinstance(m['content'], list) for m in serialized) == 1
    assert isinstance(serialized[-1]['content'], list)
def test_cache_cleaning_no_cached_messages(self):
    """Test that messages without cache=True are not affected."""
    uncached = [
        UserMessage(content='User message 1', cache=False),
        AssistantMessage(content='Assistant message 1', cache=False),
        UserMessage(content='User message 2', cache=False),
    ]
    # Cleaning a fully-uncached conversation must be a no-op for every cache flag.
    assert all(not m.cache for m in AnthropicMessageSerializer._clean_cache_messages(uncached))
def test_max_4_cache_blocks(self):
    """Test that the max number of cache blocks is 4.

    The Anthropic API rejects requests with more than 4 cache_control
    breakpoints, so the serializer must never emit more than that.
    Previously this test only logged the output; it now actually asserts
    the limit.
    """
    agent = Agent(task='Hello, world!', llm=ChatAnthropic(''))
    messages = agent.message_manager.get_messages()
    anthropic_messages, system_message = AnthropicMessageSerializer.serialize_messages(messages)
    logger.info(anthropic_messages)
    logger.info(system_message)

    # Content blocks are plain dicts at runtime; count every cache_control marker
    # across the system prompt and all serialized messages.
    def _count_cache_blocks(blocks) -> int:
        return sum(1 for block in blocks if isinstance(block, dict) and block.get('cache_control') is not None)

    total_cache_blocks = 0
    if isinstance(system_message, list):
        total_cache_blocks += _count_cache_blocks(system_message)
    for msg in anthropic_messages:
        if isinstance(msg['content'], list):
            total_cache_blocks += _count_cache_blocks(msg['content'])

    assert total_cache_blocks <= 4, f'Expected at most 4 cache blocks, got {total_cache_blocks}'
def test_cache_only_last_block_in_message(self):
    """Test that only the LAST block in a message gets cache_control when cache=True."""
    message = UserMessage(
        content=[
            ContentPartTextParam(text='Part 1', type='text'),
            ContentPartTextParam(text='Part 2', type='text'),
            ContentPartTextParam(text='Part 3', type='text'),
        ],
        cache=True,
    )
    blocks = AnthropicMessageSerializer.serialize(message)['content']
    assert isinstance(blocks, list)

    # Blocks are plain dicts at runtime despite the type annotations; collect
    # the positions that carry cache_control.
    flagged = [i for i, block in enumerate(blocks) if block.get('cache_control') is not None]  # type: ignore[attr-defined]
    # Exactly one block is flagged, and it is the final one.
    assert flagged == [len(blocks) - 1], f'Expected 1 cache_control block, got {len(flagged)}'
def test_cache_only_last_tool_call(self):
    """Test that only the LAST tool_use block gets cache_control."""
    calls = [
        ToolCall(id='id1', function=Function(name='func1', arguments='{"arg": "1"}')),
        ToolCall(id='id2', function=Function(name='func2', arguments='{"arg": "2"}')),
        ToolCall(id='id3', function=Function(name='func3', arguments='{"arg": "3"}')),
    ]
    serialized = AnthropicMessageSerializer.serialize(AssistantMessage(content=None, tool_calls=calls, cache=True))
    blocks = serialized['content']
    assert isinstance(blocks, list)

    # Blocks are plain dicts at runtime; only the final tool_use block may be flagged.
    flagged = [i for i, block in enumerate(blocks) if block.get('cache_control') is not None]  # type: ignore[attr-defined]
    assert flagged == [len(blocks) - 1], f'Expected 1 cache_control block, got {len(flagged)}'
def test_cache_assistant_with_content_and_tools(self):
    """Test AssistantMessage with both content and tool calls - only last tool gets cache."""
    message = AssistantMessage(
        content=[
            ContentPartTextParam(text='Text part 1', type='text'),
            ContentPartTextParam(text='Text part 2', type='text'),
        ],
        tool_calls=[ToolCall(id='test_id', function=Function(name='test_function', arguments='{"arg": "value"}'))],
        cache=True,
    )
    blocks = AnthropicMessageSerializer.serialize(message)['content']
    assert isinstance(blocks, list)
    # Two text blocks followed by one tool_use block.
    assert len(blocks) == 3

    # Blocks are plain dicts at runtime; only the trailing tool_use block may carry cache_control.
    flagged = [i for i, block in enumerate(blocks) if block.get('cache_control') is not None]  # type: ignore[attr-defined]
    assert flagged == [2], f'Expected 1 cache_control block, got {len(flagged)}'
if __name__ == '__main__':
    runner = TestAnthropicCache()
    # Run every cache test in definition order when executed as a script.
    for run_test in (
        runner.test_cache_basic_functionality,
        runner.test_cache_with_tool_calls,
        runner.test_cache_with_images,
        runner.test_cache_with_base64_images,
        runner.test_cache_content_types,
        runner.test_assistant_cache_empty_content,
        runner.test_mixed_cache_scenarios,
        runner.test_system_message_cache_behavior,
        runner.test_agent_messages_integration,
        runner.test_cache_cleaning_last_message_only,
        runner.test_cache_cleaning_with_system_message,
        runner.test_cache_cleaning_no_cached_messages,
        runner.test_max_4_cache_blocks,
        runner.test_cache_only_last_block_in_message,
        runner.test_cache_only_last_tool_call,
        runner.test_cache_assistant_with_content_and_tools,
    ):
        run_test()
    print('All cache tests passed!')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_anthropic_cache.py",
"license": "MIT License",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/tests/test_chat_models.py | import os
import pytest
from pydantic import BaseModel
from browser_use.llm import ChatAnthropic, ChatGoogle, ChatGroq, ChatOpenAI, ChatOpenRouter
from browser_use.llm.messages import ContentPartTextParam
# Optional OCI import
try:
from examples.models.oci_models import xai_llm
OCI_MODELS_AVAILABLE = True
except ImportError:
xai_llm = None
OCI_MODELS_AVAILABLE = False
class CapitalResponse(BaseModel):
    """Structured response for capital question"""

    # Country the question asked about (e.g. 'France'); compared lowercased in tests.
    country: str
    # Capital city returned by the model (e.g. 'Paris'); compared lowercased in tests.
    capital: str
class TestChatModels:
    # NOTE(review): these imports live in the class body so the constants below can
    # reference the message types during class creation. As a consequence the string
    # after the import is NOT the class docstring (it follows a statement).
    from browser_use.llm.messages import (
        AssistantMessage,
        BaseMessage,
        SystemMessage,
        UserMessage,
    )

    """Test suite for all chat model implementations"""

    # Test Constants — a tiny 4-turn conversation whose final answer is known.
    SYSTEM_MESSAGE = SystemMessage(content=[ContentPartTextParam(text='You are a helpful assistant.', type='text')])
    FRANCE_QUESTION = UserMessage(content='What is the capital of France? Answer in one word.')
    FRANCE_ANSWER = AssistantMessage(content='Paris')
    GERMANY_QUESTION = UserMessage(content='What is the capital of Germany? Answer in one word.')

    # Expected values (lowercased; assertions compare against completion.lower()).
    EXPECTED_GERMANY_CAPITAL = 'berlin'
    EXPECTED_FRANCE_COUNTRY = 'france'
    EXPECTED_FRANCE_CAPITAL = 'paris'

    # Test messages for conversation
    CONVERSATION_MESSAGES: list[BaseMessage] = [
        SYSTEM_MESSAGE,
        FRANCE_QUESTION,
        FRANCE_ANSWER,
        GERMANY_QUESTION,
    ]

    # Test messages for structured output
    STRUCTURED_MESSAGES: list[BaseMessage] = [UserMessage(content='What is the capital of France?')]

    # OpenAI Tests
    @pytest.fixture
    def openrouter_chat(self):
        """Provides an initialized ChatOpenRouter client for tests."""
        # NOTE(review): the OpenRouter tests below build their own client instead of
        # requesting this fixture — presumably it is kept for future use; verify.
        if not os.getenv('OPENROUTER_API_KEY'):
            pytest.skip('OPENROUTER_API_KEY not set')
        return ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)

    @pytest.mark.asyncio
    async def test_openai_ainvoke_normal(self):
        """Test normal text response from OpenAI"""
        # Skip if no API key
        if not os.getenv('OPENAI_API_KEY'):
            pytest.skip('OPENAI_API_KEY not set')
        chat = ChatOpenAI(model='gpt-4o-mini', temperature=0)
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_openai_ainvoke_structured(self):
        """Test structured output from OpenAI"""
        # Skip if no API key
        if not os.getenv('OPENAI_API_KEY'):
            pytest.skip('OPENAI_API_KEY not set')
        chat = ChatOpenAI(model='gpt-4o-mini', temperature=0)
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # Anthropic Tests
    @pytest.mark.asyncio
    async def test_anthropic_ainvoke_normal(self):
        """Test normal text response from Anthropic"""
        # Skip if no API key
        if not os.getenv('ANTHROPIC_API_KEY'):
            pytest.skip('ANTHROPIC_API_KEY not set')
        chat = ChatAnthropic(model='claude-3-5-haiku-latest', max_tokens=100, temperature=0)
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_anthropic_ainvoke_structured(self):
        """Test structured output from Anthropic"""
        # Skip if no API key
        if not os.getenv('ANTHROPIC_API_KEY'):
            pytest.skip('ANTHROPIC_API_KEY not set')
        chat = ChatAnthropic(model='claude-3-5-haiku-latest', max_tokens=100, temperature=0)
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # Google Gemini Tests
    @pytest.mark.asyncio
    async def test_google_ainvoke_normal(self):
        """Test normal text response from Google Gemini"""
        # Skip if no API key
        if not os.getenv('GOOGLE_API_KEY'):
            pytest.skip('GOOGLE_API_KEY not set')
        chat = ChatGoogle(model='gemini-2.0-flash', api_key=os.getenv('GOOGLE_API_KEY'), temperature=0)
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_google_ainvoke_structured(self):
        """Test structured output from Google Gemini"""
        # Skip if no API key
        if not os.getenv('GOOGLE_API_KEY'):
            pytest.skip('GOOGLE_API_KEY not set')
        chat = ChatGoogle(model='gemini-2.0-flash', api_key=os.getenv('GOOGLE_API_KEY'), temperature=0)
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # Google Gemini with Vertex AI Tests
    @pytest.mark.asyncio
    async def test_google_vertex_ainvoke_normal(self):
        """Test normal text response from Google Gemini via Vertex AI"""
        # Skip if no project ID
        if not os.getenv('GOOGLE_CLOUD_PROJECT'):
            pytest.skip('GOOGLE_CLOUD_PROJECT not set')
        chat = ChatGoogle(
            model='gemini-2.0-flash',
            vertexai=True,
            project=os.getenv('GOOGLE_CLOUD_PROJECT'),
            location='us-central1',
            temperature=0,
        )
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_google_vertex_ainvoke_structured(self):
        """Test structured output from Google Gemini via Vertex AI"""
        # Skip if no project ID
        if not os.getenv('GOOGLE_CLOUD_PROJECT'):
            pytest.skip('GOOGLE_CLOUD_PROJECT not set')
        chat = ChatGoogle(
            model='gemini-2.0-flash',
            vertexai=True,
            project=os.getenv('GOOGLE_CLOUD_PROJECT'),
            location='us-central1',
            temperature=0,
        )
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # Groq Tests
    @pytest.mark.asyncio
    async def test_groq_ainvoke_normal(self):
        """Test normal text response from Groq"""
        # Skip if no API key
        if not os.getenv('GROQ_API_KEY'):
            pytest.skip('GROQ_API_KEY not set')
        chat = ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct', temperature=0)
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_groq_ainvoke_structured(self):
        """Test structured output from Groq"""
        # Skip if no API key
        if not os.getenv('GROQ_API_KEY'):
            pytest.skip('GROQ_API_KEY not set')
        chat = ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct', temperature=0)
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # OpenRouter Tests
    @pytest.mark.asyncio
    async def test_openrouter_ainvoke_normal(self):
        """Test normal text response from OpenRouter"""
        # Skip if no API key
        if not os.getenv('OPENROUTER_API_KEY'):
            pytest.skip('OPENROUTER_API_KEY not set')
        chat = ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)
        response = await chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_openrouter_ainvoke_structured(self):
        """Test structured output from OpenRouter"""
        # Skip if no API key
        if not os.getenv('OPENROUTER_API_KEY'):
            pytest.skip('OPENROUTER_API_KEY not set')
        chat = ChatOpenRouter(model='openai/gpt-4o-mini', api_key=os.getenv('OPENROUTER_API_KEY'), temperature=0)
        response = await chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL

    # OCI Raw Tests
    @pytest.fixture
    def oci_raw_chat(self):
        """Provides an initialized ChatOCIRaw client for tests."""
        # Skip if OCI models not available
        if not OCI_MODELS_AVAILABLE:
            pytest.skip('OCI models not available - install with pip install "browser-use[oci]"')
        # Skip if OCI credentials not available - check for config file existence
        try:
            import oci

            oci.config.from_file('~/.oci/config', 'DEFAULT')
        except Exception:
            pytest.skip('OCI credentials not available')
        # Skip if using placeholder config
        if xai_llm and hasattr(xai_llm, 'compartment_id') and 'example' in xai_llm.compartment_id.lower():
            pytest.skip('OCI model using placeholder configuration - set real credentials')
        return xai_llm  # xai or cohere

    @pytest.mark.asyncio
    async def test_oci_raw_ainvoke_normal(self, oci_raw_chat):
        """Test normal text response from OCI Raw"""
        response = await oci_raw_chat.ainvoke(self.CONVERSATION_MESSAGES)
        completion = response.completion
        assert isinstance(completion, str)
        assert self.EXPECTED_GERMANY_CAPITAL in completion.lower()

    @pytest.mark.asyncio
    async def test_oci_raw_ainvoke_structured(self, oci_raw_chat):
        """Test structured output from OCI Raw"""
        response = await oci_raw_chat.ainvoke(self.STRUCTURED_MESSAGES, output_format=CapitalResponse)
        completion = response.completion
        assert isinstance(completion, CapitalResponse)
        assert completion.country.lower() == self.EXPECTED_FRANCE_COUNTRY
        assert completion.capital.lower() == self.EXPECTED_FRANCE_CAPITAL
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_chat_models.py",
"license": "MIT License",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/tests/test_gemini_image.py | import asyncio
import base64
import io
import random
from PIL import Image, ImageDraw, ImageFont
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.google.serializer import GoogleMessageSerializer
from browser_use.llm.messages import (
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
SystemMessage,
UserMessage,
)
def create_random_text_image(text: str = 'hello world', width: int = 4000, height: int = 4000) -> str:
    """Render *text* centered on a randomly colored canvas and return it as a JPEG data URL."""
    # Random background color, one randint per RGB channel (same call order as before).
    background = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    canvas = Image.new('RGB', (width, height), background)
    draw = ImageDraw.Draw(canvas)

    # Prefer a TrueType font; PIL's built-in bitmap font is the fallback.
    try:
        font = ImageFont.truetype('arial.ttf', 24)
    except Exception:
        font = ImageFont.load_default()

    # Center the text using its bounding box.
    left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
    position = ((width - (right - left)) // 2, (height - (bottom - top)) // 2)

    # Per-channel complement of the background keeps the text legible.
    foreground = (255 - background[0], 255 - background[1], 255 - background[2])
    draw.text(position, text, fill=foreground, font=font)

    # Encode as base64 JPEG and wrap in a data URL.
    buffer = io.BytesIO()
    canvas.save(buffer, format='JPEG')
    encoded = base64.b64encode(buffer.getvalue()).decode()
    return f'data:image/jpeg;base64,{encoded}'
async def test_gemini_image_vision():
    """Test Gemini's ability to see and describe images."""
    # Create the LLM
    llm = ChatGoogle(model='gemini-2.0-flash-exp')

    # Render a probe image containing known text.
    image_data_url = create_random_text_image('Hello Gemini! Can you see this text?')

    # Build a multimodal prompt: system instruction + text question + image part.
    prompt: list[BaseMessage] = [
        SystemMessage(content='You are a helpful assistant that can see and describe images.'),
        UserMessage(
            content=[
                ContentPartTextParam(text='What do you see in this image? Please describe the text and any visual elements.'),
                ContentPartImageParam(image_url=ImageURL(url=image_data_url)),
            ]
        ),
    ]

    # Serialize messages for Google format
    formatted_messages, system_message = GoogleMessageSerializer().serialize_messages(prompt)
    print('Testing Gemini image vision...')
    print(f'System message: {system_message}')

    # Make the API call
    try:
        response = await llm.ainvoke(prompt)
        print('\n=== Gemini Response ===')
        print(response.completion)
        print(response.usage)
        print('=======================')
    except Exception as e:
        print(f'Error calling Gemini: {e}')
        print(f'Error type: {type(e)}')
# Allow running this vision smoke test directly as a script.
if __name__ == '__main__':
    asyncio.run(test_gemini_image_vision())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_gemini_image.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/tests/test_groq_loop.py | import asyncio
from browser_use.llm import ContentText
from browser_use.llm.groq.chat import ChatGroq
from browser_use.llm.messages import SystemMessage, UserMessage
# Module-level Groq client shared by main(); moderate temperature so repeated
# runs still vary enough to exercise the structured-output parser.
llm = ChatGroq(
    model='meta-llama/llama-4-maverick-17b-128e-instruct',
    temperature=0.5,
)
# llm = ChatOpenAI(model='gpt-4.1-mini')
async def main():
    """Repeatedly invoke the Groq model with tricky quoting to stress structured output."""
    from pydantic import BaseModel

    from browser_use.tokens.service import TokenCost

    # Register the LLM for token accounting; the handle itself is unused.
    tk = TokenCost().register_llm(llm)

    class Output(BaseModel):
        reasoning: str
        answer: str

    conversation = [
        SystemMessage(content='You are a helpful assistant that can answer questions and help with tasks.'),
        UserMessage(
            content=[
                ContentText(
                    text=r"Why is the sky blue? write exactly this into reasoning make sure to output ' with exactly like in the input : "
                ),
                ContentText(
                    text="""
The user's request is to find the lowest priced women's plus size one piece swimsuit in color black with a customer rating of at least 5 on Kohls.com. I am currently on the homepage of Kohls. The page has a search bar and various category links. To begin, I need to navigate to the women's section and search for swimsuits. I will start by clicking on the 'Women' category link."""
                ),
            ]
        ),
    ]

    divider = '-' * 50
    # Ten invocations in a row: apostrophes and quotes must survive every parse.
    for attempt in range(10):
        print(divider)
        print(f'start loop {attempt}')
        response = await llm.ainvoke(conversation, output_format=Output)
        parsed = response.completion
        print(f'start reasoning: {parsed.reasoning}')
        print(f'answer: {parsed.answer}')
        print(divider)
# Script entry point: run the stress loop once.
if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_groq_loop.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/tests/test_single_step.py | import logging
import os
import tempfile
import pytest
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.agent.service import Agent
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import DOMSelectorMap, EnhancedDOMTreeNode, NodeType, SerializedDOMState, SimplifiedNode
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.anthropic.chat import ChatAnthropic
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.base import BaseChatModel
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.groq.chat import ChatGroq
# Optional OCI import
try:
from browser_use.llm.oci_raw.chat import ChatOCIRaw
OCI_AVAILABLE = True
except ImportError:
ChatOCIRaw = None
OCI_AVAILABLE = False
from browser_use.llm.openai.chat import ChatOpenAI
# Set logging level to INFO for this module
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def _check_oci_credentials() -> bool:
    """Check if OCI credentials are available."""
    # Without the optional OCI integration installed there is nothing to check.
    if not OCI_AVAILABLE:
        return False
    # Loading the default profile is the cheapest way to validate credentials;
    # any failure (missing file, bad profile) means "not available".
    try:
        import oci

        oci.config.from_file('~/.oci/config', 'DEFAULT')
    except Exception:
        return False
    return True
def create_mock_state_message(temp_dir: str):
    """Create a mock state message with a single clickable element.

    Args:
        temp_dir: Directory backing the FileSystem instance; must outlive the
            returned message's use.

    Returns:
        The user message produced by AgentMessagePrompt for a fake page
        containing one clickable button.
    """
    # Create a mock DOM element with a single clickable button
    mock_button = EnhancedDOMTreeNode(
        node_id=1,
        backend_node_id=1,
        node_type=NodeType.ELEMENT_NODE,
        node_name='button',
        node_value='Click Me',
        attributes={'id': 'test-button'},
        is_scrollable=False,
        is_visible=True,
        absolute_position=None,
        session_id=None,
        target_id='ABCD1234ABCD1234ABCD1234ABCD1234ABCD1234',
        frame_id=None,
        content_document=None,
        shadow_root_type=None,
        shadow_roots=None,
        parent_node=None,
        children_nodes=None,
        ax_node=None,
        snapshot_node=None,
    )
    # Create selector map (keyed by backend_node_id)
    selector_map: DOMSelectorMap = {mock_button.backend_node_id: mock_button}
    # Create mock tab info with proper target_id
    # NOTE: target_id must match the node's target_id above so the state is consistent.
    mock_tab = TabInfo(
        target_id='ABCD1234ABCD1234ABCD1234ABCD1234ABCD1234',
        url='https://example.com',
        title='Test Page',
    )
    dom_state = SerializedDOMState(
        _root=SimplifiedNode(
            original_node=mock_button,
            children=[],
            should_display=True,
            is_interactive=True,
        ),
        selector_map=selector_map,
    )
    # Create mock browser state with required selector_map
    mock_browser_state = BrowserStateSummary(
        dom_state=dom_state,  # Using the actual DOM element
        url='https://example.com',
        title='Test Page',
        tabs=[mock_tab],
        screenshot='',  # Empty screenshot
        pixels_above=0,
        pixels_below=0,
    )
    # Create file system using the provided temp directory
    mock_file_system = FileSystem(temp_dir)
    # Create the agent message prompt
    agent_prompt = AgentMessagePrompt(
        browser_state_summary=mock_browser_state,
        file_system=mock_file_system,  # Now using actual FileSystem instance
        agent_history_description='',  # Empty history
        read_state_description='',  # Empty read state
        task='Click the button on the page',
        include_attributes=['id'],
        step_info=None,
        page_filtered_actions=None,
        max_clickable_elements_length=40000,
        sensitive_data=None,
    )
    # Override the clickable_elements_to_string method to return our simple element
    # so the serialized DOM is deterministic regardless of the real serializer.
    dom_state.llm_representation = lambda include_attributes=None: '[1]<button id="test-button">Click Me</button>'
    # Get the formatted message
    message = agent_prompt.get_user_message(use_vision=False)
    return message
# Pytest parameterized version
# Each tuple is (chat class, model identifier); the OCI entry instead carries a
# kwargs dict and is skipped unless real OCI credentials and env vars are present.
@pytest.mark.parametrize(
    'llm_class,model_name',
    [
        (ChatGroq, 'meta-llama/llama-4-maverick-17b-128e-instruct'),
        (ChatGoogle, 'gemini-2.0-flash-exp'),
        (ChatOpenAI, 'gpt-4.1-mini'),
        (ChatAnthropic, 'claude-3-5-sonnet-latest'),
        (ChatAzureOpenAI, 'gpt-4.1-mini'),
        pytest.param(
            ChatOCIRaw,
            {
                'model_id': os.getenv('OCI_MODEL_ID', 'placeholder'),
                'service_endpoint': os.getenv(
                    'OCI_SERVICE_ENDPOINT', 'https://inference.generativeai.us-chicago-1.oci.oraclecloud.com'
                ),
                'compartment_id': os.getenv('OCI_COMPARTMENT_ID', 'placeholder'),
                'provider': 'meta',
                'temperature': 0.7,
                'max_tokens': 800,
                'frequency_penalty': 0.0,
                'presence_penalty': 0.0,
                'top_p': 0.9,
                'auth_type': 'API_KEY',
                'auth_profile': 'DEFAULT',
            },
            marks=pytest.mark.skipif(
                not _check_oci_credentials() or not os.getenv('OCI_MODEL_ID') or not os.getenv('OCI_COMPARTMENT_ID'),
                reason='OCI credentials or environment variables not available',
            ),
        ),
    ],
)
async def test_single_step_parametrized(llm_class, model_name):
    """Test single step with different LLM providers using pytest parametrize."""
    if isinstance(model_name, dict):
        # Handle ChatOCIRaw which requires keyword arguments
        llm = llm_class(**model_name)
    else:
        llm = llm_class(model=model_name)
    agent = Agent(task='Click the button on the page', llm=llm)
    # Create temporary directory that will stay alive during the test
    with tempfile.TemporaryDirectory() as temp_dir:
        # Create mock state message
        mock_message = create_mock_state_message(temp_dir)
        # Inject the fake browser state so no real browser is needed.
        agent.message_manager._set_message_with_type(mock_message, 'state')
        messages = agent.message_manager.get_messages()
        # Test with simple question
        response = await llm.ainvoke(messages, agent.AgentOutput)
        # Additional validation for OCI Raw
        if ChatOCIRaw is not None and isinstance(llm, ChatOCIRaw):
            # Verify OCI Raw generates proper Agent actions
            assert response.completion.action is not None
            assert len(response.completion.action) > 0
        # Basic assertions to ensure response is valid
        assert response.completion is not None
        assert response.usage is not None
        assert response.usage.total_tokens > 0
async def test_single_step():
	"""Original test function that tests all models in a loop."""
	banner = '=' * 60

	# Create a list of models to test
	models: list[BaseChatModel] = [
		ChatGroq(model='meta-llama/llama-4-maverick-17b-128e-instruct'),
		ChatGoogle(model='gemini-2.0-flash-exp'),
		ChatOpenAI(model='gpt-4.1'),
		ChatAnthropic(model='claude-3-5-sonnet-latest'),  # Using haiku for cost efficiency
		ChatAzureOpenAI(model='gpt-4o-mini'),
	]

	for llm in models:
		print(f'\n{banner}')
		print(f'Testing with model: {llm.provider} - {llm.model}')
		print(f'{banner}\n')

		agent = Agent(task='Click the button on the page', llm=llm)

		# Keep the temporary directory alive while this model is exercised.
		with tempfile.TemporaryDirectory() as scratch_dir:
			state_msg = create_mock_state_message(scratch_dir)

			# Show what the mock state message looks like before invoking the model.
			print('Mock state message:')
			print(state_msg.content)
			print('\n' + '=' * 50 + '\n')

			agent.message_manager._set_message_with_type(state_msg, 'state')
			history = agent.message_manager.get_messages()

			# Invoke the model; failures are logged per-provider rather than aborting the loop.
			try:
				response = await llm.ainvoke(history, agent.AgentOutput)
				logger.info(f'Response from {llm.provider}: {response.completion}')
				logger.info(f'Actions: {str(response.completion.action)}')
			except Exception as e:
				logger.error(f'Error with {llm.provider}: {type(e).__name__}: {str(e)}')

		print(f'\n{banner}\n')
if __name__ == '__main__':
	# Allow running this test file directly (without pytest) against all models.
	import asyncio

	asyncio.run(test_single_step())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_single_step.py",
"license": "MIT License",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/views.py | from typing import Generic, TypeVar, Union
from pydantic import BaseModel
T = TypeVar('T', bound=Union[BaseModel, str])
class ChatInvokeUsage(BaseModel):
	"""
	Usage information for a chat model invocation.

	Token counts as reported by the provider; some fields are provider-specific
	(see the individual field docs below).
	"""

	prompt_tokens: int
	"""The number of tokens in the prompt (this includes the cached tokens as well. When calculating the cost, subtract the cached tokens from the prompt tokens)"""

	prompt_cached_tokens: int | None
	"""The number of cached tokens."""

	prompt_cache_creation_tokens: int | None
	"""Anthropic only: The number of tokens used to create the cache."""

	prompt_image_tokens: int | None
	"""Google only: The number of tokens in the image (prompt tokens is the text tokens + image tokens in that case)"""

	completion_tokens: int
	"""The number of tokens in the completion."""

	total_tokens: int
	"""The total number of tokens in the response."""
class ChatInvokeCompletion(BaseModel, Generic[T]):
	"""
	Response from a chat model invocation.

	Generic over ``T`` so ``completion`` can be either a parsed structured
	output (a BaseModel subclass) or a plain string.
	"""

	completion: T
	"""The completion of the response."""

	# Thinking stuff (populated only by providers that expose reasoning traces)
	thinking: str | None = None
	redacted_thinking: str | None = None

	usage: ChatInvokeUsage | None
	"""The usage of the response."""

	stop_reason: str | None = None
	"""The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'."""
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/views.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/mcp/__main__.py | """Entry point for running MCP server as a module.
Usage:
python -m browser_use.mcp
"""
import asyncio
from browser_use.mcp.server import main
if __name__ == '__main__':
	# Run the MCP server's async entry point until it exits.
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/mcp/__main__.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/mcp/client.py | """MCP (Model Context Protocol) client integration for browser-use.
This module provides integration between external MCP servers and browser-use's action registry.
MCP tools are dynamically discovered and registered as browser-use actions.
Example usage:
from browser_use import Tools
from browser_use.mcp.client import MCPClient
tools = Tools()
# Connect to an MCP server
mcp_client = MCPClient(
server_name="my-server",
command="npx",
args=["@mycompany/mcp-server@latest"]
)
# Register all MCP tools as browser-use actions
await mcp_client.register_to_tools(tools)
# Now use with Agent as normal - MCP tools are available as actions
"""
import asyncio
import logging
import time
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.telemetry import MCPClientTelemetryEvent, ProductTelemetry
from browser_use.tools.registry.service import Registry
from browser_use.tools.service import Tools
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
logger = logging.getLogger(__name__)
# Import MCP SDK
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
MCP_AVAILABLE = True
class MCPClient:
	"""Client for connecting to MCP servers and exposing their tools as browser-use actions.

	Each tool discovered from the server is wrapped in an async function and
	registered on a browser-use ``Registry`` so the agent can invoke it like any
	other action. The stdio connection is owned by a background task; public
	methods coordinate with it via ``_connected`` and ``_disconnect_event``.
	"""

	def __init__(
		self,
		server_name: str,
		command: str,
		args: list[str] | None = None,
		env: dict[str, str] | None = None,
	):
		"""Initialize MCP client.

		Args:
			server_name: Name of the MCP server (for logging and identification)
			command: Command to start the MCP server (e.g., "npx", "python")
			args: Arguments for the command (e.g., ["@playwright/mcp@latest"])
			env: Environment variables for the server process
		"""
		self.server_name = server_name
		self.command = command
		self.args = args or []
		self.env = env
		# Live MCP session; set by the background stdio task once initialized.
		self.session: ClientSession | None = None
		self._stdio_task = None
		self._read_stream = None
		self._write_stream = None
		# Tools discovered from the server, keyed by tool name.
		self._tools: dict[str, types.Tool] = {}
		# Action names already registered, to avoid double registration.
		self._registered_actions: set[str] = set()
		self._connected = False
		# Signals the background stdio task to tear the connection down.
		self._disconnect_event = asyncio.Event()
		self._telemetry = ProductTelemetry()

	async def connect(self) -> None:
		"""Connect to the MCP server and discover available tools.

		Raises:
			RuntimeError: if the background task does not report a connection
				within the polling timeout (~10 seconds).
		"""
		if self._connected:
			logger.debug(f'Already connected to {self.server_name}')
			return

		start_time = time.time()
		error_msg = None
		try:
			logger.info(f"🔌 Connecting to MCP server '{self.server_name}': {self.command} {' '.join(self.args)}")

			# Create server parameters
			server_params = StdioServerParameters(command=self.command, args=self.args, env=self.env)

			# Start stdio client in background task
			self._stdio_task = create_task_with_error_handling(
				self._run_stdio_client(server_params), name='mcp_stdio_client', suppress_exceptions=True
			)

			# Wait for connection to be established: poll the _connected flag,
			# which the background task flips after session.initialize().
			retries = 0
			max_retries = 100  # 10 second timeout (increased for parallel test execution)
			while not self._connected and retries < max_retries:
				await asyncio.sleep(0.1)
				retries += 1

			if not self._connected:
				error_msg = f"Failed to connect to MCP server '{self.server_name}' after {max_retries * 0.1} seconds"
				raise RuntimeError(error_msg)

			logger.info(f"📦 Discovered {len(self._tools)} tools from '{self.server_name}': {list(self._tools.keys())}")
		except Exception as e:
			error_msg = str(e)
			raise
		finally:
			# Capture telemetry for connect action (success or failure).
			duration = time.time() - start_time
			self._telemetry.capture(
				MCPClientTelemetryEvent(
					server_name=self.server_name,
					command=self.command,
					tools_discovered=len(self._tools),
					version=get_browser_use_version(),
					action='connect',
					duration_seconds=duration,
					error_message=error_msg,
				)
			)

	async def _run_stdio_client(self, server_params: StdioServerParameters):
		"""Run the stdio client connection in a background task.

		Holds the stdio transport and ClientSession open until
		``_disconnect_event`` is set, then lets the context managers clean up.
		"""
		try:
			async with stdio_client(server_params) as (read_stream, write_stream):
				self._read_stream = read_stream
				self._write_stream = write_stream

				# Create and initialize session
				async with ClientSession(read_stream, write_stream) as session:
					self.session = session

					# Initialize the connection
					await session.initialize()

					# Discover available tools
					tools_response = await session.list_tools()
					self._tools = {tool.name: tool for tool in tools_response.tools}

					# Mark as connected (observed by connect()'s polling loop)
					self._connected = True

					# Keep the connection alive until disconnect is called
					await self._disconnect_event.wait()
		except Exception as e:
			logger.error(f'MCP server connection error: {e}')
			self._connected = False
			raise
		finally:
			self._connected = False
			self.session = None

	async def disconnect(self) -> None:
		"""Disconnect from the MCP server."""
		if not self._connected:
			return

		start_time = time.time()
		error_msg = None
		try:
			logger.info(f"🔌 Disconnecting from MCP server '{self.server_name}'")

			# Signal disconnect to the background stdio task.
			self._connected = False
			self._disconnect_event.set()

			# Wait for stdio task to finish; cancel it if it doesn't stop in time.
			if self._stdio_task:
				try:
					await asyncio.wait_for(self._stdio_task, timeout=2.0)
				except TimeoutError:
					logger.warning(f"Timeout waiting for MCP server '{self.server_name}' to disconnect")
					self._stdio_task.cancel()
					try:
						await self._stdio_task
					except asyncio.CancelledError:
						pass

			self._tools.clear()
			self._registered_actions.clear()
		except Exception as e:
			error_msg = str(e)
			logger.error(f'Error disconnecting from MCP server: {e}')
		finally:
			# Capture telemetry for disconnect action
			duration = time.time() - start_time
			self._telemetry.capture(
				MCPClientTelemetryEvent(
					server_name=self.server_name,
					command=self.command,
					tools_discovered=0,  # Tools cleared on disconnect
					version=get_browser_use_version(),
					action='disconnect',
					duration_seconds=duration,
					error_message=error_msg,
				)
			)
			self._telemetry.flush()

	async def register_to_tools(
		self,
		tools: Tools,
		tool_filter: list[str] | None = None,
		prefix: str | None = None,
	) -> None:
		"""Register MCP tools as actions in the browser-use tools.

		Connects first if not already connected.

		Args:
			tools: Browser-use tools to register actions to
			tool_filter: Optional list of tool names to register (None = all tools)
			prefix: Optional prefix to add to action names (e.g., "playwright_")
		"""
		if not self._connected:
			await self.connect()

		registry = tools.registry

		for tool_name, tool in self._tools.items():
			# Skip if not in filter
			if tool_filter and tool_name not in tool_filter:
				continue

			# Apply prefix if specified
			action_name = f'{prefix}{tool_name}' if prefix else tool_name

			# Skip if already registered
			if action_name in self._registered_actions:
				continue

			# Register the tool as an action
			self._register_tool_as_action(registry, action_name, tool)
			self._registered_actions.add(action_name)

		logger.info(f"✅ Registered {len(self._registered_actions)} MCP tools from '{self.server_name}' as browser-use actions")

	def _register_tool_as_action(self, registry: Registry, action_name: str, tool: Any) -> None:
		"""Register a single MCP tool as a browser-use action.

		Builds a pydantic parameter model from the tool's JSON Schema, wraps the
		tool call in an async function, and registers it on the registry.

		Args:
			registry: Browser-use registry to register action to
			action_name: Name for the registered action
			tool: MCP Tool object with schema information
		"""
		# Parse tool parameters to create Pydantic model
		param_fields = {}

		if tool.inputSchema:
			# MCP tools use JSON Schema for parameters
			properties = tool.inputSchema.get('properties', {})
			required = set(tool.inputSchema.get('required', []))

			for param_name, param_schema in properties.items():
				# Convert JSON Schema type to Python type
				param_type = self._json_schema_to_python_type(param_schema, f'{action_name}_{param_name}')

				# Determine if field is required and handle defaults
				if param_name in required:
					default = ...  # Required field
				else:
					# Optional field - make type optional and handle default
					param_type = param_type | None
					if 'default' in param_schema:
						default = param_schema['default']
					else:
						default = None

				# Add field with description if available
				field_kwargs = {}
				if 'description' in param_schema:
					field_kwargs['description'] = param_schema['description']

				param_fields[param_name] = (param_type, Field(default, **field_kwargs))

		# Create Pydantic model for the tool parameters
		if param_fields:
			# Create a BaseModel class with proper configuration (reject unknown keys)
			class ConfiguredBaseModel(BaseModel):
				model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)

			param_model = create_model(f'{action_name}_Params', __base__=ConfiguredBaseModel, **param_fields)
		else:
			# No parameters - create empty model
			param_model = None

		# Determine if this is a browser-specific tool
		# NOTE(review): is_browser_tool is computed but never used below - filtering
		# is domain-based only now; confirm before removing.
		is_browser_tool = tool.name.startswith('browser_') or 'page' in tool.name.lower()

		# Set up action filters
		domains = None
		# Note: page_filter has been removed since we no longer use Page objects
		# Browser tools filtering would need to be done via domain filters instead

		# Create async wrapper function for the MCP tool
		# Need to define function with explicit parameters to satisfy registry validation
		if param_model:
			# Type 1: Function takes param model as first parameter
			async def mcp_action_wrapper(params: param_model) -> ActionResult:  # type: ignore[no-redef]
				"""Wrapper function that calls the MCP tool."""
				if not self.session or not self._connected:
					return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)

				# Convert pydantic model to dict for MCP call
				tool_params = params.model_dump(exclude_none=True)

				logger.debug(f"🔧 Calling MCP tool '{tool.name}' with params: {tool_params}")

				start_time = time.time()
				error_msg = None
				try:
					# Call the MCP tool
					result = await self.session.call_tool(tool.name, tool_params)

					# Convert MCP result to ActionResult
					extracted_content = self._format_mcp_result(result)
					return ActionResult(
						extracted_content=extracted_content,
						long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
						include_extracted_content_only_once=True,
					)
				except Exception as e:
					error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
					logger.error(error_msg)
					return ActionResult(error=error_msg, success=False)
				finally:
					# Capture telemetry for tool call
					duration = time.time() - start_time
					self._telemetry.capture(
						MCPClientTelemetryEvent(
							server_name=self.server_name,
							command=self.command,
							tools_discovered=len(self._tools),
							version=get_browser_use_version(),
							action='tool_call',
							tool_name=tool.name,
							duration_seconds=duration,
							error_message=error_msg,
						)
					)
		else:
			# No parameters - empty function signature
			async def mcp_action_wrapper() -> ActionResult:  # type: ignore[no-redef]
				"""Wrapper function that calls the MCP tool."""
				if not self.session or not self._connected:
					return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)

				logger.debug(f"🔧 Calling MCP tool '{tool.name}' with no params")

				start_time = time.time()
				error_msg = None
				try:
					# Call the MCP tool with empty params
					result = await self.session.call_tool(tool.name, {})

					# Convert MCP result to ActionResult
					extracted_content = self._format_mcp_result(result)
					return ActionResult(
						extracted_content=extracted_content,
						long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
						include_extracted_content_only_once=True,
					)
				except Exception as e:
					error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
					logger.error(error_msg)
					return ActionResult(error=error_msg, success=False)
				finally:
					# Capture telemetry for tool call
					duration = time.time() - start_time
					self._telemetry.capture(
						MCPClientTelemetryEvent(
							server_name=self.server_name,
							command=self.command,
							tools_discovered=len(self._tools),
							version=get_browser_use_version(),
							action='tool_call',
							tool_name=tool.name,
							duration_seconds=duration,
							error_message=error_msg,
						)
					)

		# Set function metadata for better debugging
		mcp_action_wrapper.__name__ = action_name
		mcp_action_wrapper.__qualname__ = f'mcp.{self.server_name}.{action_name}'

		# Register the action with browser-use
		description = tool.description or f'MCP tool from {self.server_name}: {tool.name}'

		# Use the registry's action decorator
		registry.action(description=description, param_model=param_model, domains=domains)(mcp_action_wrapper)

		logger.debug(f"✅ Registered MCP tool '{tool.name}' as action '{action_name}'")

	def _format_mcp_result(self, result: Any) -> str:
		"""Format MCP tool result into a string for ActionResult.

		Args:
			result: Raw result from MCP tool call

		Returns:
			Formatted string representation of the result
		"""
		# Handle different MCP result formats
		if hasattr(result, 'content'):
			# Structured content response
			if isinstance(result.content, list):
				# Multiple content items
				parts = []
				for item in result.content:
					if hasattr(item, 'text'):
						parts.append(item.text)
					elif hasattr(item, 'type') and item.type == 'text':
						# NOTE(review): this branch is identical to the else branch
						# below - presumably a different extraction was intended; confirm.
						parts.append(str(item))
					else:
						parts.append(str(item))
				return '\n'.join(parts)
			else:
				return str(result.content)
		elif isinstance(result, list):
			# List of content items
			parts = []
			for item in result:
				if hasattr(item, 'text'):
					parts.append(item.text)
				else:
					parts.append(str(item))
			return '\n'.join(parts)
		else:
			# Direct result or unknown format
			return str(result)

	def _json_schema_to_python_type(self, schema: dict, model_name: str = 'NestedModel') -> Any:
		"""Convert JSON Schema type to Python type.

		Recurses into objects (building nested pydantic models) and arrays
		(building typed lists).

		Args:
			schema: JSON Schema definition
			model_name: Name for nested models

		Returns:
			Python type corresponding to the schema
		"""
		json_type = schema.get('type', 'string')

		# Basic type mapping
		type_mapping = {
			'string': str,
			'number': float,
			'integer': int,
			'boolean': bool,
			'array': list,
			'null': type(None),
		}

		# Handle enums (they're still strings)
		if 'enum' in schema:
			return str

		# Handle objects with nested properties
		if json_type == 'object':
			properties = schema.get('properties', {})
			if properties:
				# Create nested pydantic model for objects with properties
				nested_fields = {}
				required_fields = set(schema.get('required', []))

				for prop_name, prop_schema in properties.items():
					# Recursively process nested properties
					prop_type = self._json_schema_to_python_type(prop_schema, f'{model_name}_{prop_name}')

					# Determine if field is required and handle defaults
					if prop_name in required_fields:
						default = ...  # Required field
					else:
						# Optional field - make type optional and handle default
						prop_type = prop_type | None
						if 'default' in prop_schema:
							default = prop_schema['default']
						else:
							default = None

					# Add field with description if available
					field_kwargs = {}
					if 'description' in prop_schema:
						field_kwargs['description'] = prop_schema['description']

					nested_fields[prop_name] = (prop_type, Field(default, **field_kwargs))

				# Create a BaseModel class with proper configuration
				class ConfiguredBaseModel(BaseModel):
					model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)

				try:
					# Create and return nested pydantic model
					return create_model(model_name, __base__=ConfiguredBaseModel, **nested_fields)
				except Exception as e:
					logger.error(f'Failed to create nested model {model_name}: {e}')
					logger.debug(f'Fields: {nested_fields}')
					# Fallback to basic dict if model creation fails
					return dict
			else:
				# Object without properties - just return dict
				return dict

		# Handle arrays with specific item types
		if json_type == 'array':
			if 'items' in schema:
				# Get the item type recursively
				item_type = self._json_schema_to_python_type(schema['items'], f'{model_name}_item')
				# Return properly typed list
				return list[item_type]
			else:
				# Array without item type specification
				return list

		# Get base type for non-object types
		base_type = type_mapping.get(json_type, str)

		# Handle nullable/optional types
		if schema.get('nullable', False) or json_type == 'null':
			return base_type | None

		return base_type

	async def __aenter__(self):
		"""Async context manager entry."""
		await self.connect()
		return self

	async def __aexit__(self, exc_type, exc_val, exc_tb):
		"""Async context manager exit."""
		await self.disconnect()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/mcp/client.py",
"license": "MIT License",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/mcp/controller.py | """MCP (Model Context Protocol) tool wrapper for browser-use.
This module provides integration between MCP tools and browser-use's action registry system.
MCP tools are dynamically discovered and registered as browser-use actions.
"""
import asyncio
import logging
from typing import Any
from pydantic import Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.tools.registry.service import Registry
logger = logging.getLogger(__name__)
try:
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import TextContent, Tool
MCP_AVAILABLE = True
except ImportError:
MCP_AVAILABLE = False
logger.warning('MCP SDK not installed. Install with: pip install mcp')
class MCPToolWrapper:
	"""Wrapper to integrate MCP tools as browser-use actions."""

	def __init__(self, registry: Registry, mcp_command: str, mcp_args: list[str] | None = None):
		"""Initialize MCP tool wrapper.

		Args:
			registry: Browser-use action registry to register MCP tools
			mcp_command: Command to start MCP server (e.g., "npx")
			mcp_args: Arguments for MCP command (e.g., ["@playwright/mcp@latest"])

		Raises:
			ImportError: if the MCP SDK is not installed.
		"""
		if not MCP_AVAILABLE:
			raise ImportError('MCP SDK not installed. Install with: pip install mcp')

		self.registry = registry
		self.mcp_command = mcp_command
		self.mcp_args = mcp_args or []
		self.session: ClientSession | None = None
		self._tools: dict[str, Tool] = {}
		self._registered_actions: set[str] = set()
		# Set by disconnect() to release the session held open in connect().
		self._shutdown_event = asyncio.Event()

	async def connect(self):
		"""Connect to MCP server and discover available tools.

		NOTE(review): this coroutine holds the stdio session open inside nested
		context managers and does not return until disconnect() sets the shutdown
		event - callers should confirm this blocking behavior is intended.
		"""
		if self.session:
			return  # Already connected

		logger.info(f'🔌 Connecting to MCP server: {self.mcp_command} {" ".join(self.mcp_args)}')

		# Create server parameters
		server_params = StdioServerParameters(command=self.mcp_command, args=self.mcp_args, env=None)

		# Connect to the MCP server
		async with stdio_client(server_params) as (read, write):
			async with ClientSession(read, write) as session:
				self.session = session

				# Initialize the connection
				await session.initialize()

				# Discover available tools
				tools_response = await session.list_tools()
				self._tools = {tool.name: tool for tool in tools_response.tools}

				logger.info(f'📦 Discovered {len(self._tools)} MCP tools: {list(self._tools.keys())}')

				# Register all discovered tools as actions
				for tool_name, tool in self._tools.items():
					self._register_tool_as_action(tool_name, tool)

				# Keep session alive while tools are being used
				await self._keep_session_alive()

	async def _keep_session_alive(self):
		"""Keep the MCP session alive."""
		# This will block until the session is closed
		# In practice, you'd want to manage this lifecycle better
		try:
			await self._shutdown_event.wait()
		except asyncio.CancelledError:
			pass

	def _register_tool_as_action(self, tool_name: str, tool: Tool):
		"""Register an MCP tool as a browser-use action.

		Args:
			tool_name: Name of the MCP tool
			tool: MCP Tool object with schema information
		"""
		if tool_name in self._registered_actions:
			return  # Already registered

		# Parse tool parameters to create Pydantic model
		param_fields = {}

		if tool.inputSchema:
			# MCP tools use JSON Schema for parameters
			properties = tool.inputSchema.get('properties', {})
			required = set(tool.inputSchema.get('required', []))

			for param_name, param_schema in properties.items():
				# Convert JSON Schema type to Python type
				param_type = self._json_schema_to_python_type(param_schema)

				# Determine if field is required
				if param_name in required:
					default = ...  # Required field
				else:
					default = param_schema.get('default', None)

				# Add field description if available
				field_kwargs = {}
				if 'description' in param_schema:
					field_kwargs['description'] = param_schema['description']

				param_fields[param_name] = (param_type, Field(default, **field_kwargs))

		# Create Pydantic model for the tool parameters
		param_model = create_model(f'{tool_name}_Params', **param_fields) if param_fields else None

		# Determine if this is a browser-specific tool
		# NOTE(review): is_browser_tool is computed but not used below; confirm
		# before removing.
		is_browser_tool = tool_name.startswith('browser_')
		domains = None

		# Note: page_filter has been removed since we no longer use Page objects

		# Create wrapper function for the MCP tool
		async def mcp_action_wrapper(**kwargs):
			"""Wrapper function that calls the MCP tool."""
			if not self.session:
				raise RuntimeError(f'MCP session not connected for tool {tool_name}')

			# Extract parameters (excluding special injected params)
			special_params = {
				'page',
				'browser_session',
				'context',
				'page_extraction_llm',
				'file_system',
				'available_file_paths',
				'has_sensitive_data',
				'browser',
				'browser_context',
			}

			tool_params = {k: v for k, v in kwargs.items() if k not in special_params}

			logger.debug(f'🔧 Calling MCP tool {tool_name} with params: {tool_params}')

			try:
				# Call the MCP tool
				result = await self.session.call_tool(tool_name, tool_params)

				# Convert MCP result to ActionResult
				# MCP tools return results in various formats
				if hasattr(result, 'content'):
					# Handle structured content responses
					if isinstance(result.content, list):
						# Multiple content items
						content_parts = []
						for item in result.content:
							if isinstance(item, TextContent):
								content_parts.append(item.text)  # type: ignore[reportAttributeAccessIssue]
							else:
								content_parts.append(str(item))
						extracted_content = '\n'.join(content_parts)
					else:
						extracted_content = str(result.content)
				else:
					# Direct result
					extracted_content = str(result)

				return ActionResult(extracted_content=extracted_content)

			except Exception as e:
				logger.error(f'❌ MCP tool {tool_name} failed: {e}')
				return ActionResult(extracted_content=f'MCP tool {tool_name} failed: {str(e)}', error=str(e))

		# Set function name for better debugging
		mcp_action_wrapper.__name__ = tool_name
		mcp_action_wrapper.__qualname__ = f'mcp.{tool_name}'

		# Register the action with browser-use
		description = tool.description or f'MCP tool: {tool_name}'

		# Use the decorator to register the action
		# (the returned decorated function is not needed here; registration is the side effect)
		decorated_wrapper = self.registry.action(description=description, param_model=param_model, domains=domains)(
			mcp_action_wrapper
		)

		self._registered_actions.add(tool_name)
		logger.info(f'✅ Registered MCP tool as action: {tool_name}')

	async def disconnect(self):
		"""Disconnect from the MCP server and clean up resources."""
		self._shutdown_event.set()
		if self.session:
			# Session cleanup will be handled by the context manager
			self.session = None

	def _json_schema_to_python_type(self, schema: dict) -> Any:
		"""Convert JSON Schema type to Python type.

		Args:
			schema: JSON Schema definition

		Returns:
			Python type corresponding to the schema
		"""
		json_type = schema.get('type', 'string')

		type_mapping = {
			'string': str,
			'number': float,
			'integer': int,
			'boolean': bool,
			'array': list,
			'object': dict,
		}

		base_type = type_mapping.get(json_type, str)

		# Handle nullable types
		if schema.get('nullable', False):
			return base_type | None

		return base_type
# Convenience function for easy integration
async def register_mcp_tools(registry: Registry, mcp_command: str, mcp_args: list[str] | None = None) -> MCPToolWrapper:
	"""Connect an MCP server and expose its tools on a browser-use registry.

	Args:
		registry: Browser-use action registry to receive the MCP tools
		mcp_command: Command used to launch the MCP server (e.g. 'npx')
		mcp_args: Extra arguments passed to the command

	Returns:
		The connected MCPToolWrapper instance.

	Example:
		```python
		from browser_use import Tools
		from browser_use.mcp.tools import register_mcp_tools

		tools = Tools()

		# Register Playwright MCP tools; they become regular browser-use actions.
		mcp = await register_mcp_tools(tools.registry, 'npx', ['@playwright/mcp@latest', '--headless'])
		```
	"""
	tool_wrapper = MCPToolWrapper(registry, mcp_command, mcp_args)
	await tool_wrapper.connect()
	return tool_wrapper
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/mcp/controller.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/mcp/server.py | """MCP Server for browser-use - exposes browser automation capabilities via Model Context Protocol.
This server provides tools for:
- Running autonomous browser tasks with an AI agent
- Direct browser control (navigation, clicking, typing, etc.)
- Content extraction from web pages
- File system operations
Usage:
uvx browser-use --mcp
Or as an MCP server in Claude Desktop or other MCP clients:
{
"mcpServers": {
"browser-use": {
"command": "uvx",
"args": ["browser-use[cli]", "--mcp"],
"env": {
"OPENAI_API_KEY": "sk-proj-1234567890",
}
}
}
}
"""
import os
import sys
# Set environment variables BEFORE any browser_use imports to prevent early logging
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any
from browser_use.llm import ChatAWSBedrock
# Configure logging for MCP mode - redirect to stderr but preserve critical diagnostics
logging.basicConfig(
stream=sys.stderr, level=logging.WARNING, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', force=True
)
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
# Add browser-use to path if running from source
sys.path.insert(0, str(Path(__file__).parent.parent))
# Import and configure logging to use stderr before other imports
from browser_use.logging_config import setup_logging
def _configure_mcp_server_logging():
	"""Configure logging for MCP server mode - redirect all logs to stderr to prevent JSON RPC interference.

	MCP speaks JSON-RPC over stdout, so any stray log line on stdout would
	corrupt the protocol stream; all logging must be routed to stderr.
	"""
	# Set environment to suppress browser-use logging during server mode
	os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'warning'
	os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'  # Prevent automatic logging setup

	# Configure logging to stderr for MCP mode - preserve warnings and above for troubleshooting
	setup_logging(stream=sys.stderr, log_level='warning', force_setup=True)

	# Also configure the root logger and all existing loggers to use stderr
	logging.root.handlers = []
	stderr_handler = logging.StreamHandler(sys.stderr)
	stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
	logging.root.addHandler(stderr_handler)
	logging.root.setLevel(logging.CRITICAL)

	# Configure all existing loggers to use stderr and CRITICAL level
	# (propagate=False so nothing bubbles up to a stdout handler added later)
	for name in list(logging.root.manager.loggerDict.keys()):
		logger_obj = logging.getLogger(name)
		logger_obj.handlers = []
		logger_obj.setLevel(logging.CRITICAL)
		logger_obj.addHandler(stderr_handler)
		logger_obj.propagate = False
# Configure MCP server logging before any browser_use imports to capture early log lines
_configure_mcp_server_logging()
# Additional suppression - disable all logging completely for MCP mode
logging.disable(logging.CRITICAL)
# Import browser_use modules
from browser_use import ActionModel, Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.config import get_default_llm, get_default_profile, load_browser_use_config
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.openai.chat import ChatOpenAI
from browser_use.tools.service import Tools
logger = logging.getLogger(__name__)
def _ensure_all_loggers_use_stderr():
"""Ensure ALL loggers only output to stderr, not stdout."""
# Get the stderr handler
stderr_handler = None
for handler in logging.root.handlers:
if hasattr(handler, 'stream') and handler.stream == sys.stderr: # type: ignore
stderr_handler = handler
break
if not stderr_handler:
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# Configure root logger
logging.root.handlers = [stderr_handler]
logging.root.setLevel(logging.CRITICAL)
# Configure all existing loggers
for name in list(logging.root.manager.loggerDict.keys()):
logger_obj = logging.getLogger(name)
logger_obj.handlers = [stderr_handler]
logger_obj.setLevel(logging.CRITICAL)
logger_obj.propagate = False
# Ensure stderr logging after all imports
_ensure_all_loggers_use_stderr()
# Try to import MCP SDK
# The server cannot function without the MCP SDK, so a failed import is fatal.
try:
	import mcp.server.stdio
	import mcp.types as types
	from mcp.server import NotificationOptions, Server
	from mcp.server.models import InitializationOptions

	MCP_AVAILABLE = True

	# Configure MCP SDK logging to stderr as well (reuse the root stderr handler if present)
	mcp_logger = logging.getLogger('mcp')
	mcp_logger.handlers = []
	mcp_logger.addHandler(logging.root.handlers[0] if logging.root.handlers else logging.StreamHandler(sys.stderr))
	mcp_logger.setLevel(logging.ERROR)
	mcp_logger.propagate = False
except ImportError:
	MCP_AVAILABLE = False
	logger.error('MCP SDK not installed. Install with: pip install mcp')
	# Hard exit: there is no degraded mode for the MCP server without the SDK.
	sys.exit(1)
from browser_use.telemetry import MCPServerTelemetryEvent, ProductTelemetry
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
def get_parent_process_cmdline() -> str | None:
    """Get the command line of all parent processes up the chain.

    Returns a ';'-joined string of ancestor command lines, or None when
    psutil is unavailable or nothing could be collected.
    """
    if not PSUTIL_AVAILABLE:
        return None
    try:
        collected: list[str] = []
        ancestor = psutil.Process().parent()
        while ancestor:
            try:
                args = ancestor.cmdline()
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                # Skip processes we can't access (like system processes)
                args = None
            if args:
                collected.append(' '.join(args))
            try:
                ancestor = ancestor.parent()
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                # Can't go further up the chain
                break
        return ';'.join(collected) if collected else None
    except Exception:
        # Parent info is best-effort telemetry only; any other failure yields None
        return None
class BrowserUseServer:
    """MCP Server for browser-use capabilities."""

    def __init__(self, session_timeout_minutes: int = 10):
        """Create the server and register its MCP handlers.

        Args:
            session_timeout_minutes: Idle time after which a tracked browser
                session is auto-closed by the background cleanup task.
        """
        # Ensure all logging goes to stderr (in case new loggers were created)
        _ensure_all_loggers_use_stderr()
        self.server = Server('browser-use')
        self.config = load_browser_use_config()
        # Lazily-initialized resources; created on the first tool call that needs them
        self.agent: Agent | None = None
        self.browser_session: BrowserSession | None = None
        self.tools: Tools | None = None
        self.llm: ChatOpenAI | None = None
        self.file_system: FileSystem | None = None
        self._telemetry = ProductTelemetry()
        self._start_time = time.time()
        # Session management
        self.active_sessions: dict[str, dict[str, Any]] = {}  # session_id -> session info
        self.session_timeout_minutes = session_timeout_minutes
        self._cleanup_task: Any = None
        # Setup handlers
        self._setup_handlers()
def _setup_handlers(self):
    """Setup MCP server handlers.

    Registers list_tools / list_resources / list_prompts and the call_tool
    dispatcher on ``self.server``. The handlers are nested async closures so
    they can capture ``self``.
    """

    @self.server.list_tools()
    async def handle_list_tools() -> list[types.Tool]:
        """List all available browser-use tools."""
        return [
            # Agent tools
            # Direct browser control tools
            types.Tool(
                name='browser_navigate',
                description='Navigate to a URL in the browser',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'url': {'type': 'string', 'description': 'The URL to navigate to'},
                        'new_tab': {'type': 'boolean', 'description': 'Whether to open in a new tab', 'default': False},
                    },
                    'required': ['url'],
                },
            ),
            types.Tool(
                name='browser_click',
                description='Click an element by index or at specific viewport coordinates. Use index for elements from browser_get_state, or coordinate_x/coordinate_y for pixel-precise clicking.',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'index': {
                            'type': 'integer',
                            'description': 'The index of the element to click (from browser_get_state). Use this OR coordinates.',
                        },
                        'coordinate_x': {
                            'type': 'integer',
                            'description': 'X coordinate (pixels from left edge of viewport). Use with coordinate_y.',
                        },
                        'coordinate_y': {
                            'type': 'integer',
                            'description': 'Y coordinate (pixels from top edge of viewport). Use with coordinate_x.',
                        },
                        'new_tab': {
                            'type': 'boolean',
                            'description': 'Whether to open any resulting navigation in a new tab',
                            'default': False,
                        },
                    },
                    # Exactly one addressing mode: element index OR a coordinate pair
                    'oneOf': [
                        {'required': ['index']},
                        {'required': ['coordinate_x', 'coordinate_y']},
                    ],
                },
            ),
            types.Tool(
                name='browser_type',
                description='Type text into an input field',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'index': {
                            'type': 'integer',
                            'description': 'The index of the input element (from browser_get_state)',
                        },
                        'text': {'type': 'string', 'description': 'The text to type'},
                    },
                    'required': ['index', 'text'],
                },
            ),
            types.Tool(
                name='browser_get_state',
                description='Get the current state of the page including all interactive elements',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'include_screenshot': {
                            'type': 'boolean',
                            'description': 'Whether to include a screenshot of the current page',
                            'default': False,
                        }
                    },
                },
            ),
            types.Tool(
                name='browser_extract_content',
                description='Extract structured content from the current page based on a query',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'query': {'type': 'string', 'description': 'What information to extract from the page'},
                        'extract_links': {
                            'type': 'boolean',
                            'description': 'Whether to include links in the extraction',
                            'default': False,
                        },
                    },
                    'required': ['query'],
                },
            ),
            types.Tool(
                name='browser_get_html',
                description='Get the raw HTML of the current page or a specific element by CSS selector',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'selector': {
                            'type': 'string',
                            'description': 'Optional CSS selector to get HTML of a specific element. If omitted, returns full page HTML.',
                        },
                    },
                },
            ),
            types.Tool(
                name='browser_screenshot',
                description='Take a screenshot of the current page. Returns viewport metadata as text and the screenshot as an image.',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'full_page': {
                            'type': 'boolean',
                            'description': 'Whether to capture the full scrollable page or just the visible viewport',
                            'default': False,
                        },
                    },
                },
            ),
            types.Tool(
                name='browser_scroll',
                description='Scroll the page',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'direction': {
                            'type': 'string',
                            'enum': ['up', 'down'],
                            'description': 'Direction to scroll',
                            'default': 'down',
                        }
                    },
                },
            ),
            types.Tool(
                name='browser_go_back',
                description='Go back to the previous page',
                inputSchema={'type': 'object', 'properties': {}},
            ),
            # Tab management
            types.Tool(
                name='browser_list_tabs', description='List all open tabs', inputSchema={'type': 'object', 'properties': {}}
            ),
            types.Tool(
                name='browser_switch_tab',
                description='Switch to a different tab',
                inputSchema={
                    'type': 'object',
                    'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to switch to'}},
                    'required': ['tab_id'],
                },
            ),
            types.Tool(
                name='browser_close_tab',
                description='Close a tab',
                inputSchema={
                    'type': 'object',
                    'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to close'}},
                    'required': ['tab_id'],
                },
            ),
            # NOTE: 'browser_close' is deliberately not advertised here, but _execute_tool
            # still dispatches it if called.
            # types.Tool(
            #     name="browser_close",
            #     description="Close the browser session",
            #     inputSchema={
            #         "type": "object",
            #         "properties": {}
            #     }
            # ),
            types.Tool(
                name='retry_with_browser_use_agent',
                description='Retry a task using the browser-use agent. Only use this as a last resort if you fail to interact with a page multiple times.',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'task': {
                            'type': 'string',
                            'description': 'The high-level goal and detailed step-by-step description of the task the AI browser agent needs to attempt, along with any relevant data needed to complete the task and info about previous attempts.',
                        },
                        'max_steps': {
                            'type': 'integer',
                            'description': 'Maximum number of steps an agent can take.',
                            'default': 100,
                        },
                        'model': {
                            'type': 'string',
                            'description': 'LLM model to use (e.g., gpt-4o, claude-3-opus-20240229). Defaults to the configured model.',
                        },
                        'allowed_domains': {
                            'type': 'array',
                            'items': {'type': 'string'},
                            'description': 'List of domains the agent is allowed to visit (security feature)',
                            'default': [],
                        },
                        'use_vision': {
                            'type': 'boolean',
                            'description': 'Whether to use vision capabilities (screenshots) for the agent',
                            'default': True,
                        },
                    },
                    'required': ['task'],
                },
            ),
            # Browser session management tools
            types.Tool(
                name='browser_list_sessions',
                description='List all active browser sessions with their details and last activity time',
                inputSchema={'type': 'object', 'properties': {}},
            ),
            types.Tool(
                name='browser_close_session',
                description='Close a specific browser session by its ID',
                inputSchema={
                    'type': 'object',
                    'properties': {
                        'session_id': {
                            'type': 'string',
                            'description': 'The browser session ID to close (get from browser_list_sessions)',
                        }
                    },
                    'required': ['session_id'],
                },
            ),
            types.Tool(
                name='browser_close_all',
                description='Close all active browser sessions and clean up resources',
                inputSchema={'type': 'object', 'properties': {}},
            ),
        ]

    @self.server.list_resources()
    async def handle_list_resources() -> list[types.Resource]:
        """List available resources (none for browser-use)."""
        return []

    @self.server.list_prompts()
    async def handle_list_prompts() -> list[types.Prompt]:
        """List available prompts (none for browser-use)."""
        return []

    @self.server.call_tool()
    async def handle_call_tool(name: str, arguments: dict[str, Any] | None) -> list[types.TextContent | types.ImageContent]:
        """Handle tool execution."""
        start_time = time.time()
        error_msg = None
        try:
            result = await self._execute_tool(name, arguments or {})
            # Tools with image output already return a content list; wrap plain strings
            if isinstance(result, list):
                return result
            return [types.TextContent(type='text', text=result)]
        except Exception as e:
            error_msg = str(e)
            logger.error(f'Tool execution failed: {e}', exc_info=True)
            return [types.TextContent(type='text', text=f'Error: {str(e)}')]
        finally:
            # Capture telemetry for tool calls
            duration = time.time() - start_time
            self._telemetry.capture(
                MCPServerTelemetryEvent(
                    version=get_browser_use_version(),
                    action='tool_call',
                    tool_name=name,
                    duration_seconds=duration,
                    error_message=error_msg,
                )
            )
async def _execute_tool(
    self, tool_name: str, arguments: dict[str, Any]
) -> str | list[types.TextContent | types.ImageContent]:
    """Execute a browser-use tool. Returns str for most tools, or a content list for tools with image output."""
    # Agent-based tools
    if tool_name == 'retry_with_browser_use_agent':
        return await self._retry_with_browser_use_agent(
            task=arguments['task'],
            max_steps=arguments.get('max_steps', 100),
            model=arguments.get('model'),
            allowed_domains=arguments.get('allowed_domains', []),
            use_vision=arguments.get('use_vision', True),
        )
    # Browser session management tools (don't require active session)
    if tool_name == 'browser_list_sessions':
        return await self._list_sessions()
    elif tool_name == 'browser_close_session':
        return await self._close_session(arguments['session_id'])
    elif tool_name == 'browser_close_all':
        return await self._close_all_sessions()
    # Direct browser control tools (require active session)
    elif tool_name.startswith('browser_'):
        # Ensure browser session exists (lazily start one on first browser tool call)
        if not self.browser_session:
            await self._init_browser_session()
        if tool_name == 'browser_navigate':
            return await self._navigate(arguments['url'], arguments.get('new_tab', False))
        elif tool_name == 'browser_click':
            return await self._click(
                index=arguments.get('index'),
                coordinate_x=arguments.get('coordinate_x'),
                coordinate_y=arguments.get('coordinate_y'),
                new_tab=arguments.get('new_tab', False),
            )
        elif tool_name == 'browser_type':
            return await self._type_text(arguments['index'], arguments['text'])
        elif tool_name == 'browser_get_state':
            # State goes out as text; an optional screenshot rides along as ImageContent
            state_json, screenshot_b64 = await self._get_browser_state(arguments.get('include_screenshot', False))
            content: list[types.TextContent | types.ImageContent] = [types.TextContent(type='text', text=state_json)]
            if screenshot_b64:
                content.append(types.ImageContent(type='image', data=screenshot_b64, mimeType='image/png'))
            return content
        elif tool_name == 'browser_get_html':
            return await self._get_html(arguments.get('selector'))
        elif tool_name == 'browser_screenshot':
            meta_json, screenshot_b64 = await self._screenshot(arguments.get('full_page', False))
            content: list[types.TextContent | types.ImageContent] = [types.TextContent(type='text', text=meta_json)]
            if screenshot_b64:
                content.append(types.ImageContent(type='image', data=screenshot_b64, mimeType='image/png'))
            return content
        elif tool_name == 'browser_extract_content':
            return await self._extract_content(arguments['query'], arguments.get('extract_links', False))
        elif tool_name == 'browser_scroll':
            return await self._scroll(arguments.get('direction', 'down'))
        elif tool_name == 'browser_go_back':
            return await self._go_back()
        elif tool_name == 'browser_close':
            # Not advertised in handle_list_tools (commented out there) but still dispatchable
            return await self._close_browser()
        elif tool_name == 'browser_list_tabs':
            return await self._list_tabs()
        elif tool_name == 'browser_switch_tab':
            return await self._switch_tab(arguments['tab_id'])
        elif tool_name == 'browser_close_tab':
            return await self._close_tab(arguments['tab_id'])
    return f'Unknown tool: {tool_name}'
async def _init_browser_session(self, allowed_domains: list[str] | None = None, **kwargs):
    """Initialize browser session using config.

    Args:
        allowed_domains: Optional domain allowlist; overrides the profile config.
        **kwargs: Extra BrowserProfile fields merged in with highest priority
            (after allowed_domains).

    No-op if a session already exists. Also creates the Tools registry,
    an optional ChatOpenAI LLM (when an api_key is configured), and the
    FileSystem used by extraction actions.
    """
    if self.browser_session:
        return
    # Ensure all logging goes to stderr before browser initialization
    _ensure_all_loggers_use_stderr()
    logger.debug('Initializing browser session...')
    # Get profile config
    profile_config = get_default_profile(self.config)
    # Merge profile config with defaults and overrides
    profile_data = {
        'downloads_path': str(Path.home() / 'Downloads' / 'browser-use-mcp'),
        'wait_between_actions': 0.5,
        'keep_alive': True,
        'user_data_dir': '~/.config/browseruse/profiles/default',
        'device_scale_factor': 1.0,
        'disable_security': False,
        'headless': False,
        **profile_config,  # Config values override defaults
    }
    # Tool parameter overrides (highest priority)
    if allowed_domains is not None:
        profile_data['allowed_domains'] = allowed_domains
    # Merge any additional kwargs that are valid BrowserProfile fields
    for key, value in kwargs.items():
        profile_data[key] = value
    # Create browser profile
    profile = BrowserProfile(**profile_data)
    # Create browser session
    self.browser_session = BrowserSession(browser_profile=profile)
    await self.browser_session.start()
    # Track the session for management
    self._track_session(self.browser_session)
    # Create tools for direct actions
    self.tools = Tools()
    # Initialize LLM from config
    llm_config = get_default_llm(self.config)
    base_url = llm_config.get('base_url', None)
    # Use a dedicated dict for LLM constructor kwargs — the original rebound the
    # **kwargs parameter here, silently clobbering the caller's profile overrides dict.
    llm_kwargs = {}
    if base_url:
        llm_kwargs['base_url'] = base_url
    if api_key := llm_config.get('api_key'):
        self.llm = ChatOpenAI(
            # NOTE(review): 'gpt-o4-mini' looks like a typo ('gpt-4o-mini'?) — confirm before changing
            model=llm_config.get('model', 'gpt-o4-mini'),
            api_key=api_key,
            temperature=llm_config.get('temperature', 0.7),
            **llm_kwargs,
        )
    # Initialize FileSystem for extraction actions
    file_system_path = profile_config.get('file_system_path', '~/.browser-use-mcp')
    self.file_system = FileSystem(base_dir=Path(file_system_path).expanduser())
    logger.debug('Browser session initialized')
async def _retry_with_browser_use_agent(
    self,
    task: str,
    max_steps: int = 100,
    model: str | None = None,
    allowed_domains: list[str] | None = None,
    use_vision: bool = True,
) -> str:
    """Run an autonomous agent task.

    Args:
        task: Goal plus step-by-step description for the agent.
        max_steps: Hard cap on the number of agent steps.
        model: Optional LLM model override; falls back to the configured default.
        allowed_domains: Optional domain allowlist applied to the browser profile.
        use_vision: Whether the agent receives screenshots.

    Returns:
        A human-readable multi-line summary (steps, success, final result,
        errors, URLs visited) or an error string.
    """
    logger.debug(f'Running agent task: {task}')
    # Get LLM config
    llm_config = get_default_llm(self.config)
    # Get LLM provider
    model_provider = llm_config.get('model_provider') or os.getenv('MODEL_PROVIDER')
    # Get Bedrock-specific config
    if model_provider and model_provider.lower() == 'bedrock':
        llm_model = llm_config.get('model') or os.getenv('MODEL') or 'us.anthropic.claude-sonnet-4-20250514-v1:0'
        aws_region = llm_config.get('region') or os.getenv('REGION')
        if not aws_region:
            aws_region = 'us-east-1'
        aws_sso_auth = llm_config.get('aws_sso_auth', False)
        # NOTE(review): ChatAWSBedrock is not among this file's visible imports —
        # presumably imported at module top outside this view; verify before relying on this path.
        llm = ChatAWSBedrock(
            model=llm_model,  # or any Bedrock model
            aws_region=aws_region,
            aws_sso_auth=aws_sso_auth,
        )
    else:
        api_key = llm_config.get('api_key') or os.getenv('OPENAI_API_KEY')
        if not api_key:
            return 'Error: OPENAI_API_KEY not set in config or environment'
        # Use explicit model from tool call, otherwise fall back to configured default
        llm_model = model or llm_config.get('model', 'gpt-4o')
        base_url = llm_config.get('base_url', None)
        kwargs = {}
        if base_url:
            kwargs['base_url'] = base_url
        llm = ChatOpenAI(
            model=llm_model,
            api_key=api_key,
            temperature=llm_config.get('temperature', 0.7),
            **kwargs,
        )
    # Get profile config and merge with tool parameters
    profile_config = get_default_profile(self.config)
    # Override allowed_domains if provided in tool call
    if allowed_domains is not None:
        profile_config['allowed_domains'] = allowed_domains
    # Create browser profile using config
    profile = BrowserProfile(**profile_config)
    # Create and run agent
    agent = Agent(
        task=task,
        llm=llm,
        browser_profile=profile,
        use_vision=use_vision,
    )
    try:
        history = await agent.run(max_steps=max_steps)
        # Format results
        results = []
        results.append(f'Task completed in {len(history.history)} steps')
        results.append(f'Success: {history.is_successful()}')
        # Get final result if available
        final_result = history.final_result()
        if final_result:
            results.append(f'\nFinal result:\n{final_result}')
        # Include any errors
        errors = history.errors()
        if errors:
            results.append(f'\nErrors encountered:\n{json.dumps(errors, indent=2)}')
        # Include URLs visited
        urls = history.urls()
        if urls:
            # Filter out None values and convert to strings
            valid_urls = [str(url) for url in urls if url is not None]
            if valid_urls:
                results.append(f'\nURLs visited: {", ".join(valid_urls)}')
        return '\n'.join(results)
    except Exception as e:
        logger.error(f'Agent task failed: {e}', exc_info=True)
        return f'Agent task failed: {str(e)}'
    finally:
        # Clean up (always release the agent's resources, success or failure)
        await agent.close()
async def _navigate(self, url: str, new_tab: bool = False) -> str:
    """Navigate the active browser session to *url*, optionally in a new tab."""
    if not self.browser_session:
        return 'Error: No browser session active'
    # Touch the session so the idle-cleanup task does not reap it
    self._update_session_activity(self.browser_session.id)
    from browser_use.browser.events import NavigateToUrlEvent

    if new_tab:
        await self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url, new_tab=True))
        return f'Opened new tab with URL: {url}'
    await self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url))
    return f'Navigated to: {url}'
async def _click(
    self,
    index: int | None = None,
    coordinate_x: int | None = None,
    coordinate_y: int | None = None,
    new_tab: bool = False,
) -> str:
    """Click an element by index or at viewport coordinates.

    One addressing mode is expected: an element ``index`` from
    browser_get_state, or a ``coordinate_x``/``coordinate_y`` pair
    (coordinates take precedence when both are given). ``new_tab`` only
    has an effect for index-based clicks on elements with an href.
    """
    if not self.browser_session:
        return 'Error: No browser session active'
    # Update session activity
    self._update_session_activity(self.browser_session.id)
    # Coordinate-based clicking
    if coordinate_x is not None and coordinate_y is not None:
        from browser_use.browser.events import ClickCoordinateEvent

        event = self.browser_session.event_bus.dispatch(
            ClickCoordinateEvent(coordinate_x=coordinate_x, coordinate_y=coordinate_y)
        )
        await event
        return f'Clicked at coordinates ({coordinate_x}, {coordinate_y})'
    # Index-based clicking
    if index is None:
        return 'Error: Provide either index or both coordinate_x and coordinate_y'
    # Get the element
    element = await self.browser_session.get_dom_element_by_index(index)
    if not element:
        return f'Element with index {index} not found'
    if new_tab:
        # For links, extract href and open in new tab
        href = element.attributes.get('href')
        if href:
            # Convert relative href to absolute URL
            state = await self.browser_session.get_browser_state_summary()
            current_url = state.url
            if href.startswith('/'):
                # Relative URL - construct full URL from the current page's origin
                from urllib.parse import urlparse

                parsed = urlparse(current_url)
                full_url = f'{parsed.scheme}://{parsed.netloc}{href}'
            else:
                full_url = href
            # Open link in new tab
            from browser_use.browser.events import NavigateToUrlEvent

            event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=full_url, new_tab=True))
            await event
            return f'Clicked element {index} and opened in new tab {full_url[:20]}...'
        else:
            # For non-link elements, just do a normal click
            from browser_use.browser.events import ClickElementEvent

            event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
            await event
            return f'Clicked element {index} (new tab not supported for non-link elements)'
    else:
        # Normal click
        from browser_use.browser.events import ClickElementEvent

        event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
        await event
        return f'Clicked element {index}'
async def _type_text(self, index: int, text: str) -> str:
    """Type *text* into the interactive element at *index*.

    Potentially sensitive input (emails, token-like strings) is flagged so it
    can be redacted downstream, and is never echoed back in the result.
    """
    if not self.browser_session:
        return 'Error: No browser session active'
    element = await self.browser_session.get_dom_element_by_index(index)
    if not element:
        return f'Element with index {index} not found'
    from browser_use.browser.events import TypeTextEvent

    # Conservative heuristic for sensitive data — only very obvious patterns,
    # to minimize false positives.
    looks_like_email = '@' in text and '.' in text.split('@')[-1]
    looks_like_token = (
        len(text) >= 16
        and any(ch.isdigit() for ch in text)
        and any(ch.isalpha() for ch in text)
        and any(ch in '.-_' for ch in text)
    )
    is_potentially_sensitive = len(text) >= 6 and (looks_like_email or looks_like_token)
    # Use generic key names to avoid information leakage about detection patterns
    sensitive_key_name = None
    if is_potentially_sensitive:
        sensitive_key_name = 'email' if looks_like_email else 'credential'
    event = self.browser_session.event_bus.dispatch(
        TypeTextEvent(node=element, text=text, is_sensitive=is_potentially_sensitive, sensitive_key_name=sensitive_key_name)
    )
    await event
    if not is_potentially_sensitive:
        return f"Typed '{text}' into element {index}"
    if sensitive_key_name:
        return f'Typed <{sensitive_key_name}> into element {index}'
    return f'Typed <sensitive> into element {index}'
async def _get_browser_state(self, include_screenshot: bool = False) -> tuple[str, str | None]:
    """Get current browser state. Returns (state_json, screenshot_b64 | None)."""
    if not self.browser_session:
        return 'Error: No browser session active', None
    state = await self.browser_session.get_browser_state_summary()
    result: dict[str, Any] = {
        'url': state.url,
        'title': state.title,
        'tabs': [{'url': tab.url, 'title': tab.title} for tab in state.tabs],
        'interactive_elements': [],
    }
    # Add viewport info so the LLM knows the coordinate space
    if state.page_info:
        pi = state.page_info
        result['viewport'] = {
            'width': pi.viewport_width,
            'height': pi.viewport_height,
        }
        result['page'] = {
            'width': pi.page_width,
            'height': pi.page_height,
        }
        result['scroll'] = {
            'x': pi.scroll_x,
            'y': pi.scroll_y,
        }
    # Add interactive elements with their indices (text truncated to 100 chars)
    for index, element in state.dom_state.selector_map.items():
        elem_info: dict[str, Any] = {
            'index': index,
            'tag': element.tag_name,
            'text': element.get_all_children_text(max_depth=2)[:100],
        }
        if element.attributes.get('placeholder'):
            elem_info['placeholder'] = element.attributes['placeholder']
        if element.attributes.get('href'):
            elem_info['href'] = element.attributes['href']
        result['interactive_elements'].append(elem_info)
    # Return screenshot separately as ImageContent instead of embedding base64 in JSON
    screenshot_b64 = None
    if include_screenshot and state.screenshot:
        screenshot_b64 = state.screenshot
        # Include viewport dimensions in JSON so LLM can map pixels to coordinates
        if state.page_info:
            result['screenshot_dimensions'] = {
                'width': state.page_info.viewport_width,
                'height': state.page_info.viewport_height,
            }
    return json.dumps(result, indent=2), screenshot_b64
async def _get_html(self, selector: str | None = None) -> str:
    """Return raw outerHTML for the page, or for the first match of *selector*."""
    if not self.browser_session:
        return 'Error: No browser session active'
    self._update_session_activity(self.browser_session.id)
    cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=None, focus=False)
    if not cdp_session:
        return 'Error: No active CDP session'
    # json.dumps safely embeds the selector as a JS string literal
    if selector:
        expression = f'(function(){{ const el = document.querySelector({json.dumps(selector)}); return el ? el.outerHTML : null; }})()'
    else:
        expression = 'document.documentElement.outerHTML'
    evaluated = await cdp_session.cdp_client.send.Runtime.evaluate(
        params={'expression': expression, 'returnByValue': True},
        session_id=cdp_session.session_id,
    )
    html = evaluated.get('result', {}).get('value')
    if html is not None:
        return html
    # Null result: either the selector matched nothing, or the page HTML was unavailable
    return f'No element found for selector: {selector}' if selector else 'Error: Could not get page HTML'
async def _screenshot(self, full_page: bool = False) -> tuple[str, str | None]:
    """Take a screenshot. Returns (metadata_json, screenshot_b64 | None)."""
    if not self.browser_session:
        return 'Error: No browser session active', None
    import base64

    self._update_session_activity(self.browser_session.id)
    raw = await self.browser_session.take_screenshot(full_page=full_page)
    encoded = base64.b64encode(raw).decode()
    # Screenshot bytes go out separately (as ImageContent); the JSON carries metadata only
    state = await self.browser_session.get_browser_state_summary()
    meta: dict[str, Any] = {
        'size_bytes': len(raw),
    }
    if state.page_info:
        meta['viewport'] = {
            'width': state.page_info.viewport_width,
            'height': state.page_info.viewport_height,
        }
    return json.dumps(meta), encoded
async def _extract_content(self, query: str, extract_links: bool = False) -> str:
    """Extract content from current page.

    Runs the tools' ``extract`` action through a dynamically created action
    model, using the configured LLM and file system.
    """
    if not self.llm:
        return 'Error: LLM not initialized (set OPENAI_API_KEY)'
    if not self.file_system:
        return 'Error: FileSystem not initialized'
    if not self.browser_session:
        return 'Error: No browser session active'
    if not self.tools:
        return 'Error: Tools not initialized'
    state = await self.browser_session.get_browser_state_summary()
    # NOTE(review): `state` is unused below — presumably fetched for its side effect of
    # refreshing browser state before extraction; confirm before removing.
    # Use the extract action
    # Create a dynamic action model that matches the tools's expectations
    from pydantic import create_model

    # Create action model dynamically
    ExtractAction = create_model(
        'ExtractAction',
        __base__=ActionModel,
        extract=dict[str, Any],
    )
    # Use model_validate because Pyright does not understand the dynamic model
    action = ExtractAction.model_validate(
        {
            'extract': {'query': query, 'extract_links': extract_links},
        }
    )
    action_result = await self.tools.act(
        action=action,
        browser_session=self.browser_session,
        page_extraction_llm=self.llm,
        file_system=self.file_system,
    )
    return action_result.extracted_content or 'No content extracted'
async def _scroll(self, direction: str = 'down') -> str:
    """Scroll the page up or down by a fixed amount.

    Args:
        direction: 'up' or 'down' (matches the tool schema enum).
    """
    if not self.browser_session:
        return 'Error: No browser session active'
    # Keep the idle-session timer in sync with the sibling actions
    # (_navigate/_click/_get_html all call this; the original _scroll did not,
    # so a session used only for scrolling could be reaped by the cleanup task)
    self._update_session_activity(self.browser_session.id)
    from browser_use.browser.events import ScrollEvent

    # Scroll by a standard amount (500 pixels)
    event = self.browser_session.event_bus.dispatch(
        ScrollEvent(
            direction=direction,  # type: ignore
            amount=500,
        )
    )
    await event
    return f'Scrolled {direction}'
async def _go_back(self) -> str:
    """Go back in browser history."""
    if not self.browser_session:
        return 'Error: No browser session active'
    # Keep the idle-session timer in sync with the sibling actions
    # (the original omitted this, unlike _navigate/_click/_get_html)
    self._update_session_activity(self.browser_session.id)
    from browser_use.browser.events import GoBackEvent

    event = self.browser_session.event_bus.dispatch(GoBackEvent())
    await event
    return 'Navigated back'
async def _close_browser(self) -> str:
    """Stop the current browser session, if any, and drop references to it."""
    if not self.browser_session:
        return 'No browser session to close'
    from browser_use.browser.events import BrowserStopEvent

    await self.browser_session.event_bus.dispatch(BrowserStopEvent())
    self.browser_session = None
    self.tools = None
    return 'Browser closed'
async def _list_tabs(self) -> str:
    """Return a JSON array of open tabs: tab_id (last 4 chars of target id), url, title."""
    if not self.browser_session:
        return 'Error: No browser session active'
    tabs_info = await self.browser_session.get_tabs()
    # The original used enumerate() but never used the index — iterate tabs directly
    tabs = [{'tab_id': tab.target_id[-4:], 'url': tab.url, 'title': tab.title or ''} for tab in tabs_info]
    return json.dumps(tabs, indent=2)
async def _switch_tab(self, tab_id: str) -> str:
    """Focus the tab identified by the 4-character *tab_id* and report its URL."""
    if not self.browser_session:
        return 'Error: No browser session active'
    from browser_use.browser.events import SwitchTabEvent

    target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
    await self.browser_session.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
    state = await self.browser_session.get_browser_state_summary()
    return f'Switched to tab {tab_id}: {state.url}'
async def _close_tab(self, tab_id: str) -> str:
    """Close the tab identified by the 4-character *tab_id* and report where we are now."""
    if not self.browser_session:
        return 'Error: No browser session active'
    from browser_use.browser.events import CloseTabEvent

    target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
    await self.browser_session.event_bus.dispatch(CloseTabEvent(target_id=target_id))
    current_url = await self.browser_session.get_current_page_url()
    return f'Closed tab # {tab_id}, now on {current_url}'
def _track_session(self, session: BrowserSession) -> None:
    """Track a browser session for management.

    Registers *session* in ``active_sessions`` with creation/activity
    timestamps used by the idle-cleanup task.
    """
    self.active_sessions[session.id] = {
        'session': session,
        'created_at': time.time(),
        'last_activity': time.time(),
        # current_url may not exist on the session object; default to None
        'url': getattr(session, 'current_url', None),
    }
def _update_session_activity(self, session_id: str) -> None:
    """Refresh the last-activity timestamp for *session_id* (no-op if untracked)."""
    entry = self.active_sessions.get(session_id)
    if entry is not None:
        entry['last_activity'] = time.time()
async def _list_sessions(self) -> str:
    """Describe every tracked browser session as pretty-printed JSON."""
    if not self.active_sessions:
        return 'No active browser sessions'
    timestamp_format = '%Y-%m-%d %H:%M:%S'
    report = []
    for session_id, session_data in self.active_sessions.items():
        session = session_data['session']
        report.append(
            {
                'session_id': session_id,
                'created_at': time.strftime(timestamp_format, time.localtime(session_data['created_at'])),
                'last_activity': time.strftime(timestamp_format, time.localtime(session_data['last_activity'])),
                # A session counts as alive while it still holds a CDP client
                'active': hasattr(session, 'cdp_client') and session.cdp_client is not None,
                'current_url': session_data.get('url', 'Unknown'),
                'age_minutes': (time.time() - session_data['created_at']) / 60,
            }
        )
    return json.dumps(report, indent=2)
async def _close_session(self, session_id: str) -> str:
    """Close a specific browser session.

    Prefers ``kill()`` over ``close()`` when the session object offers it,
    removes the session from tracking, and clears the current-session
    references if they point at the closed session.
    """
    if session_id not in self.active_sessions:
        return f'Session {session_id} not found'
    session_data = self.active_sessions[session_id]
    session = session_data['session']
    try:
        # Close the session
        if hasattr(session, 'kill'):
            await session.kill()
        elif hasattr(session, 'close'):
            await session.close()
        # Remove from tracking
        del self.active_sessions[session_id]
        # If this was the current session, clear it
        if self.browser_session and self.browser_session.id == session_id:
            self.browser_session = None
            self.tools = None
        return f'Successfully closed session {session_id}'
    except Exception as e:
        # Session stays tracked on failure so the caller can retry
        return f'Error closing session {session_id}: {str(e)}'
async def _close_all_sessions(self) -> str:
    """Close every tracked session; report how many closed and any errors."""
    if not self.active_sessions:
        return 'No active sessions to close'
    closed_count = 0
    failures: list[str] = []
    # Snapshot the keys — _close_session mutates active_sessions while we iterate
    for session_id in list(self.active_sessions.keys()):
        try:
            outcome = await self._close_session(session_id)
        except Exception as e:
            failures.append(f'{session_id}: {str(e)}')
            continue
        if 'Successfully closed' in outcome:
            closed_count += 1
        else:
            failures.append(f'{session_id}: {outcome}')
    # Clear current session references
    self.browser_session = None
    self.tools = None
    summary = f'Closed {closed_count} sessions'
    if failures:
        summary += f'. Errors: {"; ".join(failures)}'
    return summary
async def _cleanup_expired_sessions(self) -> None:
    """Close sessions whose last activity is older than the configured timeout."""
    # A session is expired when its last_activity predates this deadline
    deadline = time.time() - self.session_timeout_minutes * 60
    expired = [
        session_id
        for session_id, session_data in self.active_sessions.items()
        if session_data['last_activity'] < deadline
    ]
    for session_id in expired:
        try:
            await self._close_session(session_id)
            logger.info(f'Auto-closed expired session {session_id}')
        except Exception as e:
            logger.error(f'Error auto-closing session {session_id}: {e}')
async def _start_cleanup_task(self) -> None:
    """Start the background cleanup task."""

    async def cleanup_loop():
        # Runs for the lifetime of the server; errors are logged and the loop continues
        while True:
            try:
                await self._cleanup_expired_sessions()
                # Check every 2 minutes
                await asyncio.sleep(120)
            except Exception as e:
                logger.error(f'Error in cleanup task: {e}')
                await asyncio.sleep(120)

    # Keep a handle on the task so it is not garbage-collected mid-run
    self._cleanup_task = create_task_with_error_handling(cleanup_loop(), name='mcp_cleanup_loop', suppress_exceptions=True)
async def run(self):
    """Run the MCP server over stdio until the client disconnects."""
    # Start the cleanup task
    await self._start_cleanup_task()
    # stdio_server wires the MCP protocol to this process's stdin/stdout —
    # which is why all logging in this module is forced onto stderr
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await self.server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name='browser-use',
                server_version='0.1.0',
                capabilities=self.server.get_capabilities(
                    notification_options=NotificationOptions(),
                    experimental_capabilities={},
                ),
            ),
        )
async def main(session_timeout_minutes: int = 10):
    """Entry point: run the MCP server, recording start/stop telemetry.

    Args:
        session_timeout_minutes: Idle time after which sessions are auto-closed.
    """
    if not MCP_AVAILABLE:
        print('MCP SDK is required. Install with: pip install mcp', file=sys.stderr)
        sys.exit(1)
    server = BrowserUseServer(session_timeout_minutes=session_timeout_minutes)
    server._telemetry.capture(
        MCPServerTelemetryEvent(
            version=get_browser_use_version(),
            action='start',
            parent_process_cmdline=get_parent_process_cmdline(),
        )
    )
    try:
        await server.run()
    finally:
        # Always record how long the server ran, even on crash or interrupt.
        elapsed = time.time() - server._start_time
        server._telemetry.capture(
            MCPServerTelemetryEvent(
                version=get_browser_use_version(),
                action='stop',
                duration_seconds=elapsed,
                parent_process_cmdline=get_parent_process_cmdline(),
            )
        )
        server._telemetry.flush()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/mcp/server.py",
"license": "MIT License",
"lines": 1075,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/observability.py | # @file purpose: Observability module for browser-use that handles optional lmnr integration with debug mode support
"""
Observability module for browser-use
This module provides observability decorators that optionally integrate with lmnr (Laminar) for tracing.
If lmnr is not installed, it provides no-op wrappers that accept the same parameters.
Features:
- Optional lmnr integration - works with or without lmnr installed
- Debug mode support - observe_debug only traces when in debug mode
- Full parameter compatibility with lmnr observe decorator
- No-op fallbacks when lmnr is unavailable
"""
import logging
import os
from collections.abc import Callable
from functools import wraps
from typing import Any, Literal, TypeVar, cast
logger = logging.getLogger(__name__)
from dotenv import load_dotenv
load_dotenv()
# Type definitions
F = TypeVar('F', bound=Callable[..., Any])
# Check if we're in debug mode
def _is_debug_mode() -> bool:
    """Check if we're in debug mode based on environment variables or logging level."""
    # Debug tracing is opted into via LMNR_LOGGING_LEVEL=debug (case-insensitive).
    return os.getenv('LMNR_LOGGING_LEVEL', '').lower() == 'debug'
# Try to import lmnr observe
# _LMNR_AVAILABLE records whether the real tracing decorator can be used;
# _lmnr_observe holds the imported decorator (or None when lmnr is absent).
_LMNR_AVAILABLE = False
_lmnr_observe = None
try:
    from lmnr import observe as _lmnr_observe  # type: ignore
    # Only announce availability when verbose observability logging is opted into.
    if os.environ.get('BROWSER_USE_VERBOSE_OBSERVABILITY', 'false').lower() == 'true':
        logger.debug('Lmnr is available for observability')
    _LMNR_AVAILABLE = True
except ImportError:
    if os.environ.get('BROWSER_USE_VERBOSE_OBSERVABILITY', 'false').lower() == 'true':
        logger.debug('Lmnr is not available for observability')
    _LMNR_AVAILABLE = False
def _create_no_op_decorator(
    name: str | None = None,
    ignore_input: bool = False,
    ignore_output: bool = False,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Callable[[F], F]:
    """Create a no-op decorator that accepts all lmnr observe parameters but does nothing.

    The parameters exist purely for signature compatibility with lmnr's
    ``observe`` and are otherwise ignored.
    """
    import asyncio

    def decorator(func: F) -> F:
        # Preserve the sync/async nature of the wrapped callable so callers
        # can still ``await`` coroutine functions.
        if not asyncio.iscoroutinefunction(func):

            @wraps(func)
            def passthrough(*args, **kw):
                return func(*args, **kw)

            return cast(F, passthrough)

        @wraps(func)
        async def async_passthrough(*args, **kw):
            return await func(*args, **kw)

        return cast(F, async_passthrough)

    return decorator
def observe(
    name: str | None = None,
    ignore_input: bool = False,
    ignore_output: bool = False,
    metadata: dict[str, Any] | None = None,
    span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
    **kwargs: Any,
) -> Callable[[F], F]:
    """Trace a function with lmnr when it is installed, otherwise do nothing.

    Args:
        name: Name of the span/trace.
        ignore_input: Whether to ignore function input parameters in tracing.
        ignore_output: Whether to ignore function output in tracing.
        metadata: Additional metadata to attach to the span.
        span_type: Span category reported to lmnr.
        **kwargs: Additional parameters forwarded to lmnr observe.

    Returns:
        Decorated function that may be traced depending on lmnr availability.

    Example:
        @observe(name="my_function", metadata={"version": "1.0"})
        def my_function(param1, param2):
            return param1 + param2
    """
    params: dict[str, Any] = dict(
        name=name,
        ignore_input=ignore_input,
        ignore_output=ignore_output,
        metadata=metadata,
        span_type=span_type,
        tags=['observe', 'observe_debug'],  # important: tags need to be created on laminar first
    )
    # Caller-supplied extras take precedence over the defaults above.
    params.update(kwargs)
    if _LMNR_AVAILABLE and _lmnr_observe:
        # Use the real lmnr observe decorator
        return cast(Callable[[F], F], _lmnr_observe(**params))
    # Use no-op decorator
    return _create_no_op_decorator(**params)
def observe_debug(
    name: str | None = None,
    ignore_input: bool = False,
    ignore_output: bool = False,
    metadata: dict[str, Any] | None = None,
    span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
    **kwargs: Any,
) -> Callable[[F], F]:
    """Trace a function only when lmnr is installed AND debug mode is on.

    Debug mode is controlled by the LMNR_LOGGING_LEVEL environment variable
    (see ``_is_debug_mode``); outside debug mode this decorator is a no-op.

    Args:
        name: Name of the span/trace.
        ignore_input: Whether to ignore function input parameters in tracing.
        ignore_output: Whether to ignore function output in tracing.
        metadata: Additional metadata to attach to the span.
        span_type: Span category reported to lmnr.
        **kwargs: Additional parameters forwarded to lmnr observe.

    Returns:
        Decorated function that may be traced only in debug mode.

    Example:
        @observe_debug(ignore_input=True, ignore_output=True, name="debug_function", metadata={"debug": True})
        def debug_function(param1, param2):
            return param1 + param2
    """
    params: dict[str, Any] = dict(
        name=name,
        ignore_input=ignore_input,
        ignore_output=ignore_output,
        metadata=metadata,
        span_type=span_type,
        tags=['observe_debug'],  # important: tags need to be created on laminar first
    )
    # Caller-supplied extras take precedence over the defaults above.
    params.update(kwargs)
    if _LMNR_AVAILABLE and _lmnr_observe and _is_debug_mode():
        # Use the real lmnr observe decorator only in debug mode
        return cast(Callable[[F], F], _lmnr_observe(**params))
    # Use no-op decorator (either not in debug mode or lmnr not available)
    return _create_no_op_decorator(**params)
# Convenience functions for checking availability and debug status
def is_lmnr_available() -> bool:
    """Check if lmnr is available for tracing.

    Reflects whether the ``lmnr`` package was importable at module load time.
    """
    return _LMNR_AVAILABLE
def is_debug_mode() -> bool:
    """Check if we're currently in debug mode.

    Thin public wrapper around the module-private ``_is_debug_mode`` check.
    """
    return _is_debug_mode()
def get_observability_status() -> dict[str, bool]:
    """Get the current status of observability features."""
    status: dict[str, bool] = {}
    status['lmnr_available'] = _LMNR_AVAILABLE
    status['debug_mode'] = _is_debug_mode()
    # observe is active whenever lmnr is importable; observe_debug also
    # requires debug mode to be switched on.
    status['observe_active'] = _LMNR_AVAILABLE
    status['observe_debug_active'] = _LMNR_AVAILABLE and _is_debug_mode()
    return status
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/observability.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/screenshots/service.py | """
Screenshot storage service for browser-use agents.
"""
import base64
from pathlib import Path
import anyio
from browser_use.observability import observe_debug
class ScreenshotService:
    """Persists agent screenshots as PNG files under the agent directory."""

    def __init__(self, agent_directory: str | Path):
        """Initialize with agent directory path"""
        self.agent_directory = Path(agent_directory) if isinstance(agent_directory, str) else agent_directory
        # All screenshots live in a dedicated subdirectory, created eagerly.
        self.screenshots_dir = self.agent_directory / 'screenshots'
        self.screenshots_dir.mkdir(parents=True, exist_ok=True)

    @observe_debug(ignore_input=True, ignore_output=True, name='store_screenshot')
    async def store_screenshot(self, screenshot_b64: str, step_number: int) -> str:
        """Store screenshot to disk and return the full path as string"""
        target = self.screenshots_dir / f'step_{step_number}.png'
        # Decode the base64 payload and write the raw bytes asynchronously.
        raw = base64.b64decode(screenshot_b64)
        async with await anyio.open_file(target, 'wb') as f:
            await f.write(raw)
        return str(target)

    @observe_debug(ignore_input=True, ignore_output=True, name='get_screenshot_from_disk')
    async def get_screenshot(self, screenshot_path: str) -> str | None:
        """Load screenshot from disk path and return as base64"""
        if not screenshot_path:
            return None
        source = Path(screenshot_path)
        if not source.exists():
            return None
        # Read the raw bytes and hand back a base64 string.
        async with await anyio.open_file(source, 'rb') as f:
            raw = await f.read()
        return base64.b64encode(raw).decode('utf-8')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/screenshots/service.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/sync/auth.py | """
OAuth2 Device Authorization Grant flow client for browser-use.
"""
import asyncio
import json
import os
import shutil
import time
from datetime import datetime
import httpx
from pydantic import BaseModel
from uuid_extensions import uuid7str
from browser_use.config import CONFIG
# Temporary user ID for pre-auth events (matches cloud backend)
TEMP_USER_ID = '99999999-9999-9999-9999-999999999999'
def get_or_create_device_id() -> str:
    """Get or create a persistent device ID for this installation."""
    id_file = CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id'
    # Reuse the stored ID when the file exists, is readable, and is non-empty.
    if id_file.exists():
        try:
            existing = id_file.read_text().strip()
        except Exception:
            # Unreadable file: fall through and mint a new ID.
            existing = ''
        if existing:
            return existing
    # Mint and persist a fresh ID.
    fresh = uuid7str()
    CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    id_file.write_text(fresh)
    return fresh
class CloudAuthConfig(BaseModel):
    """Configuration for cloud authentication"""

    # All fields optional: an empty config simply means "not authenticated".
    api_token: str | None = None
    user_id: str | None = None
    authorized_at: datetime | None = None

    @classmethod
    def load_from_file(cls) -> 'CloudAuthConfig':
        """Load auth config from local file"""
        config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
        if config_path.exists():
            try:
                with open(config_path) as f:
                    return cls.model_validate(json.load(f))
            except Exception:
                # Corrupted/unreadable file: behave as if nothing is stored.
                pass
        return cls()

    def save_to_file(self) -> None:
        """Save auth config to local file"""
        CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
        config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
        with open(config_path, 'w') as f:
            json.dump(self.model_dump(mode='json'), f, indent=2, default=str)
        try:
            # Tokens are secrets: restrict the file to owner read/write only.
            os.chmod(config_path, 0o600)
        except Exception:
            # Some systems may not support chmod, continue anyway
            pass
class DeviceAuthClient:
    """Client for the OAuth2 device authorization grant flow (RFC 8628).

    Talks to the browser-use cloud backend to obtain an API token. When an
    ``httpx.AsyncClient`` is injected it is reused for every request;
    otherwise a short-lived client is created per operation. The previous
    implementation duplicated the entire request/poll logic for those two
    cases; the shared logic now lives in private helpers.
    """

    def __init__(self, base_url: str | None = None, http_client: httpx.AsyncClient | None = None):
        # Backend API URL for OAuth requests - can be passed directly or defaults to env var
        self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
        self.client_id = 'library'
        self.scope = 'read write'
        # If no client provided, we'll create one per request
        self.http_client = http_client
        # Temporary user ID for pre-auth events
        self.temp_user_id = TEMP_USER_ID
        # Get or create persistent device ID
        self.device_id = get_or_create_device_id()
        # Load existing auth if available
        self.auth_config = CloudAuthConfig.load_from_file()

    @property
    def is_authenticated(self) -> bool:
        """Check if we have valid authentication"""
        return bool(self.auth_config.api_token and self.auth_config.user_id)

    @property
    def api_token(self) -> str | None:
        """Get the current API token"""
        return self.auth_config.api_token

    @property
    def user_id(self) -> str:
        """Get the current user ID (temporary or real)"""
        return self.auth_config.user_id or self.temp_user_id

    async def _request_device_authorization(self, client: httpx.AsyncClient, agent_session_id: str | None) -> dict:
        """POST the device-authorization request using *client* and return the JSON body.

        Raises:
            httpx.HTTPStatusError: on any non-2xx response.
        """
        response = await client.post(
            f'{self.base_url.rstrip("/")}/api/v1/oauth/device/authorize',
            data={
                'client_id': self.client_id,
                'scope': self.scope,
                'agent_session_id': agent_session_id or '',
                'device_id': self.device_id,
            },
        )
        response.raise_for_status()
        return response.json()

    async def start_device_authorization(
        self,
        agent_session_id: str | None = None,
    ) -> dict:
        """
        Start the device authorization flow.
        Returns device authorization details including user code and verification URL.
        """
        if self.http_client:
            return await self._request_device_authorization(self.http_client, agent_session_id)
        async with httpx.AsyncClient() as client:
            return await self._request_device_authorization(client, agent_session_id)

    async def _poll_loop(
        self,
        client: httpx.AsyncClient,
        device_code: str,
        interval: float,
        timeout: float,
    ) -> dict | None:
        """Poll the token endpoint with *client* until success, terminal error, or timeout.

        Fix over the previous version: every retry now waits ``interval``
        seconds. Previously a 400 response carrying ``authorization_pending``
        (the shape RFC 8628 servers normally send) and a 200 response with
        neither error nor token retried immediately, busy-looping against
        the server.
        """
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                response = await client.post(
                    f'{self.base_url.rstrip("/")}/api/v1/oauth/device/token',
                    data={
                        'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
                        'device_code': device_code,
                        'client_id': self.client_id,
                    },
                )
                if response.status_code == 200:
                    data = response.json()
                    # Check for pending authorization
                    if data.get('error') == 'authorization_pending':
                        await asyncio.sleep(interval)
                        continue
                    # Server asked us to back off
                    if data.get('error') == 'slow_down':
                        interval = data.get('interval', interval * 2)
                        await asyncio.sleep(interval)
                        continue
                    # Any other error is terminal
                    if 'error' in data:
                        print(f'Error: {data.get("error_description", data["error"])}')
                        return None
                    # Success! We have a token
                    if 'access_token' in data:
                        return data
                elif response.status_code == 400:
                    # Error response; pending/slow_down just mean "keep polling"
                    data = response.json()
                    if data.get('error') not in ['authorization_pending', 'slow_down']:
                        print(f'Error: {data.get("error_description", "Unknown error")}')
                        return None
                else:
                    print(f'Unexpected status code: {response.status_code}')
                    return None
            except Exception as e:
                print(f'Error polling for token: {e}')
            # Honour the polling interval before the next attempt.
            await asyncio.sleep(interval)
        return None

    async def poll_for_token(
        self,
        device_code: str,
        interval: float = 3.0,
        timeout: float = 1800.0,
    ) -> dict | None:
        """
        Poll for the access token.
        Returns token info when authorized, None if timeout.
        """
        if self.http_client:
            # Use injected client for all requests
            return await self._poll_loop(self.http_client, device_code, interval, timeout)
        # Create a new client for polling
        async with httpx.AsyncClient() as client:
            return await self._poll_loop(client, device_code, interval, timeout)

    async def authenticate(
        self,
        agent_session_id: str | None = None,
        show_instructions: bool = True,
    ) -> bool:
        """
        Run the full authentication flow.
        Returns True if authentication successful.
        """
        # auth.py defines no module-level logger, so create one locally.
        import logging

        logger = logging.getLogger(__name__)
        try:
            # Start device authorization
            device_auth = await self.start_device_authorization(agent_session_id)
            # Use frontend URL for user-facing links
            frontend_url = CONFIG.BROWSER_USE_CLOUD_UI_URL or self.base_url.replace('//api.', '//cloud.')
            # Replace backend URL with frontend URL in the verification URI shown to the user
            verification_uri_complete = device_auth['verification_uri_complete'].replace(self.base_url, frontend_url)
            terminal_width, _terminal_height = shutil.get_terminal_size((80, 20))
            if show_instructions and CONFIG.BROWSER_USE_CLOUD_SYNC:
                logger.info('─' * max(terminal_width - 40, 20))
                logger.info('🌐 View the details of this run in Browser Use Cloud:')
                logger.info(f' 👉 {verification_uri_complete}')
                logger.info('─' * max(terminal_width - 40, 20) + '\n')
            # Poll for token
            token_data = await self.poll_for_token(
                device_code=device_auth['device_code'],
                interval=device_auth.get('interval', 5),
            )
            if token_data and token_data.get('access_token'):
                # Save authentication
                self.auth_config.api_token = token_data['access_token']
                self.auth_config.user_id = token_data.get('user_id', self.temp_user_id)
                self.auth_config.authorized_at = datetime.now()
                self.auth_config.save_to_file()
                if show_instructions:
                    logger.debug('✅ Authentication successful! Cloud sync is now enabled with your browser-use account.')
                return True
        except httpx.HTTPStatusError as e:
            # HTTP error with response
            if e.response.status_code == 404:
                logger.warning(
                    'Cloud sync authentication endpoint not found (404). Check your BROWSER_USE_CLOUD_API_URL setting.'
                )
            else:
                logger.warning(f'Failed to authenticate with cloud service: HTTP {e.response.status_code} - {e.response.text}')
        except httpx.RequestError:
            # Connection/network errors are expected when offline; stay silent.
            pass
        except Exception as e:
            # Other unexpected errors
            logger.warning(f'❌ Unexpected error during cloud sync authentication: {type(e).__name__}: {e}')
        if show_instructions:
            logger.debug(f'❌ Sync authentication failed or timed out with {CONFIG.BROWSER_USE_CLOUD_API_URL}')
        return False

    def get_headers(self) -> dict:
        """Get headers for API requests"""
        if self.api_token:
            return {'Authorization': f'Bearer {self.api_token}'}
        return {}

    def clear_auth(self) -> None:
        """Clear stored authentication"""
        self.auth_config = CloudAuthConfig()
        # Remove the config file entirely instead of saving empty values
        config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
        config_path.unlink(missing_ok=True)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/sync/auth.py",
"license": "MIT License",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/sync/service.py | """
Cloud sync service for sending events to the Browser Use cloud.
"""
import logging
import httpx
from bubus import BaseEvent
from browser_use.config import CONFIG
from browser_use.sync.auth import TEMP_USER_ID, DeviceAuthClient
logger = logging.getLogger(__name__)
class CloudSync:
    """Service for syncing events to the Browser Use cloud.

    Delivery is strictly best-effort: every network or server failure is
    logged at debug level and swallowed so cloud sync can never break the
    agent. Improvements over the previous version: the redundant local
    ``import logging`` (which shadowed the module-level logger) was removed
    from ``authenticate``, the dead commented-out WAL-rewrite method was
    dropped, and an unused exception binding was removed.
    """

    def __init__(self, base_url: str | None = None, allow_session_events_for_auth: bool = False):
        # Backend API URL for all API requests - can be passed directly or defaults to env var
        self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
        self.auth_client = DeviceAuthClient(base_url=self.base_url)
        self.session_id: str | None = None
        self.allow_session_events_for_auth = allow_session_events_for_auth
        self.auth_flow_active = False  # Flag to indicate auth flow is running
        # Check if cloud sync is actually enabled - if not, we should remain silent
        self.enabled = CONFIG.BROWSER_USE_CLOUD_SYNC

    async def handle_event(self, event: BaseEvent) -> None:
        """Handle an event by sending it to the cloud (best-effort, never raises)."""
        try:
            # If cloud sync is disabled, don't handle any events
            if not self.enabled:
                return
            # Extract session ID from CreateAgentSessionEvent
            if event.event_type == 'CreateAgentSessionEvent' and hasattr(event, 'id'):
                self.session_id = str(event.id)  # type: ignore
            # Send events based on authentication status and context
            if self.auth_client.is_authenticated:
                # User is authenticated - send all events
                await self._send_event(event)
            elif self.allow_session_events_for_auth:
                # Special case: allow ALL events during auth flow
                await self._send_event(event)
                # Mark auth flow as active when we see a session event
                if event.event_type == 'CreateAgentSessionEvent':
                    self.auth_flow_active = True
            else:
                # User is not authenticated and no auth in progress - don't send anything
                logger.debug(f'Skipping event {event.event_type} - user not authenticated')
        except Exception as e:
            logger.error(f'Failed to handle {event.event_type} event: {type(e).__name__}: {e}', exc_info=True)

    async def _send_event(self, event: BaseEvent) -> None:
        """Serialize *event* and POST it to the cloud events endpoint.

        All failures (timeouts, connection errors, HTTP errors) are logged at
        debug level only — event delivery problems must not disrupt the agent.
        """
        try:
            headers = {}
            # Override user_id only if it's not already set to a specific value
            # This allows CLI and other code to explicitly set temp user_id when needed
            if self.auth_client and self.auth_client.is_authenticated:
                # Only override if we're fully authenticated and event doesn't have temp user_id
                current_user_id = getattr(event, 'user_id', None)
                if current_user_id != TEMP_USER_ID:
                    setattr(event, 'user_id', str(self.auth_client.user_id))
            else:
                # Set temp user_id if not already set
                if not hasattr(event, 'user_id') or not getattr(event, 'user_id', None):
                    setattr(event, 'user_id', TEMP_USER_ID)
            # Add auth headers if available
            if self.auth_client:
                headers.update(self.auth_client.get_headers())
            # Send event (batch format with direct BaseEvent serialization)
            async with httpx.AsyncClient() as client:
                # Serialize event and add device_id to all events
                event_data = event.model_dump(mode='json')
                if self.auth_client and self.auth_client.device_id:
                    event_data['device_id'] = self.auth_client.device_id
                response = await client.post(
                    f'{self.base_url.rstrip("/")}/api/v1/events',
                    json={'events': [event_data]},
                    headers=headers,
                    timeout=10.0,
                )
                if response.status_code >= 400:
                    # Log error but don't raise - we want to fail silently
                    logger.debug(
                        f'Failed to send sync event: POST {response.request.url} {response.status_code} - {response.text}'
                    )
        except httpx.TimeoutException:
            logger.debug(f'Event send timed out after 10 seconds: {event}')
        except httpx.ConnectError:
            # Offline / unreachable backend is expected; stay silent.
            pass
        except httpx.HTTPError as e:
            logger.debug(f'HTTP error sending event {event}: {type(e).__name__}: {e}')
        except Exception as e:
            logger.debug(f'Unexpected error sending event {event}: {type(e).__name__}: {e}')

    def set_auth_flow_active(self) -> None:
        """Mark auth flow as active to allow all events"""
        self.auth_flow_active = True

    async def authenticate(self, show_instructions: bool = True) -> bool:
        """Authenticate with the cloud service.

        Returns True when already authenticated or when the OAuth device flow
        succeeds; False when sync is disabled or authentication fails.
        """
        # If cloud sync is disabled, don't authenticate
        if not self.enabled:
            return False
        # Check if already authenticated first
        if self.auth_client.is_authenticated:
            if show_instructions:
                logger.info('✅ Already authenticated! Skipping OAuth flow.')
            return True
        # Not authenticated - run OAuth flow
        return await self.auth_client.authenticate(agent_session_id=self.session_id, show_instructions=show_instructions)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/sync/service.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tokens/service.py | """
Token cost service that tracks LLM token usage and costs.
Fetches pricing data from LiteLLM repository and caches it for 1 day.
Automatically tracks token usage when LLMs are registered and invoked.
"""
import logging
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any
import anyio
import httpx
from dotenv import load_dotenv
from browser_use.llm.base import BaseChatModel
from browser_use.llm.views import ChatInvokeUsage
from browser_use.tokens.custom_pricing import CUSTOM_MODEL_PRICING
from browser_use.tokens.mappings import MODEL_TO_LITELLM
from browser_use.tokens.views import (
CachedPricingData,
ModelPricing,
ModelUsageStats,
ModelUsageTokens,
TokenCostCalculated,
TokenUsageEntry,
UsageSummary,
)
from browser_use.utils import create_task_with_error_handling
load_dotenv()
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
cost_logger = logging.getLogger('cost')
def xdg_cache_home() -> Path:
    """Resolve the XDG cache directory, falling back to ``~/.cache``.

    Only an *absolute* XDG_CACHE_HOME value is honoured, per the XDG spec.
    """
    configured = CONFIG.XDG_CACHE_HOME
    if configured:
        candidate = Path(configured)
        if candidate.is_absolute():
            return candidate
    return Path.home() / '.cache'
class TokenCost:
"""Service for tracking token usage and calculating costs"""
CACHE_DIR_NAME = 'browser_use/token_cost'
CACHE_DURATION = timedelta(days=1)
PRICING_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
def __init__(self, include_cost: bool = False):
    """Create the tracker; cost calculation is opt-in via flag or env var."""
    # BROWSER_USE_CALCULATE_COST=true enables cost tracking even when the
    # caller did not ask for it explicitly.
    env_flag = os.getenv('BROWSER_USE_CALCULATE_COST', 'false').lower() == 'true'
    self.include_cost = include_cost or env_flag
    self.usage_history: list[TokenUsageEntry] = []
    self.registered_llms: dict[str, BaseChatModel] = {}
    self._pricing_data: dict[str, Any] | None = None
    self._initialized = False
    self._cache_dir = xdg_cache_home() / self.CACHE_DIR_NAME
async def initialize(self) -> None:
    """Idempotently load pricing data (only needed when cost tracking is on)."""
    if self._initialized:
        return
    if self.include_cost:
        await self._load_pricing_data()
    self._initialized = True
async def _load_pricing_data(self) -> None:
    """Load pricing data from a valid disk cache, or fetch it from GitHub."""
    cached_file = await self._find_valid_cache()
    if cached_file is None:
        await self._fetch_and_cache_pricing_data()
    else:
        await self._load_from_cache(cached_file)
async def _find_valid_cache(self) -> Path | None:
    """Find the most recent valid cache file, pruning expired ones.

    Returns:
        Path of the newest cache file that is still fresh, or ``None`` when
        no usable cache exists (including on any filesystem error — the
        caller then re-fetches pricing data).
    """
    try:
        # Ensure cache directory exists
        self._cache_dir.mkdir(parents=True, exist_ok=True)
        # Newest first, so the first valid hit is the most recent cache.
        cache_files = sorted(self._cache_dir.glob('*.json'), key=lambda f: f.stat().st_mtime, reverse=True)
        for cache_file in cache_files:
            if await self._is_cache_valid(cache_file):
                return cache_file
            # Expired/corrupt: best-effort cleanup of the stale file.
            # unlink(missing_ok=True) replaces os.remove — race-safe and
            # consistent with the pathlib usage in the rest of the file.
            try:
                cache_file.unlink(missing_ok=True)
            except Exception:
                pass
        return None
    except Exception:
        # Any unexpected filesystem error is treated as "no cache".
        return None
async def _is_cache_valid(self, cache_file: Path) -> bool:
    """Return True when *cache_file* parses and is younger than CACHE_DURATION."""
    try:
        if not cache_file.exists():
            return False
        raw = await anyio.Path(cache_file).read_text()
        cached = CachedPricingData.model_validate_json(raw)
        age = datetime.now() - cached.timestamp
        return age < self.CACHE_DURATION
    except Exception:
        # Unreadable or malformed cache counts as invalid.
        return False
async def _load_from_cache(self, cache_file: Path) -> None:
    """Populate pricing data from *cache_file*; re-fetch on any failure."""
    try:
        raw = await anyio.Path(cache_file).read_text()
        self._pricing_data = CachedPricingData.model_validate_json(raw).data
    except Exception as exc:
        logger.debug(f'Error loading cached pricing data from {cache_file}: {exc}')
        # Fall back to fetching fresh data from GitHub.
        await self._fetch_and_cache_pricing_data()
async def _fetch_and_cache_pricing_data(self) -> None:
    """Download LiteLLM pricing data and persist it with a timestamp.

    On any failure the pricing table is left empty rather than raising.
    """
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(self.PRICING_URL, timeout=30)
            response.raise_for_status()
            self._pricing_data = response.json()
            # Wrap the data with a timestamp so cache freshness can be checked later.
            snapshot = CachedPricingData(timestamp=datetime.now(), data=self._pricing_data or {})
            self._cache_dir.mkdir(parents=True, exist_ok=True)
            # Timestamped filename lets multiple snapshots coexist; newest wins.
            stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            target = self._cache_dir / f'pricing_{stamp}.json'
            await anyio.Path(target).write_text(snapshot.model_dump_json(indent=2))
    except Exception as exc:
        logger.debug(f'Error fetching pricing data: {exc}')
        # Fall back to empty pricing data
        self._pricing_data = {}
@staticmethod
def _pricing_from_entry(model_name: str, data: dict[str, Any]) -> ModelPricing:
    """Build a ModelPricing from a raw LiteLLM-style pricing dict.

    Shared by the custom-override and LiteLLM-table paths, which previously
    duplicated this nine-field construction verbatim.
    """
    return ModelPricing(
        model=model_name,
        input_cost_per_token=data.get('input_cost_per_token'),
        output_cost_per_token=data.get('output_cost_per_token'),
        max_tokens=data.get('max_tokens'),
        max_input_tokens=data.get('max_input_tokens'),
        max_output_tokens=data.get('max_output_tokens'),
        cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
        cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
    )

async def get_model_pricing(self, model_name: str) -> ModelPricing | None:
    """Get pricing information for a specific model.

    Checks custom overrides first, then the LiteLLM pricing table (after
    mapping the model name). Returns None when the model is unknown.
    """
    # Ensure we're initialized
    if not self._initialized:
        await self.initialize()
    # Check custom pricing first
    if model_name in CUSTOM_MODEL_PRICING:
        return self._pricing_from_entry(model_name, CUSTOM_MODEL_PRICING[model_name])
    # Map model name to LiteLLM model name if needed
    litellm_model_name = MODEL_TO_LITELLM.get(model_name, model_name)
    if not self._pricing_data or litellm_model_name not in self._pricing_data:
        return None
    return self._pricing_from_entry(model_name, self._pricing_data[litellm_model_name])
async def calculate_cost(self, model: str, usage: ChatInvokeUsage) -> TokenCostCalculated | None:
    """Compute the cost breakdown for one invocation.

    Returns None when cost tracking is disabled or the model has no pricing.
    """
    if not self.include_cost:
        return None
    pricing = await self.get_model_pricing(model)
    if pricing is None:
        return None

    cached_read = usage.prompt_cached_tokens
    cache_created = usage.prompt_cache_creation_tokens
    # Cached prompt tokens are billed at the (cheaper) cache-read rate, so
    # exclude them from the freshly-billed prompt tokens.
    uncached_prompt_tokens = usage.prompt_tokens - (cached_read or 0)

    read_cost = None
    if cached_read and pricing.cache_read_input_token_cost:
        read_cost = cached_read * pricing.cache_read_input_token_cost

    creation_cost = None
    if cache_created and pricing.cache_creation_input_token_cost:
        creation_cost = cache_created * pricing.cache_creation_input_token_cost

    return TokenCostCalculated(
        new_prompt_tokens=usage.prompt_tokens,
        new_prompt_cost=uncached_prompt_tokens * (pricing.input_cost_per_token or 0),
        # Cached tokens
        prompt_read_cached_tokens=cached_read,
        prompt_read_cached_cost=read_cost,
        # Cache creation tokens
        prompt_cached_creation_tokens=cache_created,
        prompt_cache_creation_cost=creation_cost,
        # Completion tokens
        completion_tokens=usage.completion_tokens,
        completion_cost=usage.completion_tokens * float(pricing.output_cost_per_token or 0),
    )
def add_usage(self, model: str, usage: ChatInvokeUsage) -> TokenUsageEntry:
"""Add token usage entry to history (without calculating cost)"""
entry = TokenUsageEntry(
model=model,
timestamp=datetime.now(),
usage=usage,
)
self.usage_history.append(entry)
return entry
# async def _log_non_usage_llm(self, llm: BaseChatModel) -> None:
# """Log non-usage to the logger"""
# C_CYAN = '\033[96m'
# C_RESET = '\033[0m'
# cost_logger.debug(f'🧠 llm : {C_CYAN}{llm.model}{C_RESET} (no usage found)')
async def _log_usage(self, model: str, usage: TokenUsageEntry) -> None:
	"""Log a single invocation's token usage (and cost, when enabled) to cost_logger."""
	# Pricing data must be loaded before we can compute a cost breakdown
	if not self._initialized:
		await self.initialize()
	# ANSI color codes
	C_CYAN = '\033[96m'
	C_YELLOW = '\033[93m'  # NOTE(review): unused here; kept for symmetry with sibling methods
	C_GREEN = '\033[92m'
	C_BLUE = '\033[94m'  # NOTE(review): unused here; kept for symmetry with sibling methods
	C_RESET = '\033[0m'
	# Always get cost breakdown for token details (even if not showing costs)
	cost = await self.calculate_cost(model, usage.usage)
	# Build input tokens breakdown (new / cached-read / cache-creation)
	input_part = self._build_input_tokens_display(usage.usage, cost)
	# Build output tokens display; append dollar cost only when tracking is on and non-zero
	completion_tokens_fmt = self._format_tokens(usage.usage.completion_tokens)
	if self.include_cost and cost and cost.completion_cost > 0:
		output_part = f'📤 {C_GREEN}{completion_tokens_fmt} (${cost.completion_cost:.4f}){C_RESET}'
	else:
		output_part = f'📤 {C_GREEN}{completion_tokens_fmt}{C_RESET}'
	cost_logger.debug(f'🧠 {C_CYAN}{model}{C_RESET} | {input_part} | {output_part}')
def _build_input_tokens_display(self, usage: ChatInvokeUsage, cost: TokenCostCalculated | None) -> str:
	"""Build a display string for the input-token breakdown.

	Shows new (🆕), cache-read (💾) and cache-creation (🔧) token counts when
	cache info is present, or a single total (📥) otherwise. Dollar amounts are
	appended only when cost tracking is enabled and a cost was computed.
	"""
	C_YELLOW = '\033[93m'
	C_BLUE = '\033[94m'
	C_RESET = '\033[0m'
	parts = []
	# Always show token breakdown if we have cache information, regardless of cost tracking
	if usage.prompt_cached_tokens or usage.prompt_cache_creation_tokens:
		# Calculate actual new tokens (non-cached)
		new_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
		if new_tokens > 0:
			new_tokens_fmt = self._format_tokens(new_tokens)
			if self.include_cost and cost and cost.new_prompt_cost > 0:
				parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
			else:
				parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt}{C_RESET}')
		# Tokens served from the provider's prompt cache
		if usage.prompt_cached_tokens:
			cached_tokens_fmt = self._format_tokens(usage.prompt_cached_tokens)
			if self.include_cost and cost and cost.prompt_read_cached_cost:
				parts.append(f'💾 {C_BLUE}{cached_tokens_fmt} (${cost.prompt_read_cached_cost:.4f}){C_RESET}')
			else:
				parts.append(f'💾 {C_BLUE}{cached_tokens_fmt}{C_RESET}')
		# Tokens written into the cache (Anthropic cache creation)
		if usage.prompt_cache_creation_tokens:
			creation_tokens_fmt = self._format_tokens(usage.prompt_cache_creation_tokens)
			if self.include_cost and cost and cost.prompt_cache_creation_cost:
				parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt} (${cost.prompt_cache_creation_cost:.4f}){C_RESET}')
			else:
				parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt}{C_RESET}')
	if not parts:
		# Fallback to simple display when no cache information available
		total_tokens_fmt = self._format_tokens(usage.prompt_tokens)
		if self.include_cost and cost and cost.new_prompt_cost > 0:
			parts.append(f'📥 {C_YELLOW}{total_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
		else:
			parts.append(f'📥 {C_YELLOW}{total_tokens_fmt}{C_RESET}')
	return ' + '.join(parts)
def register_llm(self, llm: BaseChatModel) -> BaseChatModel:
	"""
	Register an LLM to automatically track its token usage.

	Wraps (monkey-patches) the instance's `ainvoke` so every call records its
	usage into this service's history and schedules a background log line.
	@dev Guarantees that the same instance is not registered multiple times
	"""
	# Use instance ID as key to avoid collisions between multiple instances
	instance_id = str(id(llm))
	# Check if this exact instance is already registered
	if instance_id in self.registered_llms:
		logger.debug(f'LLM instance {instance_id} ({llm.provider}_{llm.model}) is already registered')
		return llm
	self.registered_llms[instance_id] = llm
	# Store the original method
	original_ainvoke = llm.ainvoke
	# Store reference to self for use in the closure
	token_cost_service = self
	# Create a wrapped version that tracks usage
	async def tracked_ainvoke(messages, output_format=None, **kwargs):
		# Call the original method, passing through any additional kwargs
		result = await original_ainvoke(messages, output_format, **kwargs)
		# Track usage if available (no await needed since add_usage is now sync)
		# Use llm.model instead of llm.name for consistency with get_usage_tokens_for_model()
		if result.usage:
			usage = token_cost_service.add_usage(llm.model, result.usage)
			logger.debug(f'Token cost service: {usage}')
			# Fire-and-forget: logging must not block or fail the LLM call
			create_task_with_error_handling(
				token_cost_service._log_usage(llm.model, usage), name='log_token_usage', suppress_exceptions=True
			)
		# else:
		# 	await token_cost_service._log_non_usage_llm(llm)
		return result
	# Replace the method with our tracked version
	# Using setattr to avoid type checking issues with overloaded methods
	setattr(llm, 'ainvoke', tracked_ainvoke)
	return llm
def get_usage_tokens_for_model(self, model: str) -> ModelUsageTokens:
"""Get usage tokens for a specific model"""
filtered_usage = [u for u in self.usage_history if u.model == model]
return ModelUsageTokens(
model=model,
prompt_tokens=sum(u.usage.prompt_tokens for u in filtered_usage),
prompt_cached_tokens=sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage),
completion_tokens=sum(u.usage.completion_tokens for u in filtered_usage),
total_tokens=sum(u.usage.prompt_tokens + u.usage.completion_tokens for u in filtered_usage),
)
async def get_usage_summary(self, model: str | None = None, since: datetime | None = None) -> UsageSummary:
	"""Get summary of token usage and costs (costs calculated on-the-fly).

	Args:
		model: If given, restrict the summary to entries for this model.
		since: If given, restrict the summary to entries with timestamp >= since.

	Returns:
		UsageSummary with overall totals plus a per-model breakdown.
	"""
	filtered_usage = self.usage_history
	if model:
		filtered_usage = [u for u in filtered_usage if u.model == model]
	if since:
		filtered_usage = [u for u in filtered_usage if u.timestamp >= since]
	# No matching entries: return an all-zero summary
	if not filtered_usage:
		return UsageSummary(
			total_prompt_tokens=0,
			total_prompt_cost=0.0,
			total_prompt_cached_tokens=0,
			total_prompt_cached_cost=0.0,
			total_completion_tokens=0,
			total_completion_cost=0.0,
			total_tokens=0,
			total_cost=0.0,
			entry_count=0,
		)
	# Calculate token totals
	total_prompt = sum(u.usage.prompt_tokens for u in filtered_usage)
	total_completion = sum(u.usage.completion_tokens for u in filtered_usage)
	total_tokens = total_prompt + total_completion
	total_prompt_cached = sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage)
	# Calculate per-model stats with record-by-record cost calculation
	model_stats: dict[str, ModelUsageStats] = {}
	total_prompt_cost = 0.0
	total_completion_cost = 0.0
	total_prompt_cached_cost = 0.0
	for entry in filtered_usage:
		if entry.model not in model_stats:
			model_stats[entry.model] = ModelUsageStats(model=entry.model)
		stats = model_stats[entry.model]
		stats.prompt_tokens += entry.usage.prompt_tokens
		stats.completion_tokens += entry.usage.completion_tokens
		stats.total_tokens += entry.usage.prompt_tokens + entry.usage.completion_tokens
		stats.invocations += 1
		if self.include_cost:
			# Calculate cost record by record using the updated calculate_cost function
			cost = await self.calculate_cost(entry.model, entry.usage)
			if cost:
				stats.cost += cost.total_cost
				# cost.prompt_cost already includes cache read/creation costs
				total_prompt_cost += cost.prompt_cost
				total_completion_cost += cost.completion_cost
				# Tracked separately for reporting; it is already part of total_prompt_cost
				total_prompt_cached_cost += cost.prompt_read_cached_cost or 0
	# Calculate averages
	for stats in model_stats.values():
		if stats.invocations > 0:
			stats.average_tokens_per_invocation = stats.total_tokens / stats.invocations
	return UsageSummary(
		total_prompt_tokens=total_prompt,
		total_prompt_cost=total_prompt_cost,
		total_prompt_cached_tokens=total_prompt_cached,
		total_prompt_cached_cost=total_prompt_cached_cost,
		total_completion_tokens=total_completion,
		total_completion_cost=total_completion_cost,
		total_tokens=total_tokens,
		# Bug fix: total_prompt_cost already contains the cached-read cost
		# (TokenCostCalculated.prompt_cost includes it), so adding
		# total_prompt_cached_cost again double-counted cache costs.
		total_cost=total_prompt_cost + total_completion_cost,
		entry_count=len(filtered_usage),
		by_model=model_stats,
	)
def _format_tokens(self, tokens: int) -> str:
	"""Format a token count with a B/M/k suffix (e.g. 1500 -> '1.5k')."""
	# Check largest magnitude first so 2.5M doesn't render as 2500.0k
	for threshold, suffix in ((1_000_000_000, 'B'), (1_000_000, 'M'), (1_000, 'k')):
		if tokens >= threshold:
			return f'{tokens / threshold:.1f}{suffix}'
	return str(tokens)
async def log_usage_summary(self) -> None:
	"""Log a comprehensive usage summary per model with colors and nice formatting"""
	if not self.usage_history:
		return
	summary = await self.get_usage_summary()
	if summary.entry_count == 0:
		return
	# ANSI color codes
	C_CYAN = '\033[96m'
	C_YELLOW = '\033[93m'
	C_GREEN = '\033[92m'
	C_BLUE = '\033[94m'
	C_MAGENTA = '\033[95m'
	C_RESET = '\033[0m'
	C_BOLD = '\033[1m'
	# Log overall summary
	total_tokens_fmt = self._format_tokens(summary.total_tokens)
	prompt_tokens_fmt = self._format_tokens(summary.total_prompt_tokens)
	completion_tokens_fmt = self._format_tokens(summary.total_completion_tokens)
	# Format cost breakdowns for input and output (only if cost tracking is enabled)
	if self.include_cost and summary.total_cost > 0:
		total_cost_part = f' (${C_MAGENTA}{summary.total_cost:.4f}{C_RESET})'
		prompt_cost_part = f' (${summary.total_prompt_cost:.4f})'
		completion_cost_part = f' (${summary.total_completion_cost:.4f})'
	else:
		total_cost_part = ''
		prompt_cost_part = ''
		completion_cost_part = ''
	# The aggregate line is only worth printing when more than one model was used
	if len(summary.by_model) > 1:
		cost_logger.debug(
			f'💲 {C_BOLD}Total Usage Summary{C_RESET}: {C_BLUE}{total_tokens_fmt} tokens{C_RESET}{total_cost_part} | '
			f'⬅️ {C_YELLOW}{prompt_tokens_fmt}{prompt_cost_part}{C_RESET} | ➡️ {C_GREEN}{completion_tokens_fmt}{completion_cost_part}{C_RESET}'
		)
	# One line per model
	for model, stats in summary.by_model.items():
		# Format tokens
		model_total_fmt = self._format_tokens(stats.total_tokens)
		model_prompt_fmt = self._format_tokens(stats.prompt_tokens)
		model_completion_fmt = self._format_tokens(stats.completion_tokens)
		avg_tokens_fmt = self._format_tokens(int(stats.average_tokens_per_invocation))
		# Format cost display (only if cost tracking is enabled)
		if self.include_cost:
			# Calculate per-model costs on-the-fly
			total_model_cost = 0.0
			model_prompt_cost = 0.0
			model_completion_cost = 0.0
			# Calculate costs for this model
			# NOTE(review): O(models * history) — recomputes cost per entry for each model
			for entry in self.usage_history:
				if entry.model == model:
					cost = await self.calculate_cost(entry.model, entry.usage)
					if cost:
						model_prompt_cost += cost.prompt_cost
						model_completion_cost += cost.completion_cost
			total_model_cost = model_prompt_cost + model_completion_cost
			if total_model_cost > 0:
				cost_part = f' (${C_MAGENTA}{total_model_cost:.4f}{C_RESET})'
				prompt_part = f'{C_YELLOW}{model_prompt_fmt} (${model_prompt_cost:.4f}){C_RESET}'
				completion_part = f'{C_GREEN}{model_completion_fmt} (${model_completion_cost:.4f}){C_RESET}'
			else:
				cost_part = ''
				prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
				completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
		else:
			cost_part = ''
			prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
			completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
		cost_logger.debug(
			f'  🤖 {C_CYAN}{model}{C_RESET}: {C_BLUE}{model_total_fmt} tokens{C_RESET}{cost_part} | '
			f'⬅️ {prompt_part} | ➡️ {completion_part} | '
			f'📞 {stats.invocations} calls | 📈 {avg_tokens_fmt}/call'
		)
async def get_cost_by_model(self) -> dict[str, ModelUsageStats]:
"""Get cost breakdown by model"""
summary = await self.get_usage_summary()
return summary.by_model
def clear_history(self) -> None:
	"""Drop every recorded usage entry (does not touch pricing caches)."""
	self.usage_history = []
async def refresh_pricing_data(self) -> None:
	"""Force refresh of pricing data from GitHub (no-op when cost tracking is off)."""
	if not self.include_cost:
		return
	await self._fetch_and_cache_pricing_data()
async def clean_old_caches(self, keep_count: int = 3) -> None:
	"""Delete the oldest pricing-cache files, retaining only the `keep_count` newest.

	Best-effort: individual deletion failures are ignored, and any other error
	is logged at debug level rather than raised.
	"""
	try:
		# Oldest first, by filesystem modification time
		json_files = sorted(self._cache_dir.glob('*.json'), key=lambda p: p.stat().st_mtime)
		if len(json_files) <= keep_count:
			return
		for stale in json_files[:-keep_count]:
			try:
				stale.unlink()
			except Exception:
				# A file vanishing mid-cleanup (e.g. concurrent process) is fine
				pass
	except Exception as e:
		logger.debug(f'Error cleaning old cache files: {e}')
async def ensure_pricing_loaded(self) -> None:
	"""Kick off pricing-data initialization if it has not happened yet.

	Call this after creating the service; it awaits initialize() only when
	cost tracking is enabled and the data is not already loaded.
	"""
	if self._initialized or not self.include_cost:
		return
	await self.initialize()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tokens/service.py",
"license": "MIT License",
"lines": 473,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tokens/tests/test_cost.py | """
Simple test for token cost tracking with real LLM calls.
Tests ChatOpenAI and ChatGoogle by iteratively generating countries.
"""
import asyncio
import logging
from browser_use.llm import ChatGoogle, ChatOpenAI
from browser_use.llm.messages import AssistantMessage, SystemMessage, UserMessage
from browser_use.tokens.service import TokenCost
# Optional OCI import
try:
from examples.models.oci_models import meta_llm
OCI_MODELS_AVAILABLE = True
except ImportError:
meta_llm = None
OCI_MODELS_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_oci_model_if_available():
	"""Return the example OCI model when the optional dependency imported, else None."""
	if OCI_MODELS_AVAILABLE:
		try:
			# Any of the example OCI models (meta/xai/cohere) would do; we use meta_llm
			return meta_llm
		except Exception as e:
			logger.info(f'OCI model not available for testing: {e}')
	return None
async def test_iterative_country_generation():
	"""Test token cost tracking with iterative country generation.

	NOTE(review): integration test — constructs real ChatOpenAI/ChatGoogle
	clients and awaits live ainvoke() calls, so it needs valid API keys and
	network access; it is not a unit test.
	"""
	# Initialize token cost service
	tc = TokenCost(include_cost=True)
	# System prompt that explains the iterative task
	system_prompt = """You are a country name generator. When asked, you will provide exactly ONE country name and nothing else.
Each time you're asked to continue, provide the next country name that hasn't been mentioned yet.
Keep track of which countries you've already said and don't repeat them.
Only output the country name, no numbers, no punctuation, just the name."""
	# Test with different models
	models = []
	models.append(ChatOpenAI(model='gpt-4.1'))  # requires OPENAI_API_KEY
	models.append(ChatGoogle(model='gemini-2.0-flash-exp'))
	# Add OCI model if available
	oci_model = get_oci_model_if_available()
	if oci_model:
		models.append(oci_model)
		print(f'✅ OCI model added to test: {oci_model.name}')
	else:
		print('ℹ️ OCI model not available (install with pip install browser-use[oci] and configure credentials)')
	print('\n🌍 Iterative Country Generation Test')
	print('=' * 80)
	for llm in models:
		print(f'\n📍 Testing {llm.model}')
		print('-' * 60)
		# Register the LLM for automatic tracking
		tc.register_llm(llm)
		# Initialize conversation
		messages = [SystemMessage(content=system_prompt), UserMessage(content='Give me a country name')]
		countries = []
		# Generate 10 countries iteratively
		for i in range(10):
			# Call the LLM
			result = await llm.ainvoke(messages)
			country = result.completion.strip()
			countries.append(country)
			# Add the response to messages
			messages.append(AssistantMessage(content=country))
			# Add the next request (except for the last iteration)
			if i < 9:
				messages.append(UserMessage(content='Next country please'))
			print(f' Country {i + 1}: {country}')
		print(f'\n Generated countries: {", ".join(countries)}')
	# Display cost summary
	print('\n💰 Cost Summary')
	print('=' * 80)
	summary = await tc.get_usage_summary()
	print(f'Total calls: {summary.entry_count}')
	print(f'Total tokens: {summary.total_tokens:,}')
	print(f'Total cost: ${summary.total_cost:.6f}')
	expected_cost = 0
	expected_invocations = 0
	print('\n📊 Cost breakdown by model:')
	for model, stats in summary.by_model.items():
		expected_cost += stats.cost
		expected_invocations += stats.invocations
		print(f'\n{model}:')
		print(f' Calls: {stats.invocations}')
		print(f' Prompt tokens: {stats.prompt_tokens:,}')
		print(f' Completion tokens: {stats.completion_tokens:,}')
		print(f' Total tokens: {stats.total_tokens:,}')
		print(f' Cost: ${stats.cost:.6f}')
		print(f' Average tokens per call: {stats.average_tokens_per_invocation:.1f}')
	# The summary totals must agree with the per-model breakdown
	assert summary.entry_count == expected_invocations, f'Expected {expected_invocations} invocations, got {summary.entry_count}'
	assert abs(summary.total_cost - expected_cost) < 1e-6, (
		f'Expected total cost ${expected_cost:.6f}, got ${summary.total_cost:.6f}'
	)
if __name__ == '__main__':
	# Run the test directly (outside pytest) for manual/interactive checking
	asyncio.run(test_iterative_country_generation())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tokens/tests/test_cost.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/tokens/views.py | from datetime import datetime
from typing import Any, TypeVar
from pydantic import BaseModel, Field
from browser_use.llm.views import ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
class TokenUsageEntry(BaseModel):
	"""Single token usage entry recorded for one LLM invocation."""

	model: str  # model identifier the usage was recorded under
	timestamp: datetime  # when the invocation was recorded
	usage: ChatInvokeUsage  # raw token counts reported by the provider
class TokenCostCalculated(BaseModel):
	"""Cost breakdown computed for a single invocation from pricing data."""

	# NOTE(review): the service stores the FULL prompt token count here (not
	# just the uncached portion) — confirm whether the field name is misleading
	new_prompt_tokens: int
	new_prompt_cost: float  # cost of the non-cached portion of the prompt
	prompt_read_cached_tokens: int | None  # tokens served from the provider's prompt cache
	prompt_read_cached_cost: float | None  # cost of reading those cached tokens
	prompt_cached_creation_tokens: int | None  # tokens written into the cache
	prompt_cache_creation_cost: float | None
	"""Anthropic only: The cost of creating the cache."""
	completion_tokens: int  # output tokens
	completion_cost: float  # cost of the output tokens

	@property
	def prompt_cost(self) -> float:
		"""Full input-side cost: new + cached-read + cache-creation."""
		return self.new_prompt_cost + (self.prompt_read_cached_cost or 0) + (self.prompt_cache_creation_cost or 0)

	@property
	def total_cost(self) -> float:
		"""Grand total for the invocation: full prompt cost plus completion cost."""
		return (
			self.new_prompt_cost
			+ (self.prompt_read_cached_cost or 0)
			+ (self.prompt_cache_creation_cost or 0)
			+ self.completion_cost
		)
class ModelPricing(BaseModel):
	"""Pricing information for a model; None means the rate/limit is unknown."""

	model: str  # model name as used in the pricing data
	input_cost_per_token: float | None  # USD per prompt token
	output_cost_per_token: float | None  # USD per completion token
	cache_read_input_token_cost: float | None  # USD per cached prompt token read
	cache_creation_input_token_cost: float | None  # USD per token written into the cache
	max_tokens: int | None  # overall token limit, if published
	max_input_tokens: int | None  # prompt-side limit, if published
	max_output_tokens: int | None  # completion-side limit, if published
class CachedPricingData(BaseModel):
	"""Pricing data persisted to disk together with its fetch timestamp."""

	timestamp: datetime  # when the data was fetched (used for cache expiry)
	data: dict[str, Any]  # raw model-name -> pricing-fields mapping
class ModelUsageStats(BaseModel):
	"""Accumulated usage statistics for a single model (mutated in place while summarizing)."""

	model: str
	prompt_tokens: int = 0
	completion_tokens: int = 0
	total_tokens: int = 0
	cost: float = 0.0  # summed invocation costs; stays 0.0 when cost tracking is off
	invocations: int = 0
	average_tokens_per_invocation: float = 0.0  # filled in after accumulation
class ModelUsageTokens(BaseModel):
	"""Token totals (no costs) for a single model."""

	model: str
	prompt_tokens: int
	prompt_cached_tokens: int  # portion of prompt tokens served from cache
	completion_tokens: int
	total_tokens: int  # prompt + completion
class UsageSummary(BaseModel):
	"""Summary of token usage and costs over a (possibly filtered) set of entries."""

	total_prompt_tokens: int
	total_prompt_cost: float
	total_prompt_cached_tokens: int
	total_prompt_cached_cost: float  # informational breakdown of the cached portion
	total_completion_tokens: int
	total_completion_cost: float
	total_tokens: int  # prompt + completion
	total_cost: float
	entry_count: int  # number of usage entries included
	by_model: dict[str, ModelUsageStats] = Field(default_factory=dict)  # per-model breakdown
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tokens/views.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/tools/registry/service.py | import asyncio
import functools
import inspect
import logging
import re
from collections.abc import Callable
from inspect import Parameter, iscoroutinefunction, signature
from types import UnionType
from typing import Any, Generic, Optional, TypeVar, Union, get_args, get_origin
import pyotp
from pydantic import BaseModel, Field, RootModel, create_model
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.observability import observe_debug
from browser_use.telemetry.service import ProductTelemetry
from browser_use.tools.registry.views import (
ActionModel,
ActionRegistry,
RegisteredAction,
SpecialActionParameters,
)
from browser_use.utils import is_new_tab_page, match_url_with_domain_pattern, time_execution_async
Context = TypeVar('Context')
logger = logging.getLogger(__name__)
class Registry(Generic[Context]):
"""Service for registering and managing actions"""
def __init__(self, exclude_actions: list[str] | None = None):
	"""Set up an empty action registry.

	Args:
		exclude_actions: Action names that must never be registered; copied
			defensively so the caller's list is not shared or mutated.
	"""
	self.registry = ActionRegistry()
	self.telemetry = ProductTelemetry()
	# Copy to avoid aliasing the caller's list (and mutable-default pitfalls)
	self.exclude_actions = [] if exclude_actions is None else list(exclude_actions)
def exclude_action(self, action_name: str) -> None:
	"""Exclude an action from the registry after initialization.

	If the action is already registered, it will be removed from the registry.
	The action is also added to the exclude_actions list to prevent re-registration.
	"""
	# Remember the exclusion so future @action registrations are skipped too
	if action_name not in self.exclude_actions:
		self.exclude_actions.append(action_name)
	# Drop it from the live registry if it was already registered
	removed = self.registry.actions.pop(action_name, None)
	if removed is not None:
		logger.debug(f'Excluded action "{action_name}" from registry')
def _get_special_param_types(self) -> dict[str, type | UnionType | None]:
	"""Get the expected types for special parameters from SpecialActionParameters.

	A value of None means "do not validate this parameter's annotation".
	"""
	# Manually define the expected types to avoid issues with Optional handling.
	# we should try to reduce this list to 0 if possible, give as few standardized objects to all the actions
	# but each driver should decide what is relevant to expose the action methods,
	# e.g. CDP client, 2fa code getters, sensitive_data wrappers, other context, etc.
	return {
		'context': None,  # Context is a TypeVar, so we can't validate type
		'browser_session': BrowserSession,
		'page_url': str,
		'cdp_client': None,  # CDPClient type from cdp_use, but we don't import it here
		'page_extraction_llm': BaseChatModel,
		'available_file_paths': list,
		'has_sensitive_data': bool,
		'file_system': FileSystem,
		'extraction_schema': None,  # dict | None, skip type validation
	}
def _normalize_action_function_signature(
	self,
	func: Callable,
	description: str,
	param_model: type[BaseModel] | None = None,
) -> tuple[Callable, type[BaseModel]]:
	"""
	Normalize action function to accept only kwargs.

	Supports two authoring patterns:
	- Type 1: first parameter is a pydantic model (param_model provided).
	- Type 2: plain positional parameters; a param model is generated from them.

	Returns:
		- Normalized function that accepts (*_, params: ParamModel, **special_params)
		- The param model to use for registration
	"""
	sig = signature(func)
	parameters = list(sig.parameters.values())
	special_param_types = self._get_special_param_types()
	special_param_names = set(special_param_types.keys())

	# Step 1: Validate no **kwargs in original function signature
	# if it needs default values it must use a dedicated param_model: BaseModel instead
	for param in parameters:
		if param.kind == Parameter.VAR_KEYWORD:
			raise ValueError(
				f"Action '{func.__name__}' has **{param.name} which is not allowed. "
				f'Actions must have explicit positional parameters only.'
			)

	# Step 2: Separate special and action parameters
	action_params = []
	special_params = []
	param_model_provided = param_model is not None

	for i, param in enumerate(parameters):
		# Check if this is a Type 1 pattern (first param is BaseModel)
		if i == 0 and param_model_provided and param.name not in special_param_names:
			# This is Type 1 pattern - skip the params argument
			continue

		if param.name in special_param_names:
			# Validate special parameter type
			expected_type = special_param_types.get(param.name)
			if param.annotation != Parameter.empty and expected_type is not None:
				# Handle Optional types - normalize both sides
				param_type = param.annotation
				origin = get_origin(param_type)
				if origin is Union:
					args = get_args(param_type)
					# Find non-None type
					param_type = next((arg for arg in args if arg is not type(None)), param_type)
				# Check if types are compatible (exact match, subclass, or generic list)
				types_compatible = (
					param_type == expected_type
					or (
						inspect.isclass(param_type)
						and inspect.isclass(expected_type)
						and issubclass(param_type, expected_type)
					)
					or
					# Handle list[T] vs list comparison
					(expected_type is list and (param_type is list or get_origin(param_type) is list))
				)
				if not types_compatible:
					expected_type_name = getattr(expected_type, '__name__', str(expected_type))
					param_type_name = getattr(param_type, '__name__', str(param_type))
					raise ValueError(
						f"Action '{func.__name__}' parameter '{param.name}: {param_type_name}' "
						f"conflicts with special argument injected by tools: '{param.name}: {expected_type_name}'"
					)
			special_params.append(param)
		else:
			action_params.append(param)

	# Step 3: Create or validate param model
	if not param_model_provided:
		# Type 2: Generate param model from action params
		if action_params:
			params_dict = {}
			for param in action_params:
				annotation = param.annotation if param.annotation != Parameter.empty else str
				default = ... if param.default == Parameter.empty else param.default
				params_dict[param.name] = (annotation, default)
			param_model = create_model(f'{func.__name__}_Params', __base__=ActionModel, **params_dict)
		else:
			# No action params, create empty model
			param_model = create_model(
				f'{func.__name__}_Params',
				__base__=ActionModel,
			)
	assert param_model is not None, f'param_model is None for {func.__name__}'

	# Step 4: Create normalized wrapper function
	@functools.wraps(func)
	async def normalized_wrapper(*args, params: BaseModel | None = None, **kwargs):
		"""Normalized action that only accepts kwargs"""
		# Validate no positional args
		if args:
			raise TypeError(f'{func.__name__}() does not accept positional arguments, only keyword arguments are allowed')

		# Prepare arguments for original function
		call_args = []
		call_kwargs = {}  # NOTE(review): unused — all arguments are passed positionally below

		# Handle Type 1 pattern (first arg is the param model)
		if param_model_provided and parameters and parameters[0].name not in special_param_names:
			if params is None:
				raise ValueError(f"{func.__name__}() missing required 'params' argument")
			# For Type 1, we'll use the params object as first argument
			pass
		else:
			# Type 2 pattern - need to unpack params
			# If params is None, try to create it from kwargs
			if params is None and action_params:
				# Extract action params from kwargs
				action_kwargs = {}
				for param in action_params:
					if param.name in kwargs:
						action_kwargs[param.name] = kwargs[param.name]
				if action_kwargs:
					# Use the param_model which has the correct types defined
					params = param_model(**action_kwargs)

		# Build call_args by iterating through original function parameters in order
		params_dict = params.model_dump() if params is not None else {}

		for i, param in enumerate(parameters):
			# Skip first param for Type 1 pattern (it's the model itself)
			if param_model_provided and i == 0 and param.name not in special_param_names:
				call_args.append(params)
			elif param.name in special_param_names:
				# This is a special parameter
				if param.name in kwargs:
					value = kwargs[param.name]
					# Check if required special param is None
					if value is None and param.default == Parameter.empty:
						if param.name == 'browser_session':
							raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
						elif param.name == 'page_extraction_llm':
							raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
						elif param.name == 'file_system':
							raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
						elif param.name == 'page':
							raise ValueError(f'Action {func.__name__} requires page but none provided.')
						elif param.name == 'available_file_paths':
							raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
						# NOTE(review): duplicate 'file_system' branch below is dead code
						elif param.name == 'file_system':
							raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
						else:
							raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
					call_args.append(value)
				elif param.default != Parameter.empty:
					call_args.append(param.default)
				else:
					# Special param is required but not provided
					if param.name == 'browser_session':
						raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
					elif param.name == 'page_extraction_llm':
						raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
					elif param.name == 'file_system':
						raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
					elif param.name == 'page':
						raise ValueError(f'Action {func.__name__} requires page but none provided.')
					elif param.name == 'available_file_paths':
						raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
					# NOTE(review): duplicate 'file_system' branch below is dead code
					elif param.name == 'file_system':
						raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
					else:
						raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
			else:
				# This is an action parameter
				if param.name in params_dict:
					call_args.append(params_dict[param.name])
				elif param.default != Parameter.empty:
					call_args.append(param.default)
				else:
					raise ValueError(f"{func.__name__}() missing required parameter '{param.name}'")

		# Call original function with positional args
		if iscoroutinefunction(func):
			return await func(*call_args)
		else:
			# Run sync actions in a worker thread so they don't block the event loop
			return await asyncio.to_thread(func, *call_args)

	# Update wrapper signature to be kwargs-only
	new_params = [Parameter('params', Parameter.KEYWORD_ONLY, default=None, annotation=Optional[param_model])]
	# Add special params as keyword-only
	for sp in special_params:
		new_params.append(Parameter(sp.name, Parameter.KEYWORD_ONLY, default=sp.default, annotation=sp.annotation))
	# Add **kwargs to accept and ignore extra params
	new_params.append(Parameter('kwargs', Parameter.VAR_KEYWORD))
	normalized_wrapper.__signature__ = sig.replace(parameters=new_params)  # type: ignore[attr-defined]

	return normalized_wrapper, param_model
# @time_execution_sync('--create_param_model')
def _create_param_model(self, function: Callable) -> type[BaseModel]:
	"""Creates a Pydantic model from function signature"""
	sig = signature(function)
	special_param_names = set(SpecialActionParameters.model_fields.keys())
	# Only user-facing action parameters become model fields; framework-injected
	# special params (browser_session, file_system, ...) are excluded.
	params = {
		name: (param.annotation, ... if param.default == param.empty else param.default)
		for name, param in sig.parameters.items()
		if name not in special_param_names
	}
	# TODO: make the types here work
	return create_model(
		f'{function.__name__}_parameters',
		__base__=ActionModel,
		**params,  # type: ignore
	)
def action(
	self,
	description: str,
	param_model: type[BaseModel] | None = None,
	domains: list[str] | None = None,
	allowed_domains: list[str] | None = None,
	terminates_sequence: bool = False,
):
	"""Decorator for registering actions.

	Args:
		description: Natural-language description of the action.
		param_model: Optional explicit pydantic model for the action's params
			(Type 1 pattern); otherwise one is generated from the signature.
		domains / allowed_domains: Aliases for the same restriction list;
			specifying both raises ValueError.
		terminates_sequence: Passed through to RegisteredAction.
	"""
	# Handle aliases: domains and allowed_domains are the same parameter
	if allowed_domains is not None and domains is not None:
		raise ValueError("Cannot specify both 'domains' and 'allowed_domains' - they are aliases for the same parameter")
	final_domains = allowed_domains if allowed_domains is not None else domains

	def decorator(func: Callable):
		# Skip registration if action is in exclude_actions
		if func.__name__ in self.exclude_actions:
			return func
		# Normalize the function signature
		normalized_func, actual_param_model = self._normalize_action_function_signature(func, description, param_model)
		action = RegisteredAction(
			name=func.__name__,
			description=description,
			function=normalized_func,
			param_model=actual_param_model,
			domains=final_domains,
			terminates_sequence=terminates_sequence,
		)
		self.registry.actions[func.__name__] = action
		# Return the normalized function so it can be called with kwargs
		return normalized_func

	return decorator
@observe_debug(ignore_input=True, ignore_output=True, name='execute_action')
@time_execution_async('--execute_action')
async def execute_action(
self,
action_name: str,
params: dict,
browser_session: BrowserSession | None = None,
page_extraction_llm: BaseChatModel | None = None,
file_system: FileSystem | None = None,
sensitive_data: dict[str, str | dict[str, str]] | None = None,
available_file_paths: list[str] | None = None,
extraction_schema: dict | None = None,
) -> Any:
"""Execute a registered action with simplified parameter handling"""
if action_name not in self.registry.actions:
raise ValueError(f'Action {action_name} not found')
action = self.registry.actions[action_name]
try:
# Create the validated Pydantic model
try:
validated_params = action.param_model(**params)
except Exception as e:
raise ValueError(f'Invalid parameters {params} for action {action_name}: {type(e)}: {e}') from e
if sensitive_data:
# Get current URL if browser_session is provided
current_url = None
if browser_session and browser_session.agent_focus_target_id:
try:
# Get current page info from session_manager
target = browser_session.session_manager.get_target(browser_session.agent_focus_target_id)
if target:
current_url = target.url
except Exception:
pass
validated_params = self._replace_sensitive_data(validated_params, sensitive_data, current_url)
# Build special context dict
special_context = {
'browser_session': browser_session,
'page_extraction_llm': page_extraction_llm,
'available_file_paths': available_file_paths,
'has_sensitive_data': action_name == 'input' and bool(sensitive_data),
'file_system': file_system,
'extraction_schema': extraction_schema,
}
# Only pass sensitive_data to actions that explicitly need it (input)
if action_name == 'input':
special_context['sensitive_data'] = sensitive_data
# Add CDP-related parameters if browser_session is available
if browser_session:
# Add page_url
try:
special_context['page_url'] = await browser_session.get_current_page_url()
except Exception:
special_context['page_url'] = None
# Add cdp_client
special_context['cdp_client'] = browser_session.cdp_client
# All functions are now normalized to accept kwargs only
# Call with params and unpacked special context
try:
return await action.function(params=validated_params, **special_context)
except Exception as e:
raise
except ValueError as e:
# Preserve ValueError messages from validation
if 'requires browser_session but none provided' in str(e) or 'requires page_extraction_llm but none provided' in str(
e
):
raise RuntimeError(str(e)) from e
else:
raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
except TimeoutError as e:
raise RuntimeError(f'Error executing action {action_name} due to timeout.') from e
except Exception as e:
raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
def _log_sensitive_data_usage(self, placeholders_used: set[str], current_url: str | None) -> None:
"""Log when sensitive data is being used on a page"""
if placeholders_used:
url_info = f' on {current_url}' if current_url and not is_new_tab_page(current_url) else ''
logger.info(f'🔒 Using sensitive data placeholders: {", ".join(sorted(placeholders_used))}{url_info}')
	def _replace_sensitive_data(
		self, params: BaseModel, sensitive_data: dict[str, Any], current_url: str | None = None
	) -> BaseModel:
		"""
		Replaces sensitive data placeholders in params with actual values.

		Args:
			params: The parameter object containing <secret>placeholder</secret> tags
			sensitive_data: Dictionary of sensitive data, either in old format {key: value}
				or new format {domain_pattern: {key: value}}
			current_url: Optional current URL for domain matching

		Returns:
			BaseModel: The parameter object with placeholders replaced by actual values
		"""
		secret_pattern = re.compile(r'<secret>(.*?)</secret>')

		# Set to track all missing placeholders across the full object
		all_missing_placeholders = set()
		# Set to track successfully replaced placeholders
		replaced_placeholders = set()

		# Process sensitive data based on format and current URL; only secrets
		# applicable to the current page end up in this dict.
		applicable_secrets = {}

		for domain_or_key, content in sensitive_data.items():
			if isinstance(content, dict):
				# New format: {domain_pattern: {key: value}}
				# Only include secrets for domains that match the current URL
				if current_url and not is_new_tab_page(current_url):
					# it's a real url, check it using our custom allowed_domains scheme://*.example.com glob matching
					if match_url_with_domain_pattern(current_url, domain_or_key):
						applicable_secrets.update(content)
			else:
				# Old format: {key: value}, expose to all domains (only allowed for legacy reasons)
				applicable_secrets[domain_or_key] = content

		# Filter out empty values
		applicable_secrets = {k: v for k, v in applicable_secrets.items() if v}

		def recursively_replace_secrets(value: str | dict | list) -> str | dict | list:
			# Walks strings, dicts and lists; any other type passes through unchanged.
			# Mutates the enclosing replaced_placeholders / all_missing_placeholders sets.
			if isinstance(value, str):
				# 1. Handle tagged secrets: <secret>label</secret>
				matches = secret_pattern.findall(value)

				for placeholder in matches:
					if placeholder in applicable_secrets:
						# generate a totp code if secret is suffixed with bu_2fa_code
						if placeholder.endswith('bu_2fa_code'):
							totp = pyotp.TOTP(applicable_secrets[placeholder], digits=6)
							replacement_value = totp.now()
						else:
							replacement_value = applicable_secrets[placeholder]
						value = value.replace(f'<secret>{placeholder}</secret>', replacement_value)
						replaced_placeholders.add(placeholder)
					else:
						# Keep track of missing placeholders
						all_missing_placeholders.add(placeholder)

				# 2. Handle literal secrets: "user_name" (no tags)
				# This handles cases where the LLM forgets to use tags but uses the exact placeholder name
				if value in applicable_secrets:
					placeholder_name = value
					if placeholder_name.endswith('bu_2fa_code'):
						totp = pyotp.TOTP(applicable_secrets[placeholder_name], digits=6)
						value = totp.now()
					else:
						value = applicable_secrets[placeholder_name]
					replaced_placeholders.add(placeholder_name)

				return value
			elif isinstance(value, dict):
				return {k: recursively_replace_secrets(v) for k, v in value.items()}
			elif isinstance(value, list):
				return [recursively_replace_secrets(v) for v in value]
			return value

		params_dump = params.model_dump()
		processed_params = recursively_replace_secrets(params_dump)

		# Log sensitive data usage
		self._log_sensitive_data_usage(replaced_placeholders, current_url)

		# Log a warning if any placeholders are missing
		if all_missing_placeholders:
			logger.warning(f'Missing or empty keys in sensitive_data dictionary: {", ".join(all_missing_placeholders)}')

		# Re-validate so the caller gets the same model type back, now with secrets inlined.
		return type(params).model_validate(processed_params)
# @time_execution_sync('--create_action_model')
def create_action_model(self, include_actions: list[str] | None = None, page_url: str | None = None) -> type[ActionModel]:
"""Creates a Union of individual action models from registered actions,
used by LLM APIs that support tool calling & enforce a schema.
Each action model contains only the specific action being used,
rather than all actions with most set to None.
"""
from typing import Union
# Filter actions based on page_url if provided:
# if page_url is None, only include actions with no filters
# if page_url is provided, only include actions that match the URL
available_actions: dict[str, RegisteredAction] = {}
for name, action in self.registry.actions.items():
if include_actions is not None and name not in include_actions:
continue
# If no page_url provided, only include actions with no filters
if page_url is None:
if action.domains is None:
available_actions[name] = action
continue
# Check domain filter if present
domain_is_allowed = self.registry._match_domains(action.domains, page_url)
# Include action if domain filter matches
if domain_is_allowed:
available_actions[name] = action
# Create individual action models for each action
individual_action_models: list[type[BaseModel]] = []
for name, action in available_actions.items():
# Create an individual model for each action that contains only one field
individual_model = create_model(
f'{name.title().replace("_", "")}ActionModel',
__base__=ActionModel,
**{
name: (
action.param_model,
Field(description=action.description),
) # type: ignore
},
)
individual_action_models.append(individual_model)
# If no actions available, return empty ActionModel
if not individual_action_models:
return create_model('EmptyActionModel', __base__=ActionModel)
# Create proper Union type that maintains ActionModel interface
if len(individual_action_models) == 1:
# If only one action, return it directly (no Union needed)
result_model = individual_action_models[0]
# Meaning the length is more than 1
else:
# Create a Union type using RootModel that properly delegates ActionModel methods
union_type = Union[tuple(individual_action_models)] # type: ignore : Typing doesn't understand that the length is >= 2 (by design)
class ActionModelUnion(RootModel[union_type]): # type: ignore
def get_index(self) -> int | None:
"""Delegate get_index to the underlying action model"""
if hasattr(self.root, 'get_index'):
return self.root.get_index() # type: ignore
return None
def set_index(self, index: int):
"""Delegate set_index to the underlying action model"""
if hasattr(self.root, 'set_index'):
self.root.set_index(index) # type: ignore
def model_dump(self, **kwargs):
"""Delegate model_dump to the underlying action model"""
if hasattr(self.root, 'model_dump'):
return self.root.model_dump(**kwargs) # type: ignore
return super().model_dump(**kwargs)
# Set the name for better debugging
ActionModelUnion.__name__ = 'ActionModel'
ActionModelUnion.__qualname__ = 'ActionModel'
result_model = ActionModelUnion
return result_model # type:ignore
def get_prompt_description(self, page_url: str | None = None) -> str:
"""Get a description of all actions for the prompt
If page_url is provided, only include actions that are available for that URL
based on their domain filters
"""
return self.registry.get_prompt_description(page_url=page_url)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tools/registry/service.py",
"license": "MIT License",
"lines": 516,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tools/views.py | from typing import Generic, TypeVar
from pydantic import BaseModel, ConfigDict, Field
from pydantic.json_schema import SkipJsonSchema
# Action Input Models
class ExtractAction(BaseModel):
query: str
extract_links: bool = Field(
default=False, description='Set True to true if the query requires links, else false to safe tokens'
)
start_from_char: int = Field(
default=0, description='Use this for long markdowns to start from a specific character (not index in browser_state)'
)
output_schema: SkipJsonSchema[dict | None] = Field(
default=None,
description='Optional JSON Schema dict. When provided, extraction returns validated JSON matching this schema instead of free-text.',
)
class SearchPageAction(BaseModel):
pattern: str = Field(description='Text or regex pattern to search for in page content')
regex: bool = Field(default=False, description='Treat pattern as regex (default: literal text match)')
case_sensitive: bool = Field(default=False, description='Case-sensitive search (default: case-insensitive)')
context_chars: int = Field(default=150, description='Characters of surrounding context per match')
css_scope: str | None = Field(default=None, description='CSS selector to limit search scope (e.g. "div#main")')
max_results: int = Field(default=25, description='Maximum matches to return')
class FindElementsAction(BaseModel):
selector: str = Field(description='CSS selector to query elements (e.g. "table tr", "a.link", "div.product")')
attributes: list[str] | None = Field(
default=None,
description='Specific attributes to extract (e.g. ["href", "src", "class"]). If not set, returns tag and text only.',
)
max_results: int = Field(default=50, description='Maximum elements to return')
include_text: bool = Field(default=True, description='Include text content of each element')
class SearchAction(BaseModel):
query: str
engine: str = Field(
default='duckduckgo', description='duckduckgo, google, bing (use duckduckgo by default because less captchas)'
)
# Backward compatibility alias
SearchAction = SearchAction
class NavigateAction(BaseModel):
url: str
new_tab: bool = Field(default=False)
# Backward compatibility alias
GoToUrlAction = NavigateAction
class ClickElementAction(BaseModel):
index: int | None = Field(default=None, ge=1, description='Element index from browser_state')
coordinate_x: int | None = Field(default=None, description='Horizontal coordinate relative to viewport left edge')
coordinate_y: int | None = Field(default=None, description='Vertical coordinate relative to viewport top edge')
# expect_download: bool = Field(default=False, description='set True if expecting a download, False otherwise') # moved to downloads_watchdog.py
# click_count: int = 1 # TODO
class ClickElementActionIndexOnly(BaseModel):
model_config = ConfigDict(title='ClickElementAction')
index: int = Field(ge=1, description='Element index from browser_state')
class InputTextAction(BaseModel):
index: int = Field(ge=0, description='from browser_state')
text: str
clear: bool = Field(default=True, description='1=clear, 0=append')
class DoneAction(BaseModel):
text: str = Field(description='Final user message in the format the user requested')
success: bool = Field(default=True, description='True if user_request completed successfully')
files_to_display: list[str] | None = Field(default=[])
T = TypeVar('T', bound=BaseModel)
def _hide_internal_fields_from_schema(schema: dict) -> None:
"""Remove internal fields from the JSON schema to avoid collisions with user models."""
props = schema.get('properties', {})
props.pop('success', None)
props.pop('files_to_display', None)
class StructuredOutputAction(BaseModel, Generic[T]):
model_config = ConfigDict(json_schema_extra=_hide_internal_fields_from_schema)
success: bool = Field(default=True, description='True if user_request completed successfully')
data: T = Field(description='The actual output data matching the requested schema')
files_to_display: list[str] | None = Field(default=[])
class SwitchTabAction(BaseModel):
tab_id: str = Field(min_length=4, max_length=4, description='4-char id')
class CloseTabAction(BaseModel):
tab_id: str = Field(min_length=4, max_length=4, description='4-char id')
class ScrollAction(BaseModel):
down: bool = Field(default=True, description='down=True=scroll down, down=False scroll up')
pages: float = Field(default=1.0, description='0.5=half page, 1=full page, 10=to bottom/top')
index: int | None = Field(default=None, description='Optional element index to scroll within specific element')
class SendKeysAction(BaseModel):
keys: str = Field(description='keys (Escape, Enter, PageDown) or shortcuts (Control+o)')
class UploadFileAction(BaseModel):
index: int
path: str
class NoParamsAction(BaseModel):
model_config = ConfigDict(extra='ignore')
# Optional field required by Gemini API which errors on empty objects in response_schema
description: str | None = Field(None, description='Optional description for the action')
class ScreenshotAction(BaseModel):
model_config = ConfigDict(extra='ignore')
file_name: str | None = Field(
default=None,
description='If provided, saves screenshot to this file and returns path. Otherwise screenshot is included in next observation.',
)
class SaveAsPdfAction(BaseModel):
file_name: str | None = Field(
default=None,
description='Output PDF filename (without path). Defaults to page title. Extension .pdf is added automatically if missing.',
)
print_background: bool = Field(default=True, description='Include background graphics and colors')
landscape: bool = Field(default=False, description='Use landscape orientation')
scale: float = Field(default=1.0, ge=0.1, le=2.0, description='Scale of the webpage rendering (0.1 to 2.0)')
paper_format: str = Field(
default='Letter',
description='Paper size: Letter, Legal, A4, A3, or Tabloid',
)
class ReadContentAction(BaseModel):
"""Action for intelligent reading of long content."""
goal: str = Field(description='What to look for or extract from the content')
source: str = Field(
default='page',
description='What to read: "page" for current webpage, or a file path',
)
context: str = Field(default='', description='Additional context about the task')
class GetDropdownOptionsAction(BaseModel):
index: int
class SelectDropdownOptionAction(BaseModel):
index: int
text: str = Field(description='exact text/value')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tools/views.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/browser/parallel_browser.py | import asyncio
from browser_use import Agent, Browser, ChatOpenAI
# NOTE: This is still experimental, and agents might conflict each other.
async def main():
# Create 3 separate browser instances
browsers = [
Browser(
user_data_dir=f'./temp-profile-{i}',
headless=False,
)
for i in range(3)
]
# Create 3 agents with different tasks
agents = [
Agent(
task='Search for "browser automation" on Google',
browser=browsers[0],
llm=ChatOpenAI(model='gpt-4.1-mini'),
),
Agent(
task='Search for "AI agents" on DuckDuckGo',
browser=browsers[1],
llm=ChatOpenAI(model='gpt-4.1-mini'),
),
Agent(
task='Visit Wikipedia and search for "web scraping"',
browser=browsers[2],
llm=ChatOpenAI(model='gpt-4.1-mini'),
),
]
# Run all agents in parallel
tasks = [agent.run() for agent in agents]
results = await asyncio.gather(*tasks, return_exceptions=True)
print('🎉 All agents completed!')
if __name__ == '__main__':
asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/browser/parallel_browser.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/browser/playwright_integration.py | """
Key features:
1. Browser-Use and Playwright sharing the same Chrome instance via CDP
2. Take actions with Playwright and continue with Browser-Use actions
3. Let the agent call Playwright functions like screenshot or click on selectors
"""
import asyncio
import os
import subprocess
import sys
import tempfile
from pydantic import BaseModel, Field
# Check for required dependencies first - before other imports
try:
import aiohttp # type: ignore
from playwright.async_api import Browser, Page, async_playwright # type: ignore
except ImportError as e:
print(f'❌ Missing dependencies for this example: {e}')
print('This example requires: playwright aiohttp')
print('Install with: uv add playwright aiohttp')
print('Also run: playwright install chromium')
sys.exit(1)
from browser_use import Agent, BrowserSession, ChatOpenAI, Tools
from browser_use.agent.views import ActionResult
# Global Playwright browser instance - shared between custom actions
playwright_browser: Browser | None = None
playwright_page: Page | None = None
# Custom action parameter models
class PlaywrightFillFormAction(BaseModel):
"""Parameters for Playwright form filling action."""
customer_name: str = Field(..., description='Customer name to fill')
phone_number: str = Field(..., description='Phone number to fill')
email: str = Field(..., description='Email address to fill')
size_option: str = Field(..., description='Size option (small/medium/large)')
class PlaywrightScreenshotAction(BaseModel):
"""Parameters for Playwright screenshot action."""
filename: str = Field(default='playwright_screenshot.png', description='Filename for screenshot')
quality: int | None = Field(default=None, description='JPEG quality (1-100), only for .jpg/.jpeg files')
class PlaywrightGetTextAction(BaseModel):
"""Parameters for getting text using Playwright selectors."""
selector: str = Field(..., description='CSS selector to get text from. Use "title" for page title.')
async def start_chrome_with_debug_port(port: int = 9222):
"""
Start Chrome with remote debugging enabled.
Returns the Chrome process.
"""
# Create temporary directory for Chrome user data
user_data_dir = tempfile.mkdtemp(prefix='chrome_cdp_')
# Chrome launch command
chrome_paths = [
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome', # macOS
'/usr/bin/google-chrome', # Linux
'/usr/bin/chromium-browser', # Linux Chromium
'chrome', # Windows/PATH
'chromium', # Generic
]
chrome_exe = None
for path in chrome_paths:
if os.path.exists(path) or path in ['chrome', 'chromium']:
try:
# Test if executable works
test_proc = await asyncio.create_subprocess_exec(
path, '--version', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
await test_proc.wait()
chrome_exe = path
break
except Exception:
continue
if not chrome_exe:
raise RuntimeError('❌ Chrome not found. Please install Chrome or Chromium.')
# Chrome command arguments
cmd = [
chrome_exe,
f'--remote-debugging-port={port}',
f'--user-data-dir={user_data_dir}',
'--no-first-run',
'--no-default-browser-check',
'--disable-extensions',
'about:blank', # Start with blank page
]
# Start Chrome process
process = await asyncio.create_subprocess_exec(*cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Wait for Chrome to start and CDP to be ready
cdp_ready = False
for _ in range(20): # 20 second timeout
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f'http://localhost:{port}/json/version', timeout=aiohttp.ClientTimeout(total=1)
) as response:
if response.status == 200:
cdp_ready = True
break
except Exception:
pass
await asyncio.sleep(1)
if not cdp_ready:
process.terminate()
raise RuntimeError('❌ Chrome failed to start with CDP')
return process
async def connect_playwright_to_cdp(cdp_url: str):
"""
Connect Playwright to the same Chrome instance Browser-Use is using.
This enables custom actions to use Playwright functions.
"""
global playwright_browser, playwright_page
playwright = await async_playwright().start()
playwright_browser = await playwright.chromium.connect_over_cdp(cdp_url)
# Get or create a page
if playwright_browser and playwright_browser.contexts and playwright_browser.contexts[0].pages:
playwright_page = playwright_browser.contexts[0].pages[0]
elif playwright_browser:
context = await playwright_browser.new_context()
playwright_page = await context.new_page()
# Create custom tools that use Playwright functions
tools = Tools()
@tools.registry.action(
"Fill out a form using Playwright's precise form filling capabilities. This uses Playwright selectors for reliable form interaction.",
param_model=PlaywrightFillFormAction,
)
async def playwright_fill_form(params: PlaywrightFillFormAction, browser_session: BrowserSession):
"""
Custom action that uses Playwright to fill forms with high precision.
This demonstrates how to create Browser-Use actions that leverage Playwright's capabilities.
"""
try:
if not playwright_page:
return ActionResult(error='Playwright not connected. Run setup first.')
# Filling form with Playwright's precise selectors
# Wait for form to be ready and fill basic fields
await playwright_page.wait_for_selector('input[name="custname"]', timeout=10000)
await playwright_page.fill('input[name="custname"]', params.customer_name)
await playwright_page.fill('input[name="custtel"]', params.phone_number)
await playwright_page.fill('input[name="custemail"]', params.email)
# Handle size selection - check if it's a select dropdown or radio buttons
size_select = playwright_page.locator('select[name="size"]')
size_radio = playwright_page.locator(f'input[name="size"][value="{params.size_option}"]')
if await size_select.count() > 0:
# It's a select dropdown
await playwright_page.select_option('select[name="size"]', params.size_option)
elif await size_radio.count() > 0:
# It's radio buttons
await playwright_page.check(f'input[name="size"][value="{params.size_option}"]')
else:
raise ValueError(f'Could not find size input field for value: {params.size_option}')
# Get form data to verify it was filled
form_data = {}
form_data['name'] = await playwright_page.input_value('input[name="custname"]')
form_data['phone'] = await playwright_page.input_value('input[name="custtel"]')
form_data['email'] = await playwright_page.input_value('input[name="custemail"]')
# Get size value based on input type
if await size_select.count() > 0:
form_data['size'] = await playwright_page.input_value('select[name="size"]')
else:
# For radio buttons, find the checked one
checked_radio = playwright_page.locator('input[name="size"]:checked')
if await checked_radio.count() > 0:
form_data['size'] = await checked_radio.get_attribute('value')
else:
form_data['size'] = 'none selected'
success_msg = f'✅ Form filled successfully with Playwright: {form_data}'
return ActionResult(
extracted_content=success_msg, include_in_memory=True, long_term_memory=f'Filled form with: {form_data}'
)
except Exception as e:
error_msg = f'❌ Playwright form filling failed: {str(e)}'
return ActionResult(error=error_msg)
@tools.registry.action(
"Take a screenshot using Playwright's screenshot capabilities with high quality and precision.",
param_model=PlaywrightScreenshotAction,
)
async def playwright_screenshot(params: PlaywrightScreenshotAction, browser_session: BrowserSession):
"""
Custom action that uses Playwright's advanced screenshot features.
"""
try:
if not playwright_page:
return ActionResult(error='Playwright not connected. Run setup first.')
# Taking screenshot with Playwright
# Use Playwright's screenshot with full page capture
screenshot_kwargs = {'path': params.filename, 'full_page': True}
# Add quality parameter only for JPEG files
if params.quality is not None and params.filename.lower().endswith(('.jpg', '.jpeg')):
screenshot_kwargs['quality'] = params.quality
await playwright_page.screenshot(**screenshot_kwargs)
success_msg = f'✅ Screenshot saved as {params.filename} using Playwright'
return ActionResult(
extracted_content=success_msg, include_in_memory=True, long_term_memory=f'Screenshot saved: {params.filename}'
)
except Exception as e:
error_msg = f'❌ Playwright screenshot failed: {str(e)}'
return ActionResult(error=error_msg)
@tools.registry.action(
"Extract text from elements using Playwright's powerful CSS selectors and XPath support.", param_model=PlaywrightGetTextAction
)
async def playwright_get_text(params: PlaywrightGetTextAction, browser_session: BrowserSession):
"""
Custom action that uses Playwright's advanced text extraction with CSS selectors and XPath.
"""
try:
if not playwright_page:
return ActionResult(error='Playwright not connected. Run setup first.')
# Extracting text with Playwright selectors
# Handle special selectors
if params.selector.lower() == 'title':
# Use page.title() for title element
text_content = await playwright_page.title()
result_data = {
'selector': 'title',
'text_content': text_content,
'inner_text': text_content,
'tag_name': 'TITLE',
'is_visible': True,
}
else:
# Use Playwright's robust element selection and text extraction
element = playwright_page.locator(params.selector).first
if await element.count() == 0:
error_msg = f'❌ No element found with selector: {params.selector}'
return ActionResult(error=error_msg)
text_content = await element.text_content()
inner_text = await element.inner_text()
# Get additional element info
tag_name = await element.evaluate('el => el.tagName')
is_visible = await element.is_visible()
result_data = {
'selector': params.selector,
'text_content': text_content,
'inner_text': inner_text,
'tag_name': tag_name,
'is_visible': is_visible,
}
success_msg = f'✅ Extracted text using Playwright: {result_data}'
return ActionResult(
extracted_content=str(result_data),
include_in_memory=True,
long_term_memory=f'Extracted from {params.selector}: {result_data["text_content"]}',
)
except Exception as e:
error_msg = f'❌ Playwright text extraction failed: {str(e)}'
return ActionResult(error=error_msg)
async def main():
"""
Main function demonstrating Browser-Use + Playwright integration with custom actions.
"""
print('🚀 Advanced Playwright + Browser-Use Integration with Custom Actions')
chrome_process = None
try:
# Step 1: Start Chrome with CDP debugging
chrome_process = await start_chrome_with_debug_port()
cdp_url = 'http://localhost:9222'
# Step 2: Connect Playwright to the same Chrome instance
await connect_playwright_to_cdp(cdp_url)
# Step 3: Create Browser-Use session connected to same Chrome
browser_session = BrowserSession(cdp_url=cdp_url)
# Step 4: Create AI agent with our custom Playwright-powered tools
agent = Agent(
task="""
Please help me demonstrate the integration between Browser-Use and Playwright:
1. First, navigate to https://httpbin.org/forms/post
2. Use the 'playwright_fill_form' action to fill the form with these details:
- Customer name: "Alice Johnson"
- Phone: "555-9876"
- Email: "alice@demo.com"
- Size: "large"
3. Take a screenshot using the 'playwright_screenshot' action and save it as "form_demo.png"
4. Extract the title of the page using 'playwright_get_text' action with selector "title"
5. Finally, submit the form and tell me what happened
This demonstrates how Browser-Use AI can orchestrate tasks while using Playwright's precise capabilities for specific operations.
""",
llm=ChatOpenAI(model='gpt-4.1-mini'),
tools=tools, # Our custom tools with Playwright actions
browser_session=browser_session,
)
print('🎯 Starting AI agent with custom Playwright actions...')
# Step 5: Run the agent - it will use both Browser-Use actions and our custom Playwright actions
result = await agent.run()
# Keep browser open briefly to see results
print(f'✅ Integration demo completed! Result: {result}')
await asyncio.sleep(2) # Brief pause to see results
except Exception as e:
print(f'❌ Error: {e}')
raise
finally:
# Clean up resources
if playwright_browser:
await playwright_browser.close()
if chrome_process:
chrome_process.terminate()
try:
await asyncio.wait_for(chrome_process.wait(), 5)
except TimeoutError:
chrome_process.kill()
print('✅ Cleanup complete')
if __name__ == '__main__':
# Run the advanced integration demo
asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/browser/playwright_integration.py",
"license": "MIT License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/cloud/01_basic_task.py | """
Cloud Example 1: Your First Browser Use Cloud Task
==================================================
This example demonstrates the most basic Browser Use Cloud functionality:
- Create a simple automation task
- Get the task ID
- Monitor completion
- Retrieve results
Perfect for first-time cloud users to understand the API basics.
Cost: ~$0.04 (1 task + 3 steps with GPT-4.1 mini)
"""
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
raise ValueError(
'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
"""Make HTTP request with timeout and retry logic."""
kwargs.setdefault('timeout', TIMEOUT)
for attempt in range(3):
try:
response = requests.request(method, url, **kwargs)
response.raise_for_status()
return response
except RequestException as e:
if attempt == 2: # Last attempt
raise
sleep_time = 2**attempt
print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {sleep_time}s: {e}')
time.sleep(sleep_time)
# This line should never be reached, but satisfies type checker
raise RuntimeError('Unexpected error in retry logic')
def create_task(instructions: str) -> str:
	"""
	Create a new browser automation task.
	Args:
		instructions: Natural language description of what the agent should do
	Returns:
		task_id: Unique identifier for the created task
	"""
	print(f'📝 Creating task: {instructions}')
	# Cost-effective model, capped steps, and public sharing enabled.
	body = {
		'task': instructions,
		'llm_model': 'gpt-4.1-mini',
		'max_agent_steps': 10,
		'enable_public_share': True,
	}
	resp = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=body)
	new_task_id = resp.json()['id']
	print(f'✅ Task created with ID: {new_task_id}')
	return new_task_id
def get_task_status(task_id: str) -> dict[str, Any]:
	"""Get the current status of a task."""
	status_endpoint = f'{BASE_URL}/task/{task_id}/status'
	return _request_with_retry('get', status_endpoint, headers=HEADERS).json()
def get_task_details(task_id: str) -> dict[str, Any]:
	"""Get full task details including steps and output."""
	details_endpoint = f'{BASE_URL}/task/{task_id}'
	return _request_with_retry('get', details_endpoint, headers=HEADERS).json()
def wait_for_completion(task_id: str, poll_interval: int = 3) -> dict[str, Any]:
	"""
	Wait for task completion and show progress.
	Args:
		task_id: The task to monitor
		poll_interval: How often to check status (seconds)
	Returns:
		Complete task details with output
	"""
	# NOTE(review): this loop has no overall timeout — it polls until the API
	# reports a terminal status ('finished', 'failed', or 'stopped').
	print(f'⏳ Monitoring task {task_id}...')
	step_count = 0
	start_time = time.time()
	while True:
		details = get_task_details(task_id)
		status = details['status']
		current_steps = len(details.get('steps', []))
		elapsed = time.time() - start_time
		# Clear line and show current progress
		# NOTE(review): step_count only records the high-water mark of steps
		# and is never read elsewhere — looks vestigial; confirm before removing.
		if current_steps > step_count:
			step_count = current_steps
		# Build status message
		if status == 'running':
			if current_steps > 0:
				status_msg = f'🔄 Step {current_steps} | ⏱️ {elapsed:.0f}s | 🤖 Agent working...'
			else:
				status_msg = f'🤖 Agent starting... | ⏱️ {elapsed:.0f}s'
		else:
			status_msg = f'🔄 Step {current_steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
		# Clear line and print status (carriage return overwrites the previous line)
		print(f'\r{status_msg:<80}', end='', flush=True)
		# Check if finished
		if status == 'finished':
			print(f'\r✅ Task completed successfully! ({current_steps} steps in {elapsed:.1f}s)' + ' ' * 20)
			return details
		elif status in ['failed', 'stopped']:
			print(f'\r❌ Task {status} after {current_steps} steps' + ' ' * 30)
			return details
		time.sleep(poll_interval)
def main():
	"""Run a basic cloud automation task.

	Creates one task, blocks until it reaches a terminal status, then
	prints the output and any share/preview URLs the API returned.
	"""
	print('🚀 Browser Use Cloud - Basic Task Example')
	print('=' * 50)
	# Define a simple search task (using DuckDuckGo to avoid captchas)
	task_description = (
		"Go to DuckDuckGo and search for 'browser automation tools'. Tell me the top 3 results with their titles and URLs."
	)
	try:
		# Step 1: Create the task
		task_id = create_task(task_description)
		# Step 2: Wait for completion
		result = wait_for_completion(task_id)
		# Step 3: Display results
		print('\n📊 Results:')
		print('-' * 30)
		print(f'Status: {result["status"]}')
		print(f'Steps taken: {len(result.get("steps", []))}')
		if result.get('output'):
			print(f'Output: {result["output"]}')
		else:
			print('No output available')
		# Show share URLs for viewing execution
		# public_share_url is preferred; share_url is presumably a legacy field — verify against API docs.
		if result.get('live_url'):
			print(f'\n🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		if not result.get('live_url') and not result.get('public_share_url') and not result.get('share_url'):
			print("\n💡 Tip: Add 'enable_public_share': True to task payload to get shareable URLs")
	except requests.exceptions.RequestException as e:
		print(f'❌ API Error: {e}')
	# Broad catch is deliberate for a demo script: report and exit cleanly.
	except Exception as e:
		print(f'❌ Error: {e}')
if __name__ == '__main__':
	# Script entry point — run the demo only when executed directly.
	main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/cloud/01_basic_task.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/cloud/02_fast_mode_gemini.py | """
Cloud Example 2: Ultra-Fast Mode with Gemini Flash ⚡
====================================================
This example demonstrates the fastest and most cost-effective configuration:
- Gemini 2.5 Flash model ($0.01 per step)
- No proxy (faster execution, but no captcha solving)
- No element highlighting (better performance)
- Optimized viewport size
- Maximum speed configuration
Perfect for: Quick content generation, humor tasks, fast web scraping
Cost: ~$0.03 (1 task + 2-3 steps with Gemini Flash)
Speed: 2-3x faster than default configuration
Fun Factor: 💯 (Creates hilarious tech commentary)
"""
import argparse
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration
# API key is mandatory; fail fast at import time with a pointer to the console.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
	raise ValueError(
		'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
	)
# Endpoint and per-request timeout are overridable via environment variables.
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30')) # seconds, consumed by _request_with_retry
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
	"""Issue an HTTP request, retrying up to three times with exponential backoff.

	A default timeout is injected when the caller did not supply one.
	Raises the last RequestException if all three attempts fail.
	"""
	kwargs.setdefault('timeout', TIMEOUT)
	final_attempt = 2
	for attempt in range(3):
		try:
			resp = requests.request(method, url, **kwargs)
			resp.raise_for_status()
		except RequestException as exc:
			if attempt == final_attempt:
				raise
			backoff = 2**attempt # 1s, 2s between attempts
			print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {backoff}s: {exc}')
			time.sleep(backoff)
		else:
			return resp
	# Unreachable (loop either returns or re-raises); keeps type checkers happy.
	raise RuntimeError('Unexpected error in retry logic')
def create_fast_task(instructions: str) -> str:
	"""
	Create a browser automation task optimized for speed and cost.
	Args:
		instructions: Natural language description of what the agent should do
	Returns:
		task_id: Unique identifier for the created task
	"""
	print(f'⚡ Creating FAST task: {instructions}')
	# Ultra-fast configuration
	payload = {
		'task': instructions,
		# Model: Fastest and cheapest
		'llm_model': 'gemini-2.5-flash',
		# Performance optimizations
		# Without a proxy, captchas cannot be solved (see module docstring) —
		# acceptable tradeoff for speed-focused demo tasks.
		'use_proxy': False, # No proxy = faster execution
		'highlight_elements': False, # No highlighting = better performance
		'use_adblock': True, # Block ads for faster loading
		# Viewport optimization (smaller = faster)
		'browser_viewport_width': 1024,
		'browser_viewport_height': 768,
		# Cost control
		'max_agent_steps': 25, # Reasonable limit for fast tasks
		# Enable sharing for viewing execution
		'enable_public_share': True, # Get shareable URLs
		# Optional: Speed up with domain restrictions
		# "allowed_domains": ["google.com", "*.google.com"]
	}
	response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
	task_id = response.json()['id']
	print(f'✅ Fast task created with ID: {task_id}')
	print('⚡ Configuration: Gemini Flash + No Proxy + No Highlighting')
	return task_id
def monitor_fast_task(task_id: str) -> dict[str, Any]:
	"""
	Monitor task with optimized polling for fast execution.
	Args:
		task_id: The task to monitor
	Returns:
		Complete task details with output
	"""
	# NOTE(review): no overall timeout — polls until a terminal status arrives.
	print(f'🚀 Fast monitoring task {task_id}...')
	start_time = time.time()
	step_count = 0
	last_step_time = start_time
	# Faster polling for quick tasks
	poll_interval = 1 # Check every second for fast tasks
	while True:
		response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
		details = response.json()
		status = details['status']
		# Show progress with timing
		current_steps = len(details.get('steps', []))
		elapsed = time.time() - start_time
		# Build status message
		# When new steps appear, report how long the latest step took;
		# otherwise just show the running totals.
		if current_steps > step_count:
			step_time = time.time() - last_step_time
			last_step_time = time.time()
			step_count = current_steps
			step_msg = f'🔥 Step {current_steps} | ⚡ {step_time:.1f}s | Total: {elapsed:.1f}s'
		else:
			if status == 'running':
				step_msg = f'🚀 Step {current_steps} | ⏱️ {elapsed:.1f}s | Fast processing...'
			else:
				step_msg = f'🚀 Step {current_steps} | ⏱️ {elapsed:.1f}s | Status: {status}'
		# Clear line and show progress (carriage return overwrites previous line)
		print(f'\r{step_msg:<80}', end='', flush=True)
		# Check completion
		if status == 'finished':
			total_time = time.time() - start_time
			if current_steps > 0:
				avg_msg = f'⚡ Average: {total_time / current_steps:.1f}s per step'
			else:
				avg_msg = '⚡ No steps recorded'
			print(f'\r🏁 Task completed in {total_time:.1f}s! {avg_msg}' + ' ' * 20)
			return details
		elif status in ['failed', 'stopped']:
			print(f'\r❌ Task {status} after {elapsed:.1f}s' + ' ' * 30)
			return details
		time.sleep(poll_interval)
def run_speed_comparison():
	"""Run multiple tasks to compare speed vs accuracy.

	Executes three fast tasks sequentially and prints per-task and
	aggregate timing statistics. Each task is a separate paid API call.
	"""
	print('\n🏃♂️ Speed Comparison Demo')
	print('=' * 40)
	tasks = [
		'Go to ProductHunt and roast the top product like a sarcastic tech reviewer',
		'Visit Reddit r/ProgrammerHumor and summarize the top post as a dramatic news story',
		"Check GitHub trending and write a conspiracy theory about why everyone's switching to Rust",
	]
	results = []
	for i, task in enumerate(tasks, 1):
		print(f'\n📝 Fast Task {i}/{len(tasks)}')
		print(f'Task: {task}')
		start = time.time()
		task_id = create_fast_task(task)
		result = monitor_fast_task(task_id)
		end = time.time()
		results.append(
			{
				'task': task,
				'duration': end - start,
				'steps': len(result.get('steps', [])),
				'status': result['status'],
				# Output is truncated to the first 100 characters for the summary.
				'output': result.get('output', '')[:100] + '...' if result.get('output') else 'No output',
			}
		)
	# Summary
	print('\n📊 Speed Summary')
	print('=' * 50)
	total_time = sum(r['duration'] for r in results)
	total_steps = sum(r['steps'] for r in results)
	for i, result in enumerate(results, 1):
		print(f'Task {i}: {result["duration"]:.1f}s ({result["steps"]} steps) - {result["status"]}')
	print(f'\n⚡ Total time: {total_time:.1f}s')
	print(f'🔥 Average per task: {total_time / len(results):.1f}s')
	# Guard against division by zero when no steps were recorded at all.
	if total_steps > 0:
		print(f'💨 Average per step: {total_time / total_steps:.1f}s')
	else:
		print('💨 Average per step: N/A (no steps recorded)')
def main():
	"""Demonstrate ultra-fast cloud automation.

	Runs one fast demo task; pass --compare to additionally run the
	three-task speed comparison.
	"""
	# FIX: parse CLI flags up front. Previously parse_args() ran only after
	# the first task had finished, so `--help` or a mistyped flag would make
	# argparse exit the process after time and API credits were already spent.
	parser = argparse.ArgumentParser(description='Fast mode demo with Gemini Flash')
	parser.add_argument('--compare', action='store_true', help='Run speed comparison with 3 tasks')
	args = parser.parse_args()
	print('⚡ Browser Use Cloud - Ultra-Fast Mode with Gemini Flash')
	print('=' * 60)
	print('🎯 Configuration Benefits:')
	print('• Gemini Flash: $0.01 per step (cheapest)')
	print('• No proxy: 30% faster execution')
	print('• No highlighting: Better performance')
	print('• Optimized viewport: Faster rendering')
	try:
		# Single fast task
		print('\n🚀 Single Fast Task Demo')
		print('-' * 30)
		task = """
		Go to Hacker News (news.ycombinator.com) and get the top 3 articles from the front page.
		Then, write a funny tech news segment in the style of Fireship YouTube channel:
		- Be sarcastic and witty about tech trends
		- Use developer humor and memes
		- Make fun of common programming struggles
		- Include phrases like "And yes, it runs on JavaScript" or "Plot twist: it's written in Rust"
		- Keep it under 250 words but make it entertaining
		- Structure it like a news anchor delivering breaking tech news
		Make each story sound dramatic but also hilarious, like you're reporting on the most important events in human history.
		"""
		task_id = create_fast_task(task)
		result = monitor_fast_task(task_id)
		print(f'\n📊 Result: {result.get("output", "No output")}')
		# Show execution URLs
		if result.get('live_url'):
			print(f'\n🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		# Optional: speed comparison (three additional paid tasks)
		if args.compare:
			print('\n🏃♂️ Running speed comparison...')
			run_speed_comparison()
	except requests.exceptions.RequestException as e:
		print(f'❌ API Error: {e}')
	# Broad catch is deliberate for a demo script: report and exit cleanly.
	except Exception as e:
		print(f'❌ Error: {e}')
if __name__ == '__main__':
	# Script entry point — run the demo only when executed directly.
	main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/cloud/02_fast_mode_gemini.py",
"license": "MIT License",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/cloud/03_structured_output.py | """
Cloud Example 3: Structured JSON Output 📋
==========================================
This example demonstrates how to get structured, validated JSON output:
- Define Pydantic schemas for type safety
- Extract structured data from websites
- Validate and parse JSON responses
- Handle different data types and nested structures
Perfect for: Data extraction, API integration, structured analysis
Cost: ~$0.06 (1 task + 5-6 steps with GPT-4.1 mini)
"""
import argparse
import json
import os
import time
from typing import Any
import requests
from pydantic import BaseModel, Field, ValidationError
from requests.exceptions import RequestException
# Configuration
# API key is mandatory; fail fast at import time with a pointer to the console.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
	raise ValueError(
		'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
	)
# Endpoint and per-request timeout are overridable via environment variables.
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30')) # seconds, consumed by _request_with_retry
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
	"""Issue an HTTP request, retrying up to three times with exponential backoff.

	A default timeout is injected when the caller did not supply one.
	Raises the last RequestException if all three attempts fail.
	"""
	kwargs.setdefault('timeout', TIMEOUT)
	final_attempt = 2
	for attempt in range(3):
		try:
			resp = requests.request(method, url, **kwargs)
			resp.raise_for_status()
		except RequestException as exc:
			if attempt == final_attempt:
				raise
			backoff = 2**attempt # 1s, 2s between attempts
			print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {backoff}s: {exc}')
			time.sleep(backoff)
		else:
			return resp
	# Unreachable (loop either returns or re-raises); keeps type checkers happy.
	raise RuntimeError('Unexpected error in retry logic')
# Define structured output schemas using Pydantic
class NewsArticle(BaseModel):
	"""Schema for a news article."""
	# NOTE(review): the `| None` fields below have no `default=None`, so
	# pydantic still treats them as required — confirm whether they should
	# be truly optional.
	title: str = Field(description='The headline of the article')
	summary: str = Field(description='Brief summary of the article')
	url: str = Field(description='Direct link to the article')
	published_date: str | None = Field(description='Publication date if available')
	category: str | None = Field(description='Article category/section')
class NewsResponse(BaseModel):
	"""Schema for multiple news articles.

	Top-level structured-output shape for the news extraction demo.
	"""
	articles: list[NewsArticle] = Field(description='List of news articles')
	source_website: str = Field(description='The website where articles were found')
	extracted_at: str = Field(description='When the data was extracted')
class ProductInfo(BaseModel):
	"""Schema for product information."""
	name: str = Field(description='Product name')
	price: float = Field(description='Product price in USD')
	# NOTE(review): `rating` has no `default=None`, so pydantic still treats
	# it as required — confirm whether it should be truly optional.
	rating: float | None = Field(description='Average rating (0-5 scale)')
	availability: str = Field(description='Stock status (in stock, out of stock, etc.)')
	description: str = Field(description='Product description')
class CompanyInfo(BaseModel):
	"""Schema for company information."""
	name: str = Field(description='Company name')
	# NOTE(review): the `| None` fields have no `default=None`, so pydantic
	# still treats them as required — confirm whether they should be optional.
	stock_symbol: str | None = Field(description='Stock ticker symbol')
	market_cap: str | None = Field(description='Market capitalization')
	industry: str = Field(description='Primary industry')
	headquarters: str = Field(description='Headquarters location')
	founded_year: int | None = Field(description='Year founded')
def create_structured_task(instructions: str, schema_model: type[BaseModel], **kwargs) -> str:
	"""
	Create a task that returns structured JSON output.
	Args:
		instructions: Task description
		schema_model: Pydantic model defining the expected output structure
		**kwargs: Additional task parameters
	Returns:
		task_id: Unique identifier for the created task
	"""
	print(f'📝 Creating structured task: {instructions}')
	print(f'🏗️ Expected schema: {schema_model.__name__}')
	# Serialize the model's JSON schema so the agent knows the target shape.
	body = {
		'task': instructions,
		'structured_output_json': json.dumps(schema_model.model_json_schema()),
		'llm_model': 'gpt-4.1-mini',
		'max_agent_steps': 15,
		'enable_public_share': True, # shareable execution URLs
	}
	# Caller-supplied parameters override the defaults above.
	body.update(kwargs)
	resp = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=body)
	created_id = resp.json()['id']
	print(f'✅ Structured task created: {created_id}')
	return created_id
def wait_for_structured_completion(task_id: str, max_wait_time: int = 300) -> dict[str, Any]:
	"""Wait for task completion and return the result.

	Polls every 3 seconds; gives up after max_wait_time seconds and
	returns whatever details are available at that point.
	"""
	print(f'⏳ Waiting for structured output (max {max_wait_time}s)...')
	start_time = time.time()
	while True:
		response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}/status', headers=HEADERS)
		# NOTE(review): assumes the /status endpoint returns a bare JSON string
		# (e.g. "running"); if it returns an object, the equality checks below
		# never match — verify against the API documentation.
		status = response.json()
		elapsed = time.time() - start_time
		# Check for timeout
		if elapsed > max_wait_time:
			print(f'\r⏰ Task timeout after {max_wait_time}s - stopping wait' + ' ' * 30)
			# Get final details before timeout
			details_response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
			details = details_response.json()
			return details
		# Get step count from full details for better progress tracking
		# (this means two API calls per poll iteration: /status plus full details).
		details_response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
		details = details_response.json()
		steps = len(details.get('steps', []))
		# Build status message
		if status == 'running':
			status_msg = f'📋 Structured task | Step {steps} | ⏱️ {elapsed:.0f}s | 🔄 Extracting...'
		else:
			status_msg = f'📋 Structured task | Step {steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
		# Clear line and show status
		print(f'\r{status_msg:<80}', end='', flush=True)
		if status == 'finished':
			print(f'\r✅ Structured data extracted! ({steps} steps in {elapsed:.1f}s)' + ' ' * 20)
			return details
		elif status in ['failed', 'stopped']:
			print(f'\r❌ Task {status} after {steps} steps' + ' ' * 30)
			return details
		time.sleep(3)
def validate_and_display_output(output: str, schema_model: type[BaseModel]):
	"""
	Validate the JSON output against the schema and display results.
	Args:
		output: Raw JSON string from the task
		schema_model: Pydantic model for validation
	Returns:
		The validated model instance, or None if parsing/validation failed.
	"""
	print('\n📊 Structured Output Analysis')
	print('=' * 40)
	try:
		# Parse and validate the JSON
		parsed_data = schema_model.model_validate_json(output)
		print('✅ JSON validation successful!')
		# Pretty print the structured data
		print('\n📋 Parsed Data:')
		print('-' * 20)
		print(parsed_data.model_dump_json(indent=2))
		# Display specific fields based on model type
		if isinstance(parsed_data, NewsResponse):
			print(f'\n📰 Found {len(parsed_data.articles)} articles from {parsed_data.source_website}')
			for i, article in enumerate(parsed_data.articles[:3], 1):
				print(f'\n{i}. {article.title}')
				print(f' Summary: {article.summary[:100]}...')
				print(f' URL: {article.url}')
		elif isinstance(parsed_data, ProductInfo):
			print(f'\n🛍️ Product: {parsed_data.name}')
			print(f' Price: ${parsed_data.price}')
			print(f' Rating: {parsed_data.rating}/5' if parsed_data.rating else ' Rating: N/A')
			print(f' Status: {parsed_data.availability}')
		elif isinstance(parsed_data, CompanyInfo):
			print(f'\n🏢 Company: {parsed_data.name}')
			print(f' Industry: {parsed_data.industry}')
			print(f' Headquarters: {parsed_data.headquarters}')
			if parsed_data.founded_year:
				print(f' Founded: {parsed_data.founded_year}')
		return parsed_data
	except ValidationError as e:
		print('❌ JSON validation failed!')
		print(f'Errors: {e}')
		print(f'\nRaw output: {output[:500]}...')
		return None
	# NOTE(review): under pydantic v2, model_validate_json raises ValidationError
	# for malformed JSON too, so this branch may be unreachable — confirm the
	# pydantic version in use.
	except json.JSONDecodeError as e:
		print('❌ Invalid JSON format!')
		print(f'Error: {e}')
		print(f'\nRaw output: {output[:500]}...')
		return None
def demo_news_extraction():
	"""Demo: Extract structured news data.

	Returns the validated NewsResponse, or None when the task produced no
	output or validation failed.
	"""
	print('\n📰 Demo 1: News Article Extraction')
	print('-' * 40)
	task = """
	Go to a major news website (like BBC, CNN, or Reuters) and extract information
	about the top 3 news articles. For each article, get the title, summary, URL,
	and any other available metadata.
	"""
	task_id = create_structured_task(task, NewsResponse)
	result = wait_for_structured_completion(task_id)
	if result.get('output'):
		# parsed_result may be None if validation failed; URLs are shown regardless.
		parsed_result = validate_and_display_output(result['output'], NewsResponse)
		# Show execution URLs
		if result.get('live_url'):
			print(f'\n🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		return parsed_result
	else:
		print('❌ No structured output received')
		return None
def demo_product_extraction():
	"""Demo: Extract structured product data.

	Returns the validated ProductInfo, or None when the task produced no
	output or validation failed.
	"""
	print('\n🛍️ Demo 2: Product Information Extraction')
	print('-' * 40)
	task = """
	Go to Amazon and search for 'wireless headphones'. Find the first product result
	and extract detailed information including name, price, rating, availability,
	and description.
	"""
	task_id = create_structured_task(task, ProductInfo)
	result = wait_for_structured_completion(task_id)
	if result.get('output'):
		# parsed_result may be None if validation failed; URLs are shown regardless.
		parsed_result = validate_and_display_output(result['output'], ProductInfo)
		# Show execution URLs
		if result.get('live_url'):
			print(f'\n🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		return parsed_result
	else:
		print('❌ No structured output received')
		return None
def demo_company_extraction():
	"""Demo: Extract structured company data.

	Returns the validated CompanyInfo, or None when the task produced no
	output or validation failed.
	"""
	print('\n🏢 Demo 3: Company Information Extraction')
	print('-' * 40)
	task = """
	Go to a financial website and look up information about Apple Inc.
	Extract company details including name, stock symbol, market cap,
	industry, headquarters, and founding year.
	"""
	task_id = create_structured_task(task, CompanyInfo)
	result = wait_for_structured_completion(task_id)
	if result.get('output'):
		# parsed_result may be None if validation failed; URLs are shown regardless.
		parsed_result = validate_and_display_output(result['output'], CompanyInfo)
		# Show execution URLs
		if result.get('live_url'):
			print(f'\n🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		return parsed_result
	else:
		print('❌ No structured output received')
		return None
def main():
	"""Demonstrate structured output extraction.

	Parses --demo to decide which extraction demo(s) to run.
	"""
	print('📋 Browser Use Cloud - Structured JSON Output')
	print('=' * 50)
	print('🎯 Features:')
	print('• Type-safe Pydantic schemas')
	print('• Automatic JSON validation')
	print('• Structured data extraction')
	print('• Multiple output formats')
	try:
		# Parse command line arguments
		parser = argparse.ArgumentParser(description='Structured output extraction demo')
		parser.add_argument('--demo', choices=['news', 'product', 'company', 'all'], default='news', help='Which demo to run')
		args = parser.parse_args()
		print(f'\n🔍 Running {args.demo} demo(s)...')
		# Dispatch table: each CLI choice maps to the demo callables it triggers.
		dispatch = {
			'news': (demo_news_extraction,),
			'product': (demo_product_extraction,),
			'company': (demo_company_extraction,),
			'all': (demo_news_extraction, demo_product_extraction, demo_company_extraction),
		}
		for run_demo in dispatch[args.demo]:
			run_demo()
	except requests.exceptions.RequestException as e:
		print(f'❌ API Error: {e}')
	except Exception as e:
		print(f'❌ Error: {e}')
if __name__ == '__main__':
	# Script entry point — run the demo only when executed directly.
	main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/cloud/03_structured_output.py",
"license": "MIT License",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/cloud/04_proxy_usage.py | """
Cloud Example 4: Proxy Usage 🌍
===============================
This example demonstrates reliable proxy usage scenarios:
- Different country proxies for geo-restrictions
- IP address and location verification
- Region-specific content access (streaming, news)
- Search result localization by country
- Mobile/residential proxy benefits
Perfect for: Geo-restricted content, location testing, regional analysis
Cost: ~$0.08 (1 task + 6-8 steps with proxy enabled)
"""
import argparse
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration
# API key is mandatory; fail fast at import time with a pointer to the console.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
	raise ValueError(
		'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
	)
# Endpoint and per-request timeout are overridable via environment variables.
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30')) # seconds, consumed by _request_with_retry
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
	"""Issue an HTTP request, retrying up to three times with exponential backoff.

	A default timeout is injected when the caller did not supply one.
	Raises the last RequestException if all three attempts fail.
	"""
	kwargs.setdefault('timeout', TIMEOUT)
	final_attempt = 2
	for attempt in range(3):
		try:
			resp = requests.request(method, url, **kwargs)
			resp.raise_for_status()
		except RequestException as exc:
			if attempt == final_attempt:
				raise
			backoff = 2**attempt # 1s, 2s between attempts
			print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {backoff}s: {exc}')
			time.sleep(backoff)
		else:
			return resp
	# Unreachable (loop either returns or re-raises); keeps type checkers happy.
	raise RuntimeError('Unexpected error in retry logic')
def create_task_with_proxy(instructions: str, country_code: str = 'us') -> str:
	"""
	Create a task with proxy enabled from a specific country.
	Args:
		instructions: Task description
		country_code: Proxy country ('us', 'fr', 'it', 'jp', 'au', 'de', 'fi', 'ca')
	Returns:
		task_id: Unique identifier for the created task
	"""
	# NOTE(review): country_code is not validated locally — an unsupported
	# code is passed straight to the API; confirm server-side behavior.
	print(f'🌍 Creating task with {country_code.upper()} proxy')
	print(f'📝 Task: {instructions}')
	payload = {
		'task': instructions,
		'llm_model': 'gpt-4.1-mini',
		# Proxy configuration
		'use_proxy': True, # Required for captcha solving
		'proxy_country_code': country_code, # Choose proxy location
		# Standard settings
		'use_adblock': True, # Block ads for faster loading
		'highlight_elements': True, # Keep highlighting for visibility
		'max_agent_steps': 15,
		# Enable sharing for viewing execution
		'enable_public_share': True, # Get shareable URLs
	}
	response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
	task_id = response.json()['id']
	print(f'✅ Task created with {country_code.upper()} proxy: {task_id}')
	return task_id
def test_ip_location(country_code: str) -> dict[str, Any]:
	"""Test IP address and location detection with proxy.

	Creates a proxied task and blocks until it completes, returning the
	full task details.
	"""
	task = """
	Go to whatismyipaddress.com and tell me:
	1. The detected IP address
	2. The detected country/location
	3. The ISP/organization
	4. Any other location details shown
	Please be specific about what you see on the page.
	"""
	task_id = create_task_with_proxy(task, country_code)
	return wait_for_completion(task_id)
def test_geo_restricted_content(country_code: str) -> dict[str, Any]:
	"""Test access to geo-restricted content.

	Creates a proxied task and blocks until it completes, returning the
	full task details.
	"""
	task = """
	Go to a major news website (like BBC, CNN, or local news) and check:
	1. What content is available
	2. Any geo-restriction messages
	3. Local/regional content differences
	4. Language or currency preferences shown
	Note any differences from what you might expect.
	"""
	task_id = create_task_with_proxy(task, country_code)
	return wait_for_completion(task_id)
def test_streaming_service_access(country_code: str) -> dict[str, Any]:
	"""Test access to region-specific streaming content.

	Creates a proxied task and blocks until it completes, returning the
	full task details.
	"""
	task = """
	Go to a major streaming service website (like Netflix, YouTube, or BBC iPlayer)
	and check what content or messaging appears.
	Report:
	1. What homepage content is shown
	2. Any geo-restriction messages or content differences
	3. Available content regions or language options
	4. Any pricing or availability differences
	Note: Don't try to log in, just observe the publicly available content.
	"""
	task_id = create_task_with_proxy(task, country_code)
	return wait_for_completion(task_id)
def test_search_results_by_location(country_code: str) -> dict[str, Any]:
	"""Test how search results vary by location.

	Creates a proxied task and blocks until it completes, returning the
	full task details.
	"""
	task = """
	Go to Google and search for "best restaurants near me" or "local news".
	Report:
	1. What local results appear
	2. The detected location in search results
	3. Any location-specific content or ads
	4. Language preferences
	This will show how search results change based on proxy location.
	"""
	task_id = create_task_with_proxy(task, country_code)
	return wait_for_completion(task_id)
def wait_for_completion(task_id: str) -> dict[str, Any]:
	"""Wait for task completion and return results.

	Polls the full-details endpoint every 3 seconds until a terminal
	status is reached. NOTE(review): no overall timeout.
	"""
	print(f'⏳ Waiting for task {task_id} to complete...')
	start_time = time.time()
	while True:
		response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
		details = response.json()
		status = details['status']
		steps = len(details.get('steps', []))
		elapsed = time.time() - start_time
		# Build status message
		if status == 'running':
			status_msg = f'🌍 Proxy task | Step {steps} | ⏱️ {elapsed:.0f}s | 🤖 Processing...'
		else:
			status_msg = f'🌍 Proxy task | Step {steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
		# Clear line and show status (carriage return overwrites the previous line)
		print(f'\r{status_msg:<80}', end='', flush=True)
		if status == 'finished':
			print(f'\r✅ Task completed in {steps} steps! ({elapsed:.1f}s total)' + ' ' * 20)
			return details
		elif status in ['failed', 'stopped']:
			print(f'\r❌ Task {status} after {steps} steps' + ' ' * 30)
			return details
		time.sleep(3)
def demo_proxy_countries():
	"""Demonstrate proxy usage across different countries.

	Runs one IP-location task per country sequentially (each is a paid
	API call), then prints a status summary per country code.
	"""
	print('\n🌍 Demo 1: Proxy Countries Comparison')
	print('-' * 45)
	countries = [('us', 'United States'), ('de', 'Germany'), ('jp', 'Japan'), ('au', 'Australia')]
	results = {}
	for code, name in countries:
		print(f'\n🌍 Testing {name} ({code.upper()}) proxy:')
		print('=' * 40)
		result = test_ip_location(code)
		results[code] = result
		if result.get('output'):
			# Only the first 200 characters of the agent's answer are shown.
			print(f'📍 Location Result: {result["output"][:200]}...')
		# Show execution URLs
		if result.get('live_url'):
			print(f'🔗 Live Preview: {result["live_url"]}')
		if result.get('public_share_url'):
			print(f'🌐 Share URL: {result["public_share_url"]}')
		elif result.get('share_url'):
			print(f'🌐 Share URL: {result["share_url"]}')
		print('-' * 40)
		time.sleep(2) # Brief pause between tests
	# Summary comparison
	print('\n📊 Proxy Location Summary:')
	print('=' * 30)
	for code, result in results.items():
		status = result.get('status', 'unknown')
		print(f'{code.upper()}: {status}')
def demo_geo_restrictions():
	"""Demonstrate geo-restriction bypass.

	Runs the geo-restricted-content check once per proxy location.
	"""
	print('\n🚫 Demo 2: Geo-Restriction Testing')
	print('-' * 40)
	# Test from different locations
	locations = [('us', 'US content'), ('de', 'European content')]
	for code, description in locations:
		print(f'\n🌍 Testing {description} with {code.upper()} proxy:')
		result = test_geo_restricted_content(code)
		if result.get('output'):
			# Only the first 200 characters of the agent's answer are shown.
			print(f'📰 Content Access: {result["output"][:200]}...')
		time.sleep(2)
def demo_streaming_access():
	"""Demonstrate streaming service access with different proxies.

	Runs the streaming-access check once per proxy location.
	"""
	print('\n📺 Demo 3: Streaming Service Access')
	print('-' * 40)
	locations = [('us', 'US'), ('de', 'Germany')]
	for code, name in locations:
		print(f'\n🌍 Testing streaming access from {name}:')
		result = test_streaming_service_access(code)
		if result.get('output'):
			# Only the first 200 characters of the agent's answer are shown.
			print(f'📺 Access Result: {result["output"][:200]}...')
		time.sleep(2)
def demo_search_localization():
	"""Demonstrate search result localization.

	Runs the search-by-location check once per proxy location.
	"""
	print('\n🔍 Demo 4: Search Localization')
	print('-' * 35)
	locations = [('us', 'US'), ('de', 'Germany')]
	for code, name in locations:
		print(f'\n🌍 Testing search results from {name}:')
		result = test_search_results_by_location(code)
		if result.get('output'):
			# Only the first 200 characters of the agent's answer are shown.
			print(f'🔍 Search Results: {result["output"][:200]}...')
		time.sleep(2)
def main():
    """Demonstrate comprehensive proxy usage.

    Prints an overview banner, parses `--demo` from the command line and
    dispatches to the matching demo function ('all' runs every demo).
    """
    print('🌍 Browser Use Cloud - Proxy Usage Examples')
    print('=' * 50)
    print('🎯 Proxy Benefits:')
    print('• Bypass geo-restrictions')
    print('• Test location-specific content')
    print('• Access region-locked websites')
    print('• Mobile/residential IP addresses')
    print('• Verify IP geolocation')
    print('\n🌐 Available Countries:')
    countries = ['🇺🇸 US', '🇫🇷 France', '🇮🇹 Italy', '🇯🇵 Japan', '🇦🇺 Australia', '🇩🇪 Germany', '🇫🇮 Finland', '🇨🇦 Canada']
    print(' • '.join(countries))
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser(description='Proxy usage examples')
        parser.add_argument(
            '--demo', choices=['countries', 'geo', 'streaming', 'search', 'all'], default='countries', help='Which demo to run'
        )
        args = parser.parse_args()
        print(f'\n🔍 Running {args.demo} demo(s)...')
        # Dispatch to the selected demo; 'all' runs them in sequence.
        if args.demo == 'countries':
            demo_proxy_countries()
        elif args.demo == 'geo':
            demo_geo_restrictions()
        elif args.demo == 'streaming':
            demo_streaming_access()
        elif args.demo == 'search':
            demo_search_localization()
        elif args.demo == 'all':
            demo_proxy_countries()
            demo_geo_restrictions()
            demo_streaming_access()
            demo_search_localization()
    # Network failures from the requests-based helpers surface here.
    except requests.exceptions.RequestException as e:
        print(f'❌ API Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/cloud/04_proxy_usage.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/cloud/05_search_api.py | """
Cloud Example 5: Search API (Beta) 🔍
=====================================
This example demonstrates the Browser Use Search API (BETA):
- Simple search: Search Google and extract from multiple results
- URL search: Extract specific content from a target URL
- Deep navigation through websites (depth parameter)
- Real-time content extraction vs cached results
Perfect for: Content extraction, research, competitive analysis
"""
import argparse
import asyncio
import json
import os
import time
from typing import Any
import aiohttp
# Configuration
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
raise ValueError(
'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
async def simple_search(query: str, max_websites: int = 5, depth: int = 2) -> dict[str, Any]:
    """
    Search Google and extract content from multiple top results.

    Args:
        query: Search query to process
        max_websites: Number of websites to process (1-10)
        depth: How deep to navigate (2-5)

    Returns:
        Dictionary with results from multiple websites
    """
    # Clamp parameters into the ranges the API accepts.
    max_websites = min(max(max_websites, 1), 10)
    depth = min(max(depth, 2), 5)
    started = time.time()
    print(f"🔍 Simple Search: '{query}'")
    print(f'📊 Processing {max_websites} websites at depth {depth}')
    print(f'💰 Estimated cost: {depth * max_websites}¢')
    body = {'query': query, 'max_websites': max_websites, 'depth': depth}
    session_kwargs = {
        'timeout': aiohttp.ClientTimeout(total=TIMEOUT),
        'connector': aiohttp.TCPConnector(limit=10),  # limit concurrent connections
    }
    async with aiohttp.ClientSession(**session_kwargs) as session:
        async with session.post(f'{BASE_URL}/simple-search', json=body, headers=HEADERS) as response:
            elapsed = time.time() - started
            # Guard clause: non-200 responses are reported and returned as errors.
            if response.status != 200:
                error_text = await response.text()
                print(f'❌ Search failed: {response.status} - {error_text} (after {elapsed:.1f}s)')
                return {'error': f'HTTP {response.status}', 'details': error_text}
            try:
                result = await response.json()
            except (aiohttp.ContentTypeError, json.JSONDecodeError) as e:
                error_text = await response.text()
                print(f'❌ Invalid JSON response: {e} (after {elapsed:.1f}s)')
                return {'error': 'Invalid JSON', 'details': error_text}
            print(f'✅ Found results from {len(result.get("results", []))} websites in {elapsed:.1f}s')
            return result
async def search_url(url: str, query: str, depth: int = 2) -> dict[str, Any]:
    """
    Extract specific content from a target URL.

    Args:
        url: Target URL to extract from
        query: What specific content to look for
        depth: How deep to navigate (2-5)

    Returns:
        Dictionary with extracted content
    """
    depth = min(max(depth, 2), 5)  # the API accepts depth 2-5 only
    started = time.time()
    print(f'🎯 URL Search: {url}')
    print(f"🔍 Looking for: '{query}'")
    print(f'📊 Navigation depth: {depth}')
    print(f'💰 Estimated cost: {depth}¢')
    body = {'url': url, 'query': query, 'depth': depth}
    session_kwargs = {
        'timeout': aiohttp.ClientTimeout(total=TIMEOUT),
        'connector': aiohttp.TCPConnector(limit=10),  # limit concurrent connections
    }
    async with aiohttp.ClientSession(**session_kwargs) as session:
        async with session.post(f'{BASE_URL}/search-url', json=body, headers=HEADERS) as response:
            elapsed = time.time() - started
            # Guard clause: non-200 responses are reported and returned as errors.
            if response.status != 200:
                error_text = await response.text()
                print(f'❌ URL search failed: {response.status} - {error_text} (after {elapsed:.1f}s)')
                return {'error': f'HTTP {response.status}', 'details': error_text}
            try:
                result = await response.json()
            except (aiohttp.ContentTypeError, json.JSONDecodeError) as e:
                error_text = await response.text()
                print(f'❌ Invalid JSON response: {e} (after {elapsed:.1f}s)')
                return {'error': 'Invalid JSON', 'details': error_text}
            print(f'✅ Extracted content from {result.get("url", "website")} in {elapsed:.1f}s')
            return result
def display_simple_search_results(results: dict[str, Any]):
    """Pretty-print a simple-search payload: per-site content plus share links."""
    if 'error' in results:
        print(f'❌ Error: {results["error"]}')
        return
    websites = results.get('results', [])
    print(f'\n📋 Search Results ({len(websites)} websites)')
    print('=' * 50)
    for position, site in enumerate(websites, start=1):
        print(f'\n{position}. 🌐 {site.get("url", "Unknown URL")}')
        print('-' * 40)
        content = site.get('content', 'No content')
        if len(content) > 300:
            # Long bodies are truncated to their first 300 characters.
            print(f'{content[:300]}...')
            print(f'[Content truncated - {len(content)} total characters]')
        else:
            print(content)
    # Show execution URLs if available.
    live = results.get('live_url')
    if live:
        print(f'\n🔗 Live Preview: {live}')
    share = results.get('public_share_url') or results.get('share_url')
    if share:
        print(f'🌐 Share URL: {share}')
def display_url_search_results(results: dict[str, Any]):
    """Pretty-print a URL-search payload: extracted content plus share links."""
    if 'error' in results:
        print(f'❌ Error: {results["error"]}')
        return
    print(f'\n📄 Extracted Content from: {results.get("url", "Unknown URL")}')
    print('=' * 60)
    print(results.get('content', 'No content'))
    # Show execution URLs if available.
    live = results.get('live_url')
    if live:
        print(f'\n🔗 Live Preview: {live}')
    share = results.get('public_share_url') or results.get('share_url')
    if share:
        print(f'🌐 Share URL: {share}')
async def demo_news_search():
    """Demo: Search for latest news across multiple sources."""
    print('\n📰 Demo 1: Latest News Search')
    print('-' * 35)
    started = time.time()
    results = await simple_search('latest developments in artificial intelligence 2024', max_websites=4, depth=2)
    elapsed = time.time() - started
    display_simple_search_results(results)
    print(f'\n⏱️ Total demo time: {elapsed:.1f}s')
    return results
async def demo_competitive_analysis():
    """Demo: Analyze competitor websites."""
    print('\n🏢 Demo 2: Competitive Analysis')
    print('-' * 35)
    # Deeper navigation (depth=3) to reach feature/pricing pages.
    results = await simple_search('browser automation tools comparison features pricing', max_websites=3, depth=3)
    display_simple_search_results(results)
    return results
async def demo_deep_website_analysis():
    """Demo: Deep analysis of a specific website."""
    print('\n🎯 Demo 3: Deep Website Analysis')
    print('-' * 35)
    started = time.time()
    results = await search_url(
        'https://docs.browser-use.com',
        'Browser Use features, pricing, and API capabilities',
        depth=3,
    )
    elapsed = time.time() - started
    display_url_search_results(results)
    print(f'\n⏱️ Total demo time: {elapsed:.1f}s')
    return results
async def demo_product_research():
    """Demo: Product research and comparison."""
    print('\n🛍️ Demo 4: Product Research')
    print('-' * 30)
    results = await simple_search('best wireless headphones 2024 reviews comparison', max_websites=5, depth=2)
    display_simple_search_results(results)
    return results
async def demo_real_time_vs_cached():
    """Demo: Show difference between real-time and cached results."""
    print('\n⚡ Demo 5: Real-time vs Cached Data')
    print('-' * 40)
    for line in (
        '🔄 Browser Use Search API benefits:',
        '• Actually browses websites like a human',
        '• Gets live, current data (not cached)',
        '• Navigates deep into sites via clicks',
        '• Handles JavaScript and dynamic content',
        '• Accesses pages requiring navigation',
    ):
        print(line)
    # A query whose answer only makes sense when fetched live.
    results = await simple_search('current Bitcoin price USD live', max_websites=3, depth=2)
    print('\n💰 Live Bitcoin Price Search Results:')
    display_simple_search_results(results)
    return results
async def demo_search_depth_comparison():
    """Demo: Compare different search depths."""
    print('\n📊 Demo 6: Search Depth Comparison')
    print('-' * 40)
    target = 'https://news.ycombinator.com'
    question = 'trending technology discussions'
    collected = {}
    # Run the same query at increasing depths and record each payload.
    for level in (2, 3, 4):
        print(f'\n🔍 Testing depth {level}:')
        outcome = await search_url(target, question, level)
        collected[level] = outcome
        if 'content' in outcome:
            print(f'📏 Content length: {len(outcome["content"])} characters')
        # Brief pause between requests.
        await asyncio.sleep(1)
    # Summarize how much content each depth yielded.
    print('\n📊 Depth Comparison Summary:')
    print('-' * 30)
    for level, outcome in collected.items():
        if 'content' in outcome:
            print(f'Depth {level}: {len(outcome["content"])} characters')
        else:
            print(f'Depth {level}: Error or no content')
    return collected
async def main():
    """Demonstrate comprehensive Search API usage.

    Prints a feature/pricing banner, parses `--demo` from the command line
    and awaits the matching demo coroutine ('all' runs every demo in order).
    """
    print('🔍 Browser Use Cloud - Search API (BETA)')
    print('=' * 45)
    print('⚠️ Note: This API is in BETA and may change')
    print()
    print('🎯 Search API Features:')
    print('• Real-time website browsing (not cached)')
    print('• Deep navigation through multiple pages')
    print('• Dynamic content and JavaScript handling')
    print('• Multiple result aggregation')
    print('• Cost-effective content extraction')
    print('\n💰 Pricing:')
    print('• Simple Search: 1¢ × depth × websites')
    print('• URL Search: 1¢ × depth')
    print('• Example: depth=2, 5 websites = 10¢')
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser(description='Search API (BETA) examples')
        parser.add_argument(
            '--demo',
            choices=['news', 'competitive', 'deep', 'product', 'realtime', 'depth', 'all'],
            default='news',
            help='Which demo to run',
        )
        args = parser.parse_args()
        print(f'\n🔍 Running {args.demo} demo(s)...')
        # Dispatch to the selected demo coroutine.
        if args.demo == 'news':
            await demo_news_search()
        elif args.demo == 'competitive':
            await demo_competitive_analysis()
        elif args.demo == 'deep':
            await demo_deep_website_analysis()
        elif args.demo == 'product':
            await demo_product_research()
        elif args.demo == 'realtime':
            await demo_real_time_vs_cached()
        elif args.demo == 'depth':
            await demo_search_depth_comparison()
        elif args.demo == 'all':
            await demo_news_search()
            await demo_competitive_analysis()
            await demo_deep_website_analysis()
            await demo_product_research()
            await demo_real_time_vs_cached()
            await demo_search_depth_comparison()
    # Transport-level failures (DNS, connection, timeout) from aiohttp.
    except aiohttp.ClientError as e:
        print(f'❌ Network Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/cloud/05_search_api.py",
"license": "MIT License",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/custom-functions/2fa.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent

# TOTP secret from the environment; falls back to the public demo secret
# used by authenticationtest.com (safe to hard-code — it is not a real account).
secret_key = os.environ.get('OTP_SECRET_KEY')
if not secret_key:
    # For this example copy the code from the website https://authenticationtest.com/totpChallenge/
    # For real 2fa just copy the secret key when you setup 2fa, you can get this e.g. in 1Password
    secret_key = 'JBSWY3DPEHPK3PXP'
# Passing the secret via sensitive_data keys it to the bu_2fa_code placeholder
# the task refers to, instead of embedding the raw secret in the task text.
sensitive_data = {'bu_2fa_code': secret_key}
task = """
1. Go to https://authenticationtest.com/totpChallenge/ and try to log in.
2. If prompted for 2FA code:
	Input the the secret bu_2fa_code.
	When you input bu_2fa_code, the 6 digit code will be generated automatically.
"""
# Run the agent synchronously; run_sync blocks until the task finishes.
Agent(task=task, sensitive_data=sensitive_data).run_sync()  # type: ignore
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/custom-functions/2fa.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/custom-functions/cua.py | """
OpenAI Computer Use Assistant (CUA) Integration
This example demonstrates how to integrate OpenAI's Computer Use Assistant as a fallback
action when standard browser actions are insufficient to achieve the desired goal.
The CUA can perform complex computer interactions that might be difficult to achieve
through regular browser-use actions.
"""
import asyncio
import base64
import os
import sys
from io import BytesIO
from PIL import Image
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from openai import AsyncOpenAI
from pydantic import BaseModel, Field
from browser_use import Agent, ChatOpenAI, Tools
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
class OpenAICUAAction(BaseModel):
    """Parameters for OpenAI Computer Use Assistant action."""

    # Free-text goal forwarded verbatim to the CUA model as its instruction.
    description: str = Field(..., description='Description of your next goal')
async def handle_model_action(browser_session: BrowserSession, action) -> ActionResult:
    """
    Given a computer action (e.g., click, double_click, scroll, etc.),
    execute the corresponding operation using CDP.

    Args:
        browser_session: Session whose focused target receives the input events.
        action: CUA action object; its ``type`` attribute selects the branch below.

    Returns:
        ActionResult describing what was done, or carrying an error message.
    """
    action_type = action.type
    # Generic message returned for unsupported or failed actions.
    ERROR_MSG: str = 'Could not execute the CUA action.'
    if not browser_session.agent_focus_target_id:
        return ActionResult(error='No active browser session')
    # Get CDP session for the focused target using the public API
    try:
        cdp_session = await browser_session.get_or_create_cdp_session(browser_session.agent_focus_target_id, focus=False)
    except Exception as e:
        return ActionResult(error=f'Failed to get CDP session: {e}')
    try:
        match action_type:
            case 'click':
                x, y = action.x, action.y
                button = action.button
                print(f"Action: click at ({x}, {y}) with button '{button}'")
                # Not handling things like middle click, etc.
                if button != 'left' and button != 'right':
                    button = 'left'
                # Use CDP to click: a press followed by a release at the same point.
                await browser_session.cdp_client.send.Input.dispatchMouseEvent(
                    params={
                        'type': 'mousePressed',
                        'x': x,
                        'y': y,
                        'button': button,
                        'clickCount': 1,
                    },
                    session_id=cdp_session.session_id,
                )
                await browser_session.cdp_client.send.Input.dispatchMouseEvent(
                    params={
                        'type': 'mouseReleased',
                        'x': x,
                        'y': y,
                        'button': button,
                    },
                    session_id=cdp_session.session_id,
                )
                msg = f'Clicked at ({x}, {y}) with button {button}'
                return ActionResult(extracted_content=msg, include_in_memory=True, long_term_memory=msg)
            case 'scroll':
                x, y = action.x, action.y
                scroll_x, scroll_y = action.scroll_x, action.scroll_y
                print(f'Action: scroll at ({x}, {y}) with offsets (scroll_x={scroll_x}, scroll_y={scroll_y})')
                # Move mouse to position first
                await browser_session.cdp_client.send.Input.dispatchMouseEvent(
                    params={
                        'type': 'mouseMoved',
                        'x': x,
                        'y': y,
                    },
                    session_id=cdp_session.session_id,
                )
                # Execute scroll using JavaScript
                await browser_session.cdp_client.send.Runtime.evaluate(
                    params={
                        'expression': f'window.scrollBy({scroll_x}, {scroll_y})',
                    },
                    session_id=cdp_session.session_id,
                )
                msg = f'Scrolled at ({x}, {y}) with offsets (scroll_x={scroll_x}, scroll_y={scroll_y})'
                return ActionResult(extracted_content=msg, include_in_memory=True, long_term_memory=msg)
            case 'keypress':
                keys = action.keys
                for k in keys:
                    print(f"Action: keypress '{k}'")
                    # A simple mapping for common keys; expand as needed.
                    key_code = k
                    if k.lower() == 'enter':
                        key_code = 'Enter'
                    elif k.lower() == 'space':
                        key_code = 'Space'
                    # Use CDP to send key: keyDown then keyUp per key.
                    await browser_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyDown',
                            'key': key_code,
                        },
                        session_id=cdp_session.session_id,
                    )
                    await browser_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyUp',
                            'key': key_code,
                        },
                        session_id=cdp_session.session_id,
                    )
                msg = f'Pressed keys: {keys}'
                return ActionResult(extracted_content=msg, include_in_memory=True, long_term_memory=msg)
            case 'type':
                text = action.text
                print(f'Action: type text: {text}')
                # Type text character by character via 'char' key events.
                for char in text:
                    await browser_session.cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'char',
                            'text': char,
                        },
                        session_id=cdp_session.session_id,
                    )
                msg = f'Typed text: {text}'
                return ActionResult(extracted_content=msg, include_in_memory=True, long_term_memory=msg)
            case 'wait':
                print('Action: wait')
                await asyncio.sleep(2)
                msg = 'Waited for 2 seconds'
                return ActionResult(extracted_content=msg, include_in_memory=True, long_term_memory=msg)
            case 'screenshot':
                # Nothing to do as screenshot is taken at each turn
                print('Action: screenshot')
                return ActionResult(error=ERROR_MSG)
            # Handle other actions here
            case _:
                print(f'Unrecognized action: {action}')
                return ActionResult(error=ERROR_MSG)
    except Exception as e:
        print(f'Error handling action {action}: {e}')
        return ActionResult(error=ERROR_MSG)
# Registry the CUA fallback action is attached to.
tools = Tools()


@tools.registry.action(
    'Use OpenAI Computer Use Assistant (CUA) as a fallback when standard browser actions cannot achieve the desired goal. This action sends a screenshot and description to OpenAI CUA and executes the returned computer use actions.',
    param_model=OpenAICUAAction,
)
async def openai_cua_fallback(params: OpenAICUAAction, browser_session: BrowserSession):
    """
    Fallback action that uses OpenAI's Computer Use Assistant to perform complex
    computer interactions when standard browser actions are insufficient.

    Flow: capture the current screenshot, rescale it to the viewport, send it
    with the goal to the computer-use-preview model, then execute the first
    returned computer_call via handle_model_action().
    """
    print(f'🎯 CUA Action Starting - Goal: {params.description}')
    try:
        # Get browser state summary
        state = await browser_session.get_browser_state_summary()
        page_info = state.page_info
        if not page_info:
            raise Exception('Page info not found - cannot execute CUA action')
        print(f'📐 Viewport size: {page_info.viewport_width}x{page_info.viewport_height}')
        screenshot_b64 = state.screenshot
        if not screenshot_b64:
            raise Exception('Screenshot not found - cannot execute CUA action')
        print(f'📸 Screenshot captured (base64 length: {len(screenshot_b64)} chars)')
        # Debug: Check screenshot dimensions
        image = Image.open(BytesIO(base64.b64decode(screenshot_b64)))
        print(f'📏 Screenshot actual dimensions: {image.size[0]}x{image.size[1]}')
        # rescale the screenshot to the viewport size so CUA's coordinates
        # map directly onto CDP input coordinates
        image = image.resize((page_info.viewport_width, page_info.viewport_height))
        # Save as PNG to bytes buffer
        buffer = BytesIO()
        image.save(buffer, format='PNG')
        buffer.seek(0)
        # Convert to base64
        screenshot_b64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
        print(f'📸 Rescaled screenshot to viewport size: {page_info.viewport_width}x{page_info.viewport_height}')
        client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))
        print('🔄 Sending request to OpenAI CUA...')
        prompt = f"""
	You will be given an action to execute and screenshot of the current screen.
	Output one computer_call object that will achieve this goal.
	Goal: {params.description}
	"""
        # Single-turn request to the computer-use-preview model with the
        # viewport-sized screenshot attached as an input image.
        response = await client.responses.create(
            model='computer-use-preview',
            tools=[
                {
                    'type': 'computer_use_preview',
                    'display_width': page_info.viewport_width,
                    'display_height': page_info.viewport_height,
                    'environment': 'browser',
                }
            ],
            input=[
                {
                    'role': 'user',
                    'content': [
                        {'type': 'input_text', 'text': prompt},
                        {
                            'type': 'input_image',
                            'detail': 'auto',
                            'image_url': f'data:image/png;base64,{screenshot_b64}',
                        },
                    ],
                }
            ],
            truncation='auto',
            temperature=0.1,
        )
        print(f'📥 CUA response received: {response}')
        # Only the first computer_call from the response is executed.
        computer_calls = [item for item in response.output if item.type == 'computer_call']
        computer_call = computer_calls[0] if computer_calls else None
        if not computer_call:
            raise Exception('No computer calls found in CUA response')
        action = computer_call.action
        print(f'🎬 Executing CUA action: {action.type} - {action}')
        action_result = await handle_model_action(browser_session, action)
        await asyncio.sleep(0.1)
        print('✅ CUA action completed successfully')
        return action_result
    except Exception as e:
        msg = f'Error executing CUA action: {e}'
        print(f'❌ {msg}')
        return ActionResult(error=msg)
async def main():
    """Run a demo agent with the CUA fallback tool registered."""
    # Initialize the language model
    llm = ChatOpenAI(
        model='o4-mini',
        temperature=1.0,
    )
    # Create browser session
    browser_session = BrowserSession()
    # Example task that might require CUA fallback
    # This could be a complex interaction that's difficult with standard actions
    task = """
	Go to https://csreis.github.io/tests/cross-site-iframe.html
	Click on "Go cross-site, complex page" using index
	Use the OpenAI CUA fallback to click on "Tree is open..." link.
	"""
    # Create agent with our custom tools that includes CUA fallback
    agent = Agent(
        task=task,
        llm=llm,
        tools=tools,
        browser_session=browser_session,
    )
    print('🚀 Starting agent with CUA fallback support...')
    print(f'Task: {task}')
    print('-' * 50)
    try:
        # Run the agent
        result = await agent.run()
        print(f'\n✅ Task completed! Result: {result}')
    except Exception as e:
        print(f'\n❌ Error running agent: {e}')
    finally:
        # Clean up browser session even if the run failed
        await browser_session.kill()
        print('\n🧹 Browser session closed')


if __name__ == '__main__':
    # Example of different scenarios where CUA might be useful
    print('🔧 OpenAI Computer Use Assistant (CUA) Integration Example')
    print('=' * 60)
    print()
    print("This example shows how to integrate OpenAI's CUA as a fallback action")
    print('when standard browser-use actions cannot achieve the desired goal.')
    print()
    print('CUA is particularly useful for:')
    print('• Complex mouse interactions (drag & drop, precise clicking)')
    print('• Keyboard shortcuts and key combinations')
    print('• Actions that require pixel-perfect precision')
    print("• Custom UI elements that don't respond to standard actions")
    print()
    print('Make sure you have OPENAI_API_KEY set in your environment!')
    print()
    # Check if OpenAI API key is available before launching the browser
    if not os.getenv('OPENAI_API_KEY'):
        print('❌ Error: OPENAI_API_KEY environment variable not set')
        print('Please set your OpenAI API key to use CUA integration')
        sys.exit(1)
    # Run the example
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/custom-functions/cua.py",
"license": "MIT License",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/add_image_context.py | """
Show how to use sample_images to add image context for your task
"""
import asyncio
import base64
from pathlib import Path
from typing import Any
from dotenv import load_dotenv
from browser_use import Agent
from browser_use.llm import ChatOpenAI
from browser_use.llm.messages import ContentPartImageParam, ContentPartTextParam, ImageURL
# Load environment variables
load_dotenv()
def image_to_base64(image_path: str) -> str:
    """
    Convert image file to base64 string.

    Args:
        image_path: Path to the image file

    Returns:
        Base64 encoded string of the image

    Raises:
        FileNotFoundError: If image file doesn't exist
        IOError: If image file cannot be read
    """
    source = Path(image_path)
    if not source.exists():
        raise FileNotFoundError(f'Image file not found: {image_path}')
    try:
        raw = source.read_bytes()
    except OSError as e:
        raise OSError(f'Failed to read image file: {e}')
    return base64.b64encode(raw).decode('utf-8')
def create_sample_images() -> list[ContentPartTextParam | ContentPartImageParam]:
    """
    Create image context for the agent.

    Returns:
        list of content parts containing text and image data
    """
    # Image path - replace with your actual image path.
    image_path = 'sample_image.png'
    caption = (
        'The following image explains the google layout. '
        'The image highlights several buttons with red boxes, '
        'and next to them are corresponding labels in red text.\n'
        'Each label corresponds to a button as follows:\n'
        'Label 1 is the "image" button.'
    )
    encoded = image_to_base64(image_path)
    # Build the text part and the data-URL image part directly.
    return [
        ContentPartTextParam(text=caption),
        ContentPartImageParam(
            image_url=ImageURL(
                url=f'data:image/jpeg;base64,{encoded}',
                media_type='image/jpeg',
            ),
        ),
    ]
async def main() -> None:
    """
    Main function to run the browser agent with image context.
    """
    # Task configuration
    task_str = 'goto https://www.google.com/ and click image button'
    # Initialize the language model
    model = ChatOpenAI(model='gpt-4.1')
    # Create sample images for context; fall back to no images on failure.
    # NOTE(review): FileNotFoundError is a subclass of OSError, so catching
    # OSError alone would suffice — kept explicit for readability.
    try:
        sample_images = create_sample_images()
    except (FileNotFoundError, OSError) as e:
        print(f'Error loading sample images: {e}')
        print('Continuing without sample images...')
        sample_images = []
    # Initialize and run the agent
    agent = Agent(task=task_str, llm=model, sample_images=sample_images)
    await agent.run()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/add_image_context.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/multi_tab.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
# video: https://preview.screen.studio/share/clenCmS6
llm = ChatOpenAI(model='gpt-4.1-mini')
# Agent is constructed at import time; the task exercises multi-tab handling.
agent = Agent(
    task='open 3 tabs with elon musk, sam altman, and steve jobs, then go back to the first and stop',
    llm=llm,
)


async def main():
    # Run the multi-tab task to completion.
    await agent.run()


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/multi_tab.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/process_agent_output.py | import asyncio
import os
import sys
from pprint import pprint
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.agent.views import AgentHistoryList
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.profile import ViewportSize
llm = ChatOpenAI(model='gpt-4.1-mini')


async def main():
    """Run a short agent task and inspect the returned history object."""
    browser_session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=False,
            traces_dir='./tmp/result_processing',
            window_size=ViewportSize(width=1280, height=1000),
            user_data_dir='~/.config/browseruse/profiles/default',
        )
    )
    await browser_session.start()
    try:
        agent = Agent(
            task="go to google.com and type 'OpenAI' click search and give me the first url",
            llm=llm,
            browser_session=browser_session,
        )
        # Cap at 3 steps so the demo stays short.
        history: AgentHistoryList = await agent.run(max_steps=3)
        # Inspect the different views the history object exposes.
        print('Final Result:')
        pprint(history.final_result(), indent=4)
        print('\nErrors:')
        pprint(history.errors(), indent=4)
        # e.g. xPaths the model clicked on
        print('\nModel Outputs:')
        pprint(history.model_actions(), indent=4)
        print('\nThoughts:')
        pprint(history.model_thoughts(), indent=4)
    finally:
        # Always release the browser, even if the run raised.
        await browser_session.stop()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/process_agent_output.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/scrolling_page.py | # Goal: Automates webpage scrolling with various scrolling actions, including element-specific scrolling.
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY is not set')
"""
Example: Enhanced 'Scroll' action with page amounts and element-specific scrolling.
This script demonstrates the new enhanced scrolling capabilities:
1. PAGE-LEVEL SCROLLING:
- Scrolling by specific page amounts using 'num_pages' parameter (0.5, 1.0, 2.0, etc.)
- Scrolling up or down using the 'down' parameter
- Uses JavaScript window.scrollBy() or smart container detection
2. ELEMENT-SPECIFIC SCROLLING:
- NEW: Optional 'index' parameter to scroll within specific elements
- Perfect for dropdowns, sidebars, and custom UI components
- Uses direct scrollTop manipulation (no mouse events that might close dropdowns)
- Automatically finds scroll containers in the element hierarchy
- Falls back to page scrolling if no container found
3. IMPLEMENTATION DETAILS:
- Does NOT use mouse movement or wheel events
- Direct DOM manipulation for precision and reliability
- Container-aware scrolling prevents unwanted side effects
"""
llm = ChatOpenAI(model='gpt-4.1-mini')
browser_profile = BrowserProfile(headless=False)
browser_session = BrowserSession(browser_profile=browser_profile)
# Example 1: Basic page scrolling with custom amounts
agent1 = Agent(
task="Navigate to 'https://en.wikipedia.org/wiki/Internet' and scroll down by one page - then scroll up by 0.5 pages - then scroll down by 0.25 pages - then scroll down by 2 pages.",
llm=llm,
browser_session=browser_session,
)
# Example 2: Element-specific scrolling (dropdowns and containers)
agent2 = Agent(
task="""Go to https://semantic-ui.com/modules/dropdown.html#/definition and:
1. Scroll down in the left sidebar by 2 pages
2. Then scroll down 1 page in the main content area
3. Click on the State dropdown and scroll down 1 page INSIDE the dropdown to see more states
4. The dropdown should stay open while scrolling inside it""",
llm=llm,
browser_session=browser_session,
)
# Example 3: Text-based scrolling alternative
agent3 = Agent(
task="Navigate to 'https://en.wikipedia.org/wiki/Internet' and scroll to the text 'The vast majority of computer'",
llm=llm,
browser_session=browser_session,
)
async def main():
	"""Prompt for an example number (1-3) and run the matching agent.

	Falls back to Example 1 on any unrecognized input.
	"""
	print('Choose which scrolling example to run:')
	print('1. Basic page scrolling with custom amounts (Wikipedia)')
	print('2. Element-specific scrolling (Semantic UI dropdowns)')
	print('3. Text-based scrolling (Wikipedia)')
	choice = input('Enter choice (1-3): ').strip()
	# Dispatch table: choice -> (banner to print, agent to run).
	examples = {
		'1': ('🚀 Running Example 1: Basic page scrolling...', agent1),
		'2': ('🚀 Running Example 2: Element-specific scrolling...', agent2),
		'3': ('🚀 Running Example 3: Text-based scrolling...', agent3),
	}
	if choice in examples:
		banner, selected_agent = examples[choice]
		print(banner)
		await selected_agent.run()
	else:
		print('❌ Invalid choice. Running Example 1 by default...')
		await agent1.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/scrolling_page.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/secure.py | """
Azure OpenAI example with data privacy and high-scale configuration.
Environment Variables Required:
- AZURE_OPENAI_KEY (or AZURE_OPENAI_API_KEY)
- AZURE_OPENAI_ENDPOINT
- AZURE_OPENAI_DEPLOYMENT (optional)
DATA PRIVACY WITH AZURE OPENAI:
✅ Good News: No Training on Your Data by Default
Azure OpenAI Service already protects your data:
✅ NOT used to train OpenAI models
✅ NOT shared with other customers
✅ NOT accessible to OpenAI directly
✅ NOT used to improve Microsoft/third-party products
✅ Hosted entirely within Azure (not OpenAI's servers)
⚠️ Default Data Retention (30 Days)
- Prompts and completions stored for up to 30 days
- Purpose: Abuse monitoring and compliance
- Access: Microsoft authorized personnel (only if abuse detected)
🔒 How to Disable Data Logging Completely
Apply for Microsoft's "Limited Access Program":
1. Contact Microsoft Azure support
2. Submit Limited Access Program request
3. Demonstrate legitimate business need
4. After approval: Zero data logging, immediate deletion, no human review
For high-scale deployments (500+ agents), consider:
- Multiple deployments across regions
How to Verify This Yourself, that there is no data logging:
- Network monitoring: Run with network monitoring tools
- Firewall rules: Block all domains except Azure OpenAI and your target sites
Contact us if you need help with this: support@browser-use.com
"""
import asyncio
import os
import sys
from dotenv import load_dotenv
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
load_dotenv()
os.environ['ANONYMIZED_TELEMETRY'] = 'false'
from browser_use import Agent, BrowserProfile, ChatAzureOpenAI
# Configuration LLM
# NOTE(review): module docstring also mentions AZURE_OPENAI_API_KEY as an
# alternative variable name, but only AZURE_OPENAI_KEY is read here — confirm.
api_key = os.getenv('AZURE_OPENAI_KEY')
azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
llm = ChatAzureOpenAI(model='gpt-4.1-mini', api_key=api_key, azure_endpoint=azure_endpoint)
# Configuration Task
task = 'Find the founders of the sensitive company_name'
# Configuration Browser (optional)
# allowed_domains restricts navigation; default extensions are disabled for a leaner profile.
browser_profile = BrowserProfile(allowed_domains=['*google.com', 'browser-use.com'], enable_default_extensions=False)
# Sensitive data (optional) - {key: sensitive_information} - we filter out the sensitive_information from any input to the LLM, it will only work with placeholder.
# By default we pass screenshots to the LLM which can contain your information. Set use_vision=False to disable this.
# If you trust your LLM endpoint, you don't need to worry about this.
sensitive_data = {'company_name': 'browser-use'}
# Create Agent
agent = Agent(task=task, llm=llm, browser_profile=browser_profile, sensitive_data=sensitive_data)  # type: ignore
async def main():
	"""Run the pre-configured agent with a conservative step budget."""
	await agent.run(max_steps=10)


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/secure.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/features/video_recording.py | import asyncio
from pathlib import Path
from browser_use import Agent, Browser, ChatOpenAI
# NOTE: To use this example, install imageio[ffmpeg], e.g. with uv pip install "browser-use[video]"
async def main():
	"""Record a short agent run; the video lands in ./tmp/recordings."""
	session = Browser(record_video_dir=Path('./tmp/recordings'))
	trending_task = 'Go to github.com/trending then navigate to the first trending repository and report how many commits it has.'
	agent = Agent(
		task=trending_task,
		llm=ChatOpenAI(model='gpt-4.1-mini'),
		browser_session=session,
	)
	await agent.run(max_steps=5)
	# The video will be saved automatically when the agent finishes and the session closes.
	print('Agent run finished. Check the ./tmp/recordings directory for the video.')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/video_recording.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/file_system/alphabet_earnings.py | import asyncio
import os
import pathlib
import shutil
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# Resolve the working directory relative to this script so runs are cwd-independent.
SCRIPT_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
agent_dir = SCRIPT_DIR / 'alphabet_earnings'
agent_dir.mkdir(exist_ok=True)
task = """
Go to https://abc.xyz/assets/cc/27/3ada14014efbadd7a58472f1f3f4/2025q2-alphabet-earnings-release.pdf.
Read the PDF and save 3 interesting data points in "alphabet_earnings.pdf" and share it with me!
""".strip('\n')
# flash_mode skips the model's thinking output for a faster run; the agent's
# file actions are sandboxed under <agent_dir>/fs.
agent = Agent(
	task=task,
	llm=ChatOpenAI(model='o4-mini'),
	file_system_path=str(agent_dir / 'fs'),
	flash_mode=True,
)
async def main():
	"""Run the agent, then tear down its scratch file system on confirmation."""
	await agent.run()
	input(f'Press Enter to clean the file system at {agent_dir}...')
	# Remove everything the agent wrote under <agent_dir>/fs.
	shutil.rmtree(str(agent_dir / 'fs'))


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/file_system/alphabet_earnings.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/file_system/excel_sheet.py | import asyncio
import os
import sys
from browser_use.llm.openai.chat import ChatOpenAI
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
llm = ChatOpenAI(model='o4-mini')
# The task combines browsing with a file artifact (CSV) produced by the agent.
task = (
	'Find current stock price of companies Meta and Amazon. Then, make me a CSV file with 2 columns: company name, stock price.'
)
agent = Agent(task=task, llm=llm)
async def main():
	"""Run the stock-price task, then report token usage and wall-clock time."""
	import time

	started = time.time()
	history = await agent.run()
	# Token usage accumulated across all LLM calls in the run.
	print(history.usage)
	elapsed = time.time() - started
	print(f'Time taken: {elapsed} seconds')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/file_system/excel_sheet.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/file_system/file_system.py | import asyncio
import os
import pathlib
import shutil
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# Resolve all artifact paths relative to this script's directory.
SCRIPT_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
agent_dir = SCRIPT_DIR / 'file_system'
agent_dir.mkdir(exist_ok=True)
conversation_dir = agent_dir / 'conversations' / 'conversation'
print(f'Agent logs directory: {agent_dir}')
task = """
Go to https://mertunsall.github.io/posts/post1.html
Save the title of the article in "data.md"
Then, use append_file to add the first sentence of the article to "data.md"
Then, read the file to see its content and make sure it's correct.
Finally, share the file with me.
NOTE: DO NOT USE extract action - everything is visible in browser state.
""".strip('\n')
llm = ChatOpenAI(model='gpt-4.1-mini')
# save_conversation_path logs each LLM exchange; file_system_path sandboxes
# the agent's file actions under <agent_dir>/fs.
agent = Agent(
	task=task,
	llm=llm,
	save_conversation_path=str(conversation_dir),
	file_system_path=str(agent_dir / 'fs'),
)
async def main():
	"""Run the agent, print its final answer, then clean up its file system."""
	history = await agent.run()
	print(f'Final result: {history.final_result()}', flush=True)
	input('Press Enter to clean the file system...')
	# Delete the agent's scratch directory created via file_system_path.
	shutil.rmtree(str(agent_dir / 'fs'))


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/file_system/file_system.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/getting_started/01_basic_search.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Run a simple Google-search task with the hosted Browser-Use model."""
	search_task = "Search Google for 'what is browser automation' and tell me the top 3 results"
	agent = Agent(task=search_task, llm=ChatBrowserUse(model='bu-2-0'))
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/getting_started/01_basic_search.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/getting_started/02_form_filling.py | """
Getting Started Example 2: Form Filling
This example demonstrates how to:
- Navigate to a website with forms
- Fill out input fields
- Submit forms
- Handle basic form interactions
This builds on the basic search example by showing more complex interactions.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Fill and submit the httpbin demo contact form, then report the server response."""
	# Initialize the model
	llm = ChatBrowserUse(model='bu-2-0')
	# Define a form filling task
	task = """
	Go to https://httpbin.org/forms/post and fill out the contact form with:
	- Customer name: John Doe
	- Telephone: 555-123-4567
	- Email: john.doe@example.com
	- Size: Medium
	- Topping: cheese
	- Delivery time: now
	- Comments: This is a test form submission
	Then submit the form and tell me what response you get.
	"""
	# Create and run the agent
	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/getting_started/02_form_filling.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/getting_started/03_data_extraction.py | """
Getting Started Example 3: Data Extraction
This example demonstrates how to:
- Navigate to a website with structured data
- Extract specific information from the page
- Process and organize the extracted data
- Return structured results
This builds on previous examples by showing how to get valuable data from websites.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Extract the first five quotes (text, author, tags) from quotes.toscrape.com."""
	# Initialize the model
	llm = ChatBrowserUse(model='bu-2-0')
	# Define a data extraction task
	task = """
	Go to https://quotes.toscrape.com/ and extract the following information:
	- The first 5 quotes on the page
	- The author of each quote
	- The tags associated with each quote
	Present the information in a clear, structured format like:
	Quote 1: "[quote text]" - Author: [author name] - Tags: [tag1, tag2, ...]
	Quote 2: "[quote text]" - Author: [author name] - Tags: [tag1, tag2, ...]
	etc.
	"""
	# Create and run the agent
	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/getting_started/03_data_extraction.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/getting_started/04_multi_step_task.py | """
Getting Started Example 4: Multi-Step Task
This example demonstrates how to:
- Perform a complex workflow with multiple steps
- Navigate between different pages
- Combine search, form filling, and data extraction
- Handle a realistic end-to-end scenario
This is the most advanced getting started example, combining all previous concepts.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Run a multi-page research workflow comparing Python scraping libraries."""
	# Initialize the model
	llm = ChatBrowserUse(model='bu-2-0')
	# Define a multi-step task
	task = """
	I want you to research Python web scraping libraries. Here's what I need:
	1. First, search Google for "best Python web scraping libraries 2024"
	2. Find a reputable article or blog post about this topic
	3. From that article, extract the top 3 recommended libraries
	4. For each library, visit its official website or GitHub page
	5. Extract key information about each library:
	- Name
	- Brief description
	- Main features or advantages
	- GitHub stars (if available)
	Present your findings in a summary format comparing the three libraries.
	"""
	# Create and run the agent
	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/getting_started/04_multi_step_task.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/getting_started/05_fast_agent.py | import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, BrowserProfile
# Speed optimization instructions for the model
SPEED_OPTIMIZATION_PROMPT = """
Speed optimization instructions:
- Be extremely concise and direct in your responses
- Get to the goal as quickly as possible
- Use multi-action sequences whenever possible to reduce steps
"""
async def main():
	"""Run a speed-tuned agent: fast LLM, minimal waits, flash mode."""
	# 1. Use fast LLM - Llama 4 on Groq for ultra-fast inference
	from browser_use import ChatGroq

	llm = ChatGroq(
		model='meta-llama/llama-4-maverick-17b-128e-instruct',
		temperature=0.0,
	)
	# from browser_use import ChatGoogle
	# llm = ChatGoogle(model='gemini-flash-lite-latest')
	# 2. Create speed-optimized browser profile
	# Shorter waits trade reliability on slow pages for raw speed.
	browser_profile = BrowserProfile(
		minimum_wait_page_load_time=0.1,
		wait_between_actions=0.1,
		headless=False,
	)
	# 3. Define a speed-focused task
	task = """
	1. Go to reddit https://www.reddit.com/search/?q=browser+agent&type=communities
	2. Click directly on the first 5 communities to open each in new tabs
	3. Find out what the latest post is about, and switch directly to the next tab
	4. Return the latest post summary for each page
	"""
	# 4. Create agent with all speed optimizations
	agent = Agent(
		task=task,
		llm=llm,
		flash_mode=True,  # Disables thinking in the LLM output for maximum speed
		browser_profile=browser_profile,
		extend_system_message=SPEED_OPTIMIZATION_PROMPT,
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/getting_started/05_fast_agent.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/integrations/agentmail/2fa.py | import asyncio
import os
import sys
from agentmail import AsyncAgentMail # type: ignore
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Browser, ChatBrowserUse
from examples.integrations.agentmail.email_tools import EmailTools
TASK = """
Go to reddit.com, create a new account (use the get_email_address), make up password and all other information, confirm the 2fa with get_latest_email, and like latest post on r/elon subreddit.
"""
async def main():
	"""Create a throwaway AgentMail inbox and run the signup + 2FA task with it."""
	# Create email inbox
	# Get an API key from https://agentmail.to/
	client = AsyncAgentMail()
	inbox = await client.inboxes.create()
	print(f'Your email address is: {inbox.inbox_id}\n\n')
	# Expose get_email_address / get_latest_email actions to the browser-use agent.
	tools = EmailTools(email_client=client, inbox=inbox)
	# Initialize the LLM for browser-use agent
	llm = ChatBrowserUse(model='bu-2-0')
	# Set your local browser path
	browser = Browser(executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
	agent = Agent(task=TASK, tools=tools, llm=llm, browser=browser)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/integrations/agentmail/2fa.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/integrations/agentmail/email_tools.py | """
Email management to enable 2fa.
"""
import asyncio
import logging
# run `pip install agentmail` to install the library
from agentmail import AsyncAgentMail, Message, MessageReceivedEvent, Subscribe # type: ignore
from agentmail.inboxes.types.inbox import Inbox # type: ignore
from agentmail.inboxes.types.inbox_id import InboxId # type: ignore
from browser_use import Tools
# Configure basic logging if not already configured
# (only touches the root logger when no handlers exist, so embedding apps keep their setup)
if not logging.getLogger().handlers:
	logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(name)s - %(message)s')
logger = logging.getLogger(__name__)
class EmailTools(Tools):
	"""Tools subclass exposing AgentMail email actions to a browser-use agent.

	Registers two actions on construction:
	- get_email_address: returns the inbox address for sign-ups/logins.
	- get_latest_email: fetches the newest unread email, briefly waiting for
	  new mail if none is pending — intended for reading 2FA codes.
	"""

	def __init__(
		self,
		email_client: AsyncAgentMail | None = None,
		email_timeout: int = 30,
		inbox: Inbox | None = None,
	):
		"""
		Args:
			email_client: Existing AgentMail client; a new one is created if None.
			email_timeout: Seconds to wait for an incoming email before timing out.
			inbox: Pre-created inbox to reuse; otherwise one is created lazily.
		"""
		super().__init__()
		self.email_client = email_client or AsyncAgentMail()
		self.email_timeout = email_timeout
		self.register_email_tools()
		self.inbox: Inbox | None = inbox

	def _serialize_message_for_llm(self, message: Message) -> str:
		"""
		Serialize a message into a compact plain-text form for the LLM.
		"""
		# Use text if available, otherwise convert HTML to simple text
		body_content = message.text
		if not body_content and message.html:
			body_content = self._html_to_text(message.html)
		msg = f'From: {message.from_}\nTo: {message.to}\nTimestamp: {message.timestamp.isoformat()}\nSubject: {message.subject}\nBody: {body_content}'
		return msg

	def _html_to_text(self, html: str) -> str:
		"""
		Simple HTML to text conversion: strip scripts/styles/tags, decode a few
		common entities, and collapse whitespace.
		"""
		import re

		# Remove script and style elements - handle spaces in closing tags
		html = re.sub(r'<script\b[^>]*>.*?</script\s*>', '', html, flags=re.DOTALL | re.IGNORECASE)
		html = re.sub(r'<style\b[^>]*>.*?</style\s*>', '', html, flags=re.DOTALL | re.IGNORECASE)
		# Remove HTML tags
		html = re.sub(r'<[^>]+>', '', html)
		# Decode HTML entities. '&amp;' must be decoded LAST: decoding it first
		# double-unescapes input like '&amp;lt;' into '<' instead of the literal '&lt;'.
		html = html.replace('&nbsp;', ' ')
		html = html.replace('&lt;', '<')
		html = html.replace('&gt;', '>')
		html = html.replace('&quot;', '"')
		html = html.replace('&#39;', "'")
		html = html.replace('&amp;', '&')
		# Clean up whitespace
		html = re.sub(r'\s+', ' ', html)
		html = html.strip()
		return html

	async def get_or_create_inbox_client(self) -> Inbox:
		"""
		Return the cached inbox, creating a default one on first use.
		(Assumes the agent is on the free tier; on paid tiers prefer one inbox per agent.)
		"""
		if self.inbox:
			return self.inbox
		return await self.create_inbox_client()

	async def create_inbox_client(self) -> Inbox:
		"""
		Create a default inbox profile for this API key and cache it on self.
		(Assumes the agent is on the free tier; on paid tiers prefer one inbox per agent.)
		"""
		inbox = await self.email_client.inboxes.create()
		self.inbox = inbox
		return inbox

	async def wait_for_message(self, inbox_id: InboxId) -> Message:
		"""
		Block until a message is received in the inbox via websocket.

		Raises:
			TimeoutError: if nothing arrives within `self.email_timeout` seconds.
		"""
		async with self.email_client.websockets.connect() as ws:
			await ws.send_subscribe(message=Subscribe(inbox_ids=[inbox_id]))
			try:
				while True:
					data = await asyncio.wait_for(ws.recv(), timeout=self.email_timeout)
					if isinstance(data, MessageReceivedEvent):
						# Mark as read immediately so a later poll doesn't return it again.
						await self.email_client.inboxes.messages.update(
							inbox_id=inbox_id, message_id=data.message.message_id, remove_labels=['unread']
						)
						msg = data.message
						logger.info(f'Received new message from: {msg.from_} with subject: {msg.subject}')
						return msg
					# If not MessageReceived, continue waiting for the next event
			except TimeoutError:
				raise TimeoutError(f'No email received in the inbox in {self.email_timeout}s')

	def register_email_tools(self):
		"""Register all email-related controller actions"""

		@self.action('Get email address for login. You can use this email to login to any service with email and password')
		async def get_email_address() -> str:
			"""
			Get the email address of the inbox
			"""
			inbox = await self.get_or_create_inbox_client()
			logger.info(f'Email address: {inbox.inbox_id}')
			return inbox.inbox_id

		@self.action(
			'Get the latest unread email from the inbox from the last max_age_minutes (default 5 minutes). Waits some seconds for new emails if none found. Use for 2FA codes.'
		)
		async def get_latest_email(max_age_minutes: int = 5) -> str:
			"""
			1. Check for unread emails within the last max_age_minutes
			2. If no recent unread email, wait for a new email via websocket
			"""
			from datetime import datetime, timedelta, timezone

			inbox = await self.get_or_create_inbox_client()
			# Get unread emails
			emails = await self.email_client.inboxes.messages.list(inbox_id=inbox.inbox_id, labels=['unread'])
			# Filter unread emails by time window - use UTC timezone to match email timestamps
			time_cutoff = datetime.now(timezone.utc) - timedelta(minutes=max_age_minutes)
			logger.debug(f'Time cutoff: {time_cutoff}')
			logger.info(f'Found {len(emails.messages)} unread emails for inbox {inbox.inbox_id}')
			recent_unread_emails = []
			for email_summary in emails.messages:
				# Get full email details to check timestamp
				full_email = await self.email_client.inboxes.messages.get(
					inbox_id=inbox.inbox_id, message_id=email_summary.message_id
				)
				# Handle timezone comparison properly
				email_timestamp = full_email.timestamp
				if email_timestamp.tzinfo is None:
					# If email timestamp is naive, assume UTC
					email_timestamp = email_timestamp.replace(tzinfo=timezone.utc)
				if email_timestamp >= time_cutoff:
					recent_unread_emails.append(full_email)
			# If we have recent unread emails, return the latest one
			if recent_unread_emails:
				# Sort by timestamp and get the most recent
				recent_unread_emails.sort(key=lambda x: x.timestamp, reverse=True)
				logger.info(f'Found {len(recent_unread_emails)} recent unread emails for inbox {inbox.inbox_id}')
				latest_email = recent_unread_emails[0]
				# Mark as read
				await self.email_client.inboxes.messages.update(
					inbox_id=inbox.inbox_id, message_id=latest_email.message_id, remove_labels=['unread']
				)
				logger.info(f'Latest email from: {latest_email.from_} with subject: {latest_email.subject}')
				return self._serialize_message_for_llm(latest_email)
			else:
				logger.info('No recent unread emails, waiting for a new one')
				# No recent unread emails, wait for new one
				try:
					latest_message = await self.wait_for_message(inbox_id=inbox.inbox_id)
				except TimeoutError:
					return f'No email received in the inbox in {self.email_timeout}s'
				return self._serialize_message_for_llm(latest_message)
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/integrations/agentmail/email_tools.py",
"license": "MIT License",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/integrations/gmail_2fa_integration.py | """
Gmail 2FA Integration Example with Grant Mechanism
This example demonstrates how to use the Gmail integration for handling 2FA codes
during web automation with a robust credential grant and re-authentication system.
Features:
- Automatic credential validation and setup
- Interactive OAuth grant flow when credentials are missing/invalid
- Fallback re-authentication mechanisms
- Clear error handling and user guidance
Setup:
1. Enable Gmail API in Google Cloud Console
2. Create OAuth 2.0 credentials and download JSON
3. Save credentials as ~/.config/browseruse/gmail_credentials.json
4. Run this example - it will guide you through OAuth setup if needed
"""
import asyncio
import json
import os
import sys
from dotenv import load_dotenv
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
load_dotenv()
from browser_use import Agent, ChatOpenAI, Tools
from browser_use.config import CONFIG
from browser_use.integrations.gmail import GmailService, register_gmail_actions
class GmailGrantManager:
	"""
	Manages Gmail OAuth credential grants and authentication flows.
	Provides a robust mechanism for setting up and maintaining Gmail API access.
	"""

	def __init__(self) -> None:
		# Paths come from the browser-use config dir so credentials are shared
		# with GmailService, which reads the same file locations.
		self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR
		self.credentials_file = self.config_dir / 'gmail_credentials.json'
		self.token_file = self.config_dir / 'gmail_token.json'
		print(f'GmailGrantManager initialized with config_dir: {self.config_dir}')
		print(f'GmailGrantManager initialized with credentials_file: {self.credentials_file}')
		print(f'GmailGrantManager initialized with token_file: {self.token_file}')

	def check_credentials_exist(self) -> bool:
		"""Check if OAuth credentials file exists."""
		return self.credentials_file.exists()

	def check_token_exists(self) -> bool:
		"""Check if saved token file exists."""
		return self.token_file.exists()

	def validate_credentials_format(self) -> tuple[bool, str]:
		"""
		Validate that the credentials file has the correct format.
		Returns (is_valid, error_message)
		"""
		if not self.check_credentials_exist():
			return False, 'Credentials file not found'
		try:
			with open(self.credentials_file) as f:
				creds = json.load(f)
			# Accept if either 'web' or 'installed' section exists and is not empty
			if creds.get('web') or creds.get('installed'):
				return True, 'Credentials file is valid'
			return False, "Invalid credentials format - neither 'web' nor 'installed' sections found"
		except json.JSONDecodeError:
			return False, 'Credentials file is not valid JSON'
		except Exception as e:
			return False, f'Error reading credentials file: {e}'

	async def setup_oauth_credentials(self) -> bool:
		"""
		Guide user through OAuth credentials setup process.
		Returns True if setup is successful.
		"""
		print('\n🔐 Gmail OAuth Credentials Setup Required')
		print('=' * 50)
		if not self.check_credentials_exist():
			print('❌ Gmail credentials file not found')
		else:
			is_valid, error = self.validate_credentials_format()
			if not is_valid:
				print(f'❌ Gmail credentials file is invalid: {error}')
		print('\n📋 To set up Gmail API access:')
		print('1. Go to https://console.cloud.google.com/')
		print('2. Create a new project or select an existing one')
		print('3. Enable the Gmail API:')
		print(' - Go to "APIs & Services" > "Library"')
		print(' - Search for "Gmail API" and enable it')
		print('4. Create OAuth 2.0 credentials:')
		print(' - Go to "APIs & Services" > "Credentials"')
		print(' - Click "Create Credentials" > "OAuth client ID"')
		print(' - Choose "Desktop application"')
		print(' - Download the JSON file')
		print(f'5. Save the JSON file as: {self.credentials_file}')
		print(f'6. Ensure the directory exists: {self.config_dir}')
		# Create config directory if it doesn't exist
		self.config_dir.mkdir(parents=True, exist_ok=True)
		print(f'\n✅ Created config directory: {self.config_dir}')
		# Wait for user to set up credentials
		# Loops until the file is present and valid, or the user skips.
		while True:
			user_input = input('\n❓ Have you saved the credentials file? (y/n/skip): ').lower().strip()
			if user_input == 'skip':
				print('⏭️ Skipping credential validation for now')
				return False
			elif user_input == 'y':
				if self.check_credentials_exist():
					is_valid, error = self.validate_credentials_format()
					if is_valid:
						print('✅ Credentials file found and validated!')
						return True
					else:
						print(f'❌ Credentials file is invalid: {error}')
						print('Please check the file format and try again.')
				else:
					print(f'❌ Credentials file still not found at: {self.credentials_file}')
			elif user_input == 'n':
				print('⏳ Please complete the setup steps above and try again.')
			else:
				print('Please enter y, n, or skip')

	async def test_authentication(self, gmail_service: GmailService) -> tuple[bool, str]:
		"""
		Test Gmail authentication and return status.
		Returns (success, message)
		"""
		try:
			print('🔍 Testing Gmail authentication...')
			success = await gmail_service.authenticate()
			if success and gmail_service.is_authenticated():
				print('✅ Gmail authentication successful!')
				return True, 'Authentication successful'
			else:
				return False, 'Authentication failed - invalid credentials or OAuth flow failed'
		except Exception as e:
			return False, f'Authentication error: {e}'

	async def handle_authentication_failure(self, gmail_service: GmailService, error_msg: str) -> bool:
		"""
		Handle authentication failures with fallback mechanisms.
		Returns True if recovery was successful.
		"""
		print(f'\n❌ Gmail authentication failed: {error_msg}')
		print('\n🔧 Attempting recovery...')
		# Option 1: Try removing old token file
		# A stale/expired token is the most common failure, so retry without it first.
		if self.token_file.exists():
			print('🗑️ Removing old token file to force re-authentication...')
			try:
				self.token_file.unlink()
				print('✅ Old token file removed')
				# Try authentication again
				success = await gmail_service.authenticate()
				if success:
					print('✅ Re-authentication successful!')
					return True
			except Exception as e:
				print(f'❌ Failed to remove token file: {e}')
		# Option 2: Validate and potentially re-setup credentials
		is_valid, cred_error = self.validate_credentials_format()
		if not is_valid:
			print(f'\n❌ Credentials file issue: {cred_error}')
			print('🔧 Initiating credential re-setup...')
			return await self.setup_oauth_credentials()
		# Option 3: Provide manual troubleshooting steps
		print('\n🔧 Manual troubleshooting steps:')
		print('1. Check that Gmail API is enabled in Google Cloud Console')
		print('2. Verify OAuth consent screen is configured')
		print('3. Ensure redirect URIs include http://localhost:8080')
		print('4. Check if credentials file is for the correct project')
		print('5. Try regenerating OAuth credentials in Google Cloud Console')
		retry = input('\n❓ Would you like to retry authentication? (y/n): ').lower().strip()
		if retry == 'y':
			success = await gmail_service.authenticate()
			return success
		return False
async def main():
	"""Walk through the full Gmail 2FA integration demo.

	Steps: validate/setup credentials, authenticate, register Gmail actions
	on a Tools instance, then run three agents that exercise email retrieval,
	2FA-code detection, and a simulated 2FA login flow. Returns early (after
	printing a reason) whenever a setup or recovery step fails.
	"""
	print('🚀 Gmail 2FA Integration Example with Grant Mechanism')
	print('=' * 60)
	# Initialize grant manager
	grant_manager = GmailGrantManager()
	# Step 1: Check and validate credentials
	print('🔍 Step 1: Validating Gmail credentials...')
	if not grant_manager.check_credentials_exist():
		print('❌ No Gmail credentials found')
		# No credentials at all — run the interactive OAuth setup flow.
		setup_success = await grant_manager.setup_oauth_credentials()
		if not setup_success:
			print('⏹️ Setup cancelled or failed. Exiting...')
			return
	else:
		is_valid, error = grant_manager.validate_credentials_format()
		if not is_valid:
			print(f'❌ Invalid credentials: {error}')
			# File exists but is malformed — re-run setup.
			setup_success = await grant_manager.setup_oauth_credentials()
			if not setup_success:
				print('⏹️ Setup cancelled or failed. Exiting...')
				return
		else:
			print('✅ Gmail credentials file found and validated')
	# Step 2: Initialize Gmail service and test authentication
	print('\n🔍 Step 2: Testing Gmail authentication...')
	gmail_service = GmailService()
	auth_success, auth_message = await grant_manager.test_authentication(gmail_service)
	if not auth_success:
		print(f'❌ Initial authentication failed: {auth_message}')
		# Attempt automatic recovery (token reset, credential re-setup, manual retry).
		recovery_success = await grant_manager.handle_authentication_failure(gmail_service, auth_message)
		if not recovery_success:
			print('❌ Failed to recover Gmail authentication. Please check your setup.')
			return
	# Step 3: Initialize tools with authenticated service
	print('\n🔍 Step 3: Registering Gmail actions...')
	tools = Tools()
	register_gmail_actions(tools, gmail_service=gmail_service)
	print('✅ Gmail actions registered with tools')
	print('Available Gmail actions:')
	print('- get_recent_emails: Get recent emails with filtering')
	print()
	# Initialize LLM shared by all three demo agents.
	llm = ChatOpenAI(model='gpt-4.1-mini')
	# Step 4: Test Gmail functionality
	print('🔍 Step 4: Testing Gmail email retrieval...')
	agent = Agent(task='Get recent emails from Gmail to test the integration is working properly', llm=llm, tools=tools)
	try:
		history = await agent.run()
		print('✅ Gmail email retrieval test completed')
	except Exception as e:
		print(f'❌ Gmail email retrieval test failed: {e}')
		# Try one more recovery attempt
		print('🔧 Attempting final recovery...')
		recovery_success = await grant_manager.handle_authentication_failure(gmail_service, str(e))
		if recovery_success:
			print('✅ Recovery successful, re-running test...')
			history = await agent.run()
		else:
			print('❌ Final recovery failed. Please check your Gmail API setup.')
			return
	print('\n' + '=' * 60)
	# Step 5: Demonstrate 2FA code finding
	print('🔍 Step 5: Testing 2FA code detection...')
	agent2 = Agent(
		task='Search for any 2FA verification codes or OTP codes in recent Gmail emails from the last 30 minutes',
		llm=llm,
		tools=tools,
	)
	history2 = await agent2.run()
	print('✅ 2FA code search completed')
	print('\n' + '=' * 60)
	# Step 6: Simulate complete login flow
	print('🔍 Step 6: Demonstrating complete 2FA login flow...')
	agent3 = Agent(
		task="""
	Demonstrate a complete 2FA-enabled login flow:
	1. Check for any existing 2FA codes in recent emails
	2. Explain how the agent would handle a typical login:
	- Navigate to a login page
	- Enter credentials
	- Wait for 2FA prompt
	- Use get_recent_emails to find the verification code
	- Extract and enter the 2FA code
	3. Show what types of emails and codes can be detected
	""",
		llm=llm,
		tools=tools,
	)
	history3 = await agent3.run()
	print('✅ Complete 2FA flow demonstration completed')
	print('\n' + '=' * 60)
	print('🎉 Gmail 2FA Integration with Grant Mechanism completed successfully!')
	print('\n💡 Key features demonstrated:')
	print('- ✅ Automatic credential validation and setup')
	print('- ✅ Robust error handling and recovery mechanisms')
	print('- ✅ Interactive OAuth grant flow')
	print('- ✅ Token refresh and re-authentication')
	print('- ✅ 2FA code detection and extraction')
	print('\n🔧 Grant mechanism benefits:')
	print('- Handles missing or invalid credentials gracefully')
	print('- Provides clear setup instructions')
	print('- Automatically recovers from authentication failures')
	print('- Validates credential format before use')
	print('- Offers multiple fallback options')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/integrations/gmail_2fa_integration.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/models/aws.py | """
AWS Bedrock Examples
This file demonstrates how to use AWS Bedrock models with browser-use.
We provide two classes:
1. ChatAnthropicBedrock - Convenience class for Anthropic Claude models
2. ChatAWSBedrock - General AWS Bedrock client supporting all providers
Requirements:
- AWS credentials configured via environment variables
- boto3 installed: pip install boto3
- Access to AWS Bedrock models in your region
"""
import asyncio
from browser_use import Agent
from browser_use.llm import ChatAnthropicBedrock, ChatAWSBedrock
async def example_anthropic_bedrock():
	"""Example using ChatAnthropicBedrock - convenience class for Claude models."""
	print('🔹 ChatAnthropicBedrock Example')
	# Claude Sonnet served through AWS Bedrock in us-east-1.
	llm = ChatAnthropicBedrock(
		model='us.anthropic.claude-sonnet-4-20250514-v1:0',
		aws_region='us-east-1',
		temperature=0.7,
	)
	print(f'Model: {llm.name}')
	print(f'Provider: {llm.provider}')
	# Single task string shared by the agent and the console output.
	task = "Navigate to google.com and search for 'AWS Bedrock pricing'"
	agent = Agent(task=task, llm=llm)
	print(f'Task: {task}')
	# Keep the demo short: two agent steps at most.
	result = await agent.run(max_steps=2)
	print(f'Result: {result}')
async def example_aws_bedrock():
	"""Example using ChatAWSBedrock - general client for any Bedrock model."""
	print('\n🔹 ChatAWSBedrock Example')
	# ChatAWSBedrock accepts any Bedrock model id; Meta Llama here as an example.
	llm = ChatAWSBedrock(
		model='us.meta.llama4-maverick-17b-128e-instruct-v1:0'.replace('-128e', ''),  # noqa: intentionally equal below
		aws_region='us-east-1',
		temperature=0.5,
	)
	print(f'Model: {llm.name}')
	print(f'Provider: {llm.provider}')
	# Single task string shared by the agent and the console output.
	task = 'Go to github.com and find the most popular Python repository'
	agent = Agent(task=task, llm=llm)
	print(f'Task: {task}')
	# Keep the demo short: two agent steps at most.
	result = await agent.run(max_steps=2)
	print(f'Result: {result}')
async def main():
	"""Run AWS Bedrock examples."""
	banner = '=' * 40
	print('🚀 AWS Bedrock Examples')
	print(banner)
	print('Make sure you have AWS credentials configured:')
	print('export AWS_ACCESS_KEY_ID=your_key')
	print('export AWS_SECRET_ACCESS_KEY=your_secret')
	print('export AWS_DEFAULT_REGION=us-east-1')
	print(banner)
	try:
		# Run both examples back to back.
		await example_aws_bedrock()
		await example_anthropic_bedrock()
	except Exception as e:
		print(f'❌ Error: {e}')
		for hint in (
			'Make sure you have:',
			'- Valid AWS credentials configured',
			'- Access to AWS Bedrock in your region',
			'- boto3 installed: pip install boto3',
		):
			print(hint)


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/aws.py",
"license": "MIT License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/deepseek-chat.py | import asyncio
import os
from browser_use import Agent
from browser_use.llm import ChatDeepSeek
# Add your custom instructions
# These lines are appended to the agent's system prompt via extend_system_message.
extend_system_message = """
Remember the most important rules:
1. When performing a search task, open https://www.google.com/ first for search.
2. Final output.
"""
# Fail fast with setup instructions when the DeepSeek API key is missing.
deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
if deepseek_api_key is None:
	print('Make sure you have DEEPSEEK_API_KEY:')
	print('export DEEPSEEK_API_KEY=your_key')
	exit(0)
async def main():
	"""Run a DeepSeek-powered agent on a news-research task."""
	# DeepSeek exposes an OpenAI-compatible endpoint at this base URL.
	llm = ChatDeepSeek(
		model='deepseek-chat',
		base_url='https://api.deepseek.com/v1',
		api_key=deepseek_api_key,
	)
	agent = Agent(
		task='What should we pay attention to in the recent new rules on tariffs in China-US trade?',
		llm=llm,
		use_vision=False,
		extend_system_message=extend_system_message,
	)
	await agent.run()


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/deepseek-chat.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/gpt-4.1.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(model='gpt-4.1-mini')
agent = Agent(
task='Go to amazon.com, click on the first link, and give me the title of the page',
llm=llm,
)
async def main():
	"""Drive the module-level agent, then wait for a keypress before exiting."""
	steps_budget = 10
	await agent.run(max_steps=steps_budget)
	input('Press Enter to continue...')


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/gpt-4.1.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/gpt-5-mini.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(model='gpt-5-mini')
agent = Agent(
llm=llm,
task='Find out which one is cooler: the monkey park or a dolphin tour in Tenerife?',
)
async def main():
	"""Drive the module-level agent, then wait for a keypress before exiting."""
	steps_budget = 20
	await agent.run(max_steps=steps_budget)
	input('Press Enter to continue...')


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/gpt-5-mini.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/langchain/chat.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, TypeVar, overload
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from examples.models.langchain.serializer import LangChainMessageSerializer
if TYPE_CHECKING:
from langchain_core.language_models.chat_models import BaseChatModel as LangChainBaseChatModel # type: ignore
from langchain_core.messages import AIMessage as LangChainAIMessage # type: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatLangchain(BaseChatModel):
	"""
	A wrapper around LangChain BaseChatModel that implements the browser-use BaseChatModel protocol.

	This class allows you to use any LangChain-compatible model with browser-use.
	"""

	# The LangChain model to wrap
	chat: 'LangChainBaseChatModel'

	@property
	def model(self) -> str:
		# Alias kept for interface parity with other browser-use chat models.
		return self.name

	@property
	def provider(self) -> str:
		"""Return the provider name based on the LangChain model class."""
		model_class_name = self.chat.__class__.__name__.lower()
		if 'openai' in model_class_name:
			return 'openai'
		elif 'anthropic' in model_class_name or 'claude' in model_class_name:
			return 'anthropic'
		elif 'google' in model_class_name or 'gemini' in model_class_name:
			return 'google'
		elif 'groq' in model_class_name:
			return 'groq'
		elif 'ollama' in model_class_name:
			return 'ollama'
		elif 'deepseek' in model_class_name:
			return 'deepseek'
		else:
			# Unknown wrapper class — fall back to a generic label.
			return 'langchain'

	@property
	def name(self) -> str:
		"""Return the model name."""
		# Try to get model name from the LangChain model using getattr to avoid type errors;
		# different LangChain integrations expose either `model_name` or `model`.
		model_name = getattr(self.chat, 'model_name', None)
		if model_name:
			return str(model_name)
		model_attr = getattr(self.chat, 'model', None)
		if model_attr:
			return str(model_attr)
		return self.chat.__class__.__name__

	def _get_usage(self, response: 'LangChainAIMessage') -> ChatInvokeUsage | None:
		"""Map LangChain ``usage_metadata`` onto browser-use ``ChatInvokeUsage`` (None if absent)."""
		usage = response.usage_metadata
		if usage is None:
			return None
		prompt_tokens = usage['input_tokens'] or 0
		completion_tokens = usage['output_tokens'] or 0
		total_tokens = usage['total_tokens'] or 0
		# Cache-token details are optional in LangChain's usage dict.
		input_token_details = usage.get('input_token_details', None)
		if input_token_details is not None:
			prompt_cached_tokens = input_token_details.get('cache_read', None)
			prompt_cache_creation_tokens = input_token_details.get('cache_creation', None)
		else:
			prompt_cached_tokens = None
			prompt_cache_creation_tokens = None
		return ChatInvokeUsage(
			prompt_tokens=prompt_tokens,
			prompt_cached_tokens=prompt_cached_tokens,
			prompt_cache_creation_tokens=prompt_cache_creation_tokens,
			prompt_image_tokens=None,
			completion_tokens=completion_tokens,
			total_tokens=total_tokens,
		)

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: None = None) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T]) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the LangChain model with the given messages.

		Args:
			messages: List of browser-use chat messages
			output_format: Optional Pydantic model class for structured output (not supported in basic LangChain integration)

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelProviderError: on any LangChain failure, an unexpected response type,
				or a response that cannot be parsed into ``output_format``.
		"""
		# Convert browser-use messages to LangChain messages
		langchain_messages = LangChainMessageSerializer.serialize_messages(messages)
		try:
			if output_format is None:
				# Return string response
				response = await self.chat.ainvoke(langchain_messages)  # type: ignore
				# Import at runtime for isinstance check
				from langchain_core.messages import AIMessage as LangChainAIMessage  # type: ignore

				if not isinstance(response, LangChainAIMessage):
					raise ModelProviderError(
						message=f'Response is not an AIMessage: {type(response)}',
						model=self.name,
					)
				# Extract content from LangChain response
				content = response.content if hasattr(response, 'content') else str(response)
				usage = self._get_usage(response)
				return ChatInvokeCompletion(
					completion=str(content),
					usage=usage,
				)
			else:
				# Use LangChain's structured output capability
				try:
					structured_chat = self.chat.with_structured_output(output_format)
					parsed_object = await structured_chat.ainvoke(langchain_messages)
					# For structured output, usage metadata is typically not available
					# in the parsed object since it's a Pydantic model, not an AIMessage
					usage = None
					# Type cast since LangChain's with_structured_output returns the correct type
					return ChatInvokeCompletion(
						completion=parsed_object,  # type: ignore
						usage=usage,
					)
				except AttributeError:
					# Fall back to manual parsing if with_structured_output is not available
					response = await self.chat.ainvoke(langchain_messages)  # type: ignore
					# BUGFIX: this isinstance check previously used the string
					# 'LangChainAIMessage' (a TYPE_CHECKING-only name), which raises
					# TypeError at runtime. Import the real class here, mirroring the
					# non-structured branch above.
					from langchain_core.messages import AIMessage as LangChainAIMessage  # type: ignore

					if not isinstance(response, LangChainAIMessage):
						raise ModelProviderError(
							message=f'Response is not an AIMessage: {type(response)}',
							model=self.name,
						)
					content = response.content if hasattr(response, 'content') else str(response)
					try:
						if isinstance(content, str):
							import json

							parsed_data = json.loads(content)
							if isinstance(parsed_data, dict):
								parsed_object = output_format(**parsed_data)
							else:
								raise ValueError('Parsed JSON is not a dictionary')
						else:
							raise ValueError('Content is not a string and structured output not supported')
					except Exception as e:
						raise ModelProviderError(
							message=f'Failed to parse response as {output_format.__name__}: {e}',
							model=self.name,
						) from e
					usage = self._get_usage(response)
					return ChatInvokeCompletion(
						completion=parsed_object,
						usage=usage,
					)
		except Exception as e:
			# Convert any LangChain errors to browser-use ModelProviderError
			raise ModelProviderError(
				message=f'LangChain model error: {str(e)}',
				model=self.name,
			) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/langchain/chat.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/models/langchain/example.py | """
Example of using LangChain models with browser-use.
This example demonstrates how to:
1. Wrap a LangChain model with ChatLangchain
2. Use it with a browser-use Agent
3. Run a simple web automation task
@file purpose: Example usage of LangChain integration with browser-use
"""
import asyncio
from langchain_openai import ChatOpenAI # pyright: ignore
from browser_use import Agent
from examples.models.langchain.chat import ChatLangchain
async def main():
	"""Basic example using ChatLangchain with OpenAI through LangChain."""
	# Build the underlying LangChain model first...
	backing_model = ChatOpenAI(
		model='gpt-4.1-mini',
		temperature=0.1,
	)
	# ...then adapt it to the browser-use BaseChatModel protocol.
	llm = ChatLangchain(chat=backing_model)
	task = "Go to google.com and search for 'browser automation with Python'"
	agent = Agent(
		task=task,
		llm=llm,
	)
	print(f'🚀 Starting task: {task}')
	print(f'🤖 Using model: {llm.name} (provider: {llm.provider})')
	history = await agent.run()
	print(f'✅ Task completed! Steps taken: {len(history.history)}')
	# Print the final result if available
	if history.final_result():
		print(f'📋 Final result: {history.final_result()}')
	return history


if __name__ == '__main__':
	print('🌐 Browser-use LangChain Integration Example')
	print('=' * 45)
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/langchain/example.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/langchain/serializer.py | import json
from typing import overload
from langchain_core.messages import ( # pyright: ignore
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.messages import ( # pyright: ignore
ToolCall as LangChainToolCall,
)
from langchain_core.messages.base import BaseMessage as LangChainBaseMessage # pyright: ignore
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
ToolCall,
UserMessage,
)
from browser_use.llm.messages import (
SystemMessage as BrowserUseSystemMessage,
)
class LangChainMessageSerializer:
	"""Serializer for converting between browser-use message types and LangChain message types."""

	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | list[str | dict]:
		"""Convert user message content for LangChain compatibility.

		Plain strings pass through; multipart content becomes a list of
		LangChain-style content-part dicts.
		"""
		if isinstance(content, str):
			return content
		serialized_parts = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(
					{
						'type': 'text',
						'text': part.text,
					}
				)
			elif part.type == 'image_url':
				# LangChain format for images
				serialized_parts.append(
					{'type': 'image_url', 'image_url': {'url': part.image_url.url, 'detail': part.image_url.detail}}
				)
		return serialized_parts

	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str:
		"""Convert system message content to text string for LangChain compatibility.

		Non-text parts are dropped; text parts are joined with newlines.
		"""
		if isinstance(content, str):
			return content
		text_parts = []
		for part in content:
			if part.type == 'text':
				text_parts.append(part.text)
		return '\n'.join(text_parts)

	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str:
		"""Convert assistant message content to text string for LangChain compatibility.

		None becomes an empty string; refusal parts are currently skipped.
		"""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		text_parts = []
		for part in content:
			if part.type == 'text':
				text_parts.append(part.text)
			# elif part.type == 'refusal':
			# 	# Include refusal content as text
			# 	text_parts.append(f'[Refusal: {part.refusal}]')
		return '\n'.join(text_parts)

	@staticmethod
	def _serialize_tool_call(tool_call: ToolCall) -> LangChainToolCall:
		"""Convert browser-use ToolCall to LangChain ToolCall."""
		# Parse the arguments string to a dict for LangChain
		try:
			args_dict = json.loads(tool_call.function.arguments)
		except json.JSONDecodeError:
			# If parsing fails, wrap in a dict
			args_dict = {'arguments': tool_call.function.arguments}
		return LangChainToolCall(
			name=tool_call.function.name,
			args=args_dict,
			id=tool_call.id,
		)

	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> HumanMessage: ...

	@overload
	@staticmethod
	def serialize(message: BrowserUseSystemMessage) -> SystemMessage: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> AIMessage: ...

	@staticmethod
	def serialize(message: BaseMessage) -> LangChainBaseMessage:
		"""Serialize a browser-use message to a LangChain message.

		Raises ValueError for message types outside User/System/Assistant.
		"""
		if isinstance(message, UserMessage):
			content = LangChainMessageSerializer._serialize_user_content(message.content)
			return HumanMessage(content=content, name=message.name)
		elif isinstance(message, BrowserUseSystemMessage):
			content = LangChainMessageSerializer._serialize_system_content(message.content)
			return SystemMessage(content=content, name=message.name)
		elif isinstance(message, AssistantMessage):
			# Handle content
			content = LangChainMessageSerializer._serialize_assistant_content(message.content)
			# For simplicity, we'll ignore tool calls in LangChain integration
			# as requested by the user
			return AIMessage(
				content=content,
				name=message.name,
			)
		else:
			raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[LangChainBaseMessage]:
		"""Serialize a list of browser-use messages to LangChain messages."""
		return [LangChainMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/langchain/serializer.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/models/llama4-groq.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatGroq
groq_api_key = os.environ.get('GROQ_API_KEY')
llm = ChatGroq(
model='meta-llama/llama-4-maverick-17b-128e-instruct',
# temperature=0.1,
)
# llm = ChatGroq(
# model='meta-llama/llama-4-maverick-17b-128e-instruct',
# api_key=os.environ.get('GROQ_API_KEY'),
# temperature=0.0,
# )
task = 'Go to amazon.com, search for laptop, sort by best rating, and give me the price of the first result'
async def main():
	"""Run the module-level task with the Groq-hosted Llama model."""
	await Agent(task=task, llm=llm).run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/llama4-groq.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/openrouter.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(
# model='x-ai/grok-4',
model='deepcogito/cogito-v2.1-671b',
base_url='https://openrouter.ai/api/v1',
api_key=os.getenv('OPENROUTER_API_KEY'),
)
agent = Agent(
task='Find the number of stars of the browser-use repo',
llm=llm,
use_vision=False,
)
async def main():
	"""Run the module-level agent for at most 10 steps."""
	steps_budget = 10
	await agent.run(max_steps=steps_budget)


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/openrouter.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/observability/openLLMetry.py | import asyncio
import os
from dotenv import load_dotenv
# test if traceloop is installed
try:
from traceloop.sdk import Traceloop # type: ignore
except ImportError:
print('Traceloop is not installed')
exit(1)
from browser_use import Agent
load_dotenv()
api_key = os.getenv('TRACELOOP_API_KEY')
Traceloop.init(api_key=api_key, disable_batch=True)
async def main():
	"""Run a one-off agent; Traceloop instruments the run automatically."""
	agent = Agent('Find the founders of browser-use')
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/observability/openLLMetry.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/use-cases/extract_pdf_content.py | #!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = ["browser-use", "mistralai"]
# ///
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
import asyncio
import logging
from browser_use import Agent, ChatOpenAI
logger = logging.getLogger(__name__)
async def main():
	"""Ask an agent to open a government PDF and report what is on page 3.

	Fix: the task prompt contained a typo ('Navigate to the following UR,')
	which could confuse the LLM about the instruction; corrected to 'URL'.
	"""
	agent = Agent(
		task="""
		Objective: Navigate to the following URL, what is on page 3?
		URL: https://docs.house.gov/meetings/GO/GO00/20220929/115171/HHRG-117-GO00-20220929-SD010.pdf
		""",
		llm=ChatOpenAI(model='gpt-4.1-mini'),
	)
	result = await agent.run()
	logger.info(result)


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/extract_pdf_content.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:tests/ci/evaluate_tasks.py | """
Runs all agent tasks in parallel (up to 10 at a time) using separate subprocesses.
Each task gets its own Python process, preventing browser session interference.
Fails with exit code 1 if 0% of tasks pass.
"""
import argparse
import asyncio
import glob
import json
import logging
import os
import sys
import warnings
import anyio
import yaml
from dotenv import load_dotenv
from pydantic import BaseModel
load_dotenv()
from browser_use import Agent, AgentHistoryList, BrowserProfile, BrowserSession, ChatBrowserUse
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.messages import UserMessage
# --- CONFIG ---
MAX_PARALLEL = 10  # upper bound on concurrently running task subprocesses
# Task directory: first CLI arg when present and not a flag, otherwise
# ../agent_tasks relative to this file.
TASK_DIR = (
	sys.argv[1]
	if len(sys.argv) > 1 and not sys.argv[1].startswith('--')
	else os.path.join(os.path.dirname(__file__), '../agent_tasks')
)
# One YAML file per agent task.
TASK_FILES = glob.glob(os.path.join(TASK_DIR, '*.yaml'))
class JudgeResponse(BaseModel):
	"""Structured verdict the judge LLM must return."""

	# True when the judge considers the task criteria satisfied.
	success: bool
	# Free-text justification for the verdict.
	explanation: str
async def run_single_task(task_file):
	"""Run a single task in the current process (called by subprocess).

	Loads the YAML task definition, runs a browser agent against it, then asks
	a judge LLM to grade the agent's final output. Always returns a dict with
	keys 'file', 'success', 'explanation'. Debug output goes to stderr so the
	parent process can parse a clean JSON result from stdout.
	"""
	try:
		print(f'[DEBUG] Starting task: {os.path.basename(task_file)}', file=sys.stderr)
		# Suppress all logging in subprocess to avoid interfering with JSON output
		logging.getLogger().setLevel(logging.CRITICAL)
		for logger_name in ['browser_use', 'telemetry', 'message_manager']:
			logging.getLogger(logger_name).setLevel(logging.CRITICAL)
		warnings.filterwarnings('ignore')
		print('[DEBUG] Loading task file...', file=sys.stderr)
		content = await anyio.Path(task_file).read_text()
		task_data = yaml.safe_load(content)
		task = task_data['task']
		# Judge criteria and step budget are optional keys in the task YAML.
		judge_context = task_data.get('judge_context', ['The agent must solve the task'])
		max_steps = task_data.get('max_steps', 15)
		print(f'[DEBUG] Task: {task[:100]}...', file=sys.stderr)
		print(f'[DEBUG] Max steps: {max_steps}', file=sys.stderr)
		api_key = os.getenv('BROWSER_USE_API_KEY')
		if not api_key:
			# Missing secrets (e.g. fork PRs) are reported as skipped, not failed.
			print('[SKIP] BROWSER_USE_API_KEY is not set - skipping task evaluation', file=sys.stderr)
			return {
				'file': os.path.basename(task_file),
				'success': True,  # Mark as success so it doesn't fail CI
				'explanation': 'Skipped - API key not available (fork PR or missing secret)',
			}
		agent_llm = ChatBrowserUse(api_key=api_key)
		# Check if Google API key is available for judge LLM
		google_api_key = os.getenv('GOOGLE_API_KEY')
		if not google_api_key:
			print('[SKIP] GOOGLE_API_KEY is not set - skipping task evaluation', file=sys.stderr)
			return {
				'file': os.path.basename(task_file),
				'success': True,  # Mark as success so it doesn't fail CI
				'explanation': 'Skipped - Google API key not available (fork PR or missing secret)',
			}
		judge_llm = ChatGoogle(model='gemini-flash-lite-latest')
		print('[DEBUG] LLMs initialized', file=sys.stderr)
		# Each subprocess gets its own profile and session
		print('[DEBUG] Creating browser session...', file=sys.stderr)
		profile = BrowserProfile(
			headless=True,
			user_data_dir=None,
			chromium_sandbox=False,  # Disable sandbox for CI environment (GitHub Actions)
		)
		session = BrowserSession(browser_profile=profile)
		print('[DEBUG] Browser session created', file=sys.stderr)
		# Test if browser is working
		# (smoke-test failure is logged but deliberately not fatal — the agent run below may still work)
		try:
			await session.start()
			from browser_use.browser.events import NavigateToUrlEvent

			event = session.event_bus.dispatch(NavigateToUrlEvent(url='https://httpbin.org/get', new_tab=True))
			await event
			print('[DEBUG] Browser test: navigation successful', file=sys.stderr)
			title = await session.get_current_page_title()
			print(f"[DEBUG] Browser test: got title '{title}'", file=sys.stderr)
		except Exception as browser_error:
			print(f'[DEBUG] Browser test failed: {str(browser_error)}', file=sys.stderr)
			print(
				f'[DEBUG] Browser error type: {type(browser_error).__name__}',
				file=sys.stderr,
			)
		print('[DEBUG] Starting agent execution...', file=sys.stderr)
		agent = Agent(task=task, llm=agent_llm, browser_session=session)
		try:
			history: AgentHistoryList = await agent.run(max_steps=max_steps)
			print('[DEBUG] Agent.run() returned successfully', file=sys.stderr)
		except Exception as agent_error:
			print(
				f'[DEBUG] Agent.run() failed with error: {str(agent_error)}',
				file=sys.stderr,
			)
			print(f'[DEBUG] Error type: {type(agent_error).__name__}', file=sys.stderr)
			# Re-raise to be caught by outer try-catch
			raise agent_error
		agent_output = history.final_result() or ''
		print('[DEBUG] Agent execution completed', file=sys.stderr)
		# Test if LLM is working by making a simple call
		try:
			response = await agent_llm.ainvoke([UserMessage(content="Say 'test'")])
			print(
				f'[DEBUG] LLM test call successful: {response.completion[:50]}',
				file=sys.stderr,
			)
		except Exception as llm_error:
			print(f'[DEBUG] LLM test call failed: {str(llm_error)}', file=sys.stderr)
		# Debug: capture more details about the agent execution
		total_steps = len(history.history) if hasattr(history, 'history') else 0
		last_action = history.history[-1] if hasattr(history, 'history') and history.history else None
		debug_info = f'Steps: {total_steps}, Final result length: {len(agent_output)}'
		if last_action:
			debug_info += f', Last action: {type(last_action).__name__}'
		# Log to stderr so it shows up in GitHub Actions (won't interfere with JSON output to stdout)
		print(f'[DEBUG] Task {os.path.basename(task_file)}: {debug_info}', file=sys.stderr)
		if agent_output:
			print(
				f'[DEBUG] Agent output preview: {agent_output[:200]}...',
				file=sys.stderr,
			)
		else:
			print('[DEBUG] Agent produced no output!', file=sys.stderr)
		# Build the judge prompt; criteria are rendered as a bulleted list.
		criteria = '\n- '.join(judge_context)
		judge_prompt = f"""
You are a evaluator of a browser agent task inside a ci/cd pipeline. Here was the agent's task:
{task}
Here is the agent's output:
{agent_output if agent_output else '[No output provided]'}
Debug info: {debug_info}
Criteria for success:
- {criteria}
Reply in JSON with keys: success (true/false), explanation (string).
If the agent provided no output, explain what might have gone wrong.
"""
		response = await judge_llm.ainvoke([UserMessage(content=judge_prompt)], output_format=JudgeResponse)
		judge_response = response.completion
		result = {
			'file': os.path.basename(task_file),
			'success': judge_response.success,
			'explanation': judge_response.explanation,
		}
		# Clean up session before returning
		await session.kill()
		return result
	except Exception as e:
		# Ensure session cleanup even on error
		# NOTE(review): 'session' may be unbound here if the failure happened before
		# BrowserSession() was created; the bare except below also swallows that NameError.
		try:
			await session.kill()
		except Exception:
			pass
		return {
			'file': os.path.basename(task_file),
			'success': False,
			'explanation': f'Task failed with error: {str(e)}',
		}
async def run_task_subprocess(task_file, semaphore):
    """Run a single task file in a separate subprocess and collect its result.

    Spawns this same script with ``--task <task_file>`` (see the ``__main__``
    guard), captures stdout/stderr, and parses the last JSON-looking line of
    stdout as the task's result dict.

    Args:
        task_file: Path to the task YAML file to evaluate.
        semaphore: asyncio.Semaphore bounding how many subprocesses run at once.

    Returns:
        dict with keys 'file', 'success', 'explanation'. Always returns a
        dict; every failure mode is reported via success=False, not raised.
    """
    async with semaphore:
        try:
            # Set environment to reduce noise in subprocess; propagate our
            # sys.path so the child resolves the same packages we do.
            env = os.environ.copy()
            env['PYTHONPATH'] = os.pathsep.join(sys.path)
            proc = await asyncio.create_subprocess_exec(
                sys.executable,
                __file__,
                '--task',
                task_file,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env=env,
            )
            stdout, stderr = await proc.communicate()
            if proc.returncode == 0:
                try:
                    # Parse JSON result from subprocess
                    stdout_text = stdout.decode().strip()
                    stderr_text = stderr.decode().strip()
                    # Display subprocess debug logs (child writes debug to stderr)
                    if stderr_text:
                        print(f'[SUBPROCESS {os.path.basename(task_file)}] Debug output:')
                        for line in stderr_text.split('\n'):
                            if line.strip():
                                print(f' {line}')
                    # Find the JSON line (should be the last line that starts with {)
                    lines = stdout_text.split('\n')
                    json_line = None
                    for line in reversed(lines):
                        line = line.strip()
                        if line.startswith('{') and line.endswith('}'):
                            json_line = line
                            break
                    if json_line:
                        result = json.loads(json_line)
                        print(f'[PARENT] Task {os.path.basename(task_file)} completed: {result["success"]}')
                    else:
                        raise ValueError(f'No JSON found in output: {stdout_text}')
                # BUGFIX: also catch KeyError so a JSON payload missing the
                # 'success' key is reported here as a parse problem instead of
                # escaping to the outer handler and being mislabelled as a
                # subprocess-start failure. (UnicodeDecodeError is a ValueError
                # subclass, so a bad stdout decode lands here too.)
                except (json.JSONDecodeError, ValueError, KeyError) as e:
                    result = {
                        'file': os.path.basename(task_file),
                        'success': False,
                        'explanation': f'Failed to parse subprocess result: {str(e)[:100]}',
                    }
                    print(f'[PARENT] Task {os.path.basename(task_file)} failed to parse: {str(e)}')
                    # errors='replace' so non-UTF-8 output can't re-raise inside
                    # this handler (the first decode above may be what failed).
                    print(f'[PARENT] Full stdout was: {stdout.decode(errors="replace")[:500]}')
            else:
                stderr_text = stderr.decode().strip()
                result = {
                    'file': os.path.basename(task_file),
                    'success': False,
                    'explanation': f'Subprocess failed (code {proc.returncode}): {stderr_text[:200]}',
                }
                print(f'[PARENT] Task {os.path.basename(task_file)} subprocess failed with code {proc.returncode}')
                if stderr_text:
                    print(f'[PARENT] stderr: {stderr_text[:1000]}')
                stdout_text = stdout.decode().strip()
                if stdout_text:
                    print(f'[PARENT] stdout: {stdout_text[:1000]}')
        except Exception as e:
            # Catch-all so one broken spawn never takes down the whole run.
            result = {
                'file': os.path.basename(task_file),
                'success': False,
                'explanation': f'Failed to start subprocess: {str(e)}',
            }
            print(f'[PARENT] Failed to start subprocess for {os.path.basename(task_file)}: {str(e)}')
        return result
async def main():
    """Run every task file in a parallel subprocess and print a summary table.

    Returns:
        (passed, total) tuple for the ``__main__`` guard to act on.
    """
    semaphore = asyncio.Semaphore(MAX_PARALLEL)
    print(f'Found task files: {TASK_FILES}')
    if not TASK_FILES:
        print('No task files found!')
        return 0, 0
    # Fan every task out to its own subprocess, bounded by the semaphore.
    results = await asyncio.gather(*(run_task_subprocess(task_file, semaphore) for task_file in TASK_FILES))
    passed = sum(1 for r in results if r['success'])
    total = len(results)
    print('\n' + '=' * 60)
    print(f'{"RESULTS":^60}\n')
    # Build the summary table: one row per task, status rendered as an emoji.
    headers = ['Task', 'Success', 'Reason']
    rows = [[r['file'], '✅' if r['success'] else '❌', r['explanation']] for r in results]
    # Each column is as wide as its widest cell (header included).
    col_widths = [max(len(str(cell)) for cell in column) for column in zip(headers, *rows)]
    print(' | '.join(header.ljust(width) for header, width in zip(headers, col_widths)))
    print('-+-'.join('-' * width for width in col_widths))
    for row in rows:
        print(' | '.join(str(cell).ljust(width) for cell, width in zip(row, col_widths)))
    print('\n' + '=' * 60)
    print(f'\n{"SCORE":^60}')
    print(f'\n{"=" * 60}\n')
    print(f'\n{"*" * 10} {passed}/{total} PASSED {"*" * 10}\n')
    print('=' * 60 + '\n')
    # Output results for GitHub Actions
    print(f'PASSED={passed}')
    print(f'TOTAL={total}')
    # Detailed per-task results as a single JSON line for GitHub Actions.
    detailed_results = [
        {
            'task': r['file'].replace('.yaml', ''),
            'success': r['success'],
            'reason': r['explanation'],
        }
        for r in results
    ]
    print('DETAILED_RESULTS=' + json.dumps(detailed_results))
    return passed, total
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, help='Path to a single task YAML file (for subprocess mode)')
    cli = parser.parse_args()
    if cli.task:
        # Subprocess mode: run one task and emit ONLY a JSON line on stdout,
        # which the parent process parses (see run_task_subprocess).
        try:
            single_result = asyncio.run(run_single_task(cli.task))
            print(json.dumps(single_result))
        except Exception as e:
            # Even on critical failure, output valid JSON
            print(
                json.dumps(
                    {
                        'file': os.path.basename(cli.task),
                        'success': False,
                        'explanation': f'Critical subprocess error: {str(e)}',
                    }
                )
            )
    else:
        # Parent mode: fan all tasks out to subprocesses; main() prints results.
        passed, total = asyncio.run(main())
        # Only a 0% pass rate fails the CI job outright.
        if total > 0 and passed == 0:
            print('\n❌ CRITICAL: 0% pass rate - all tasks failed!')
            sys.exit(1)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/evaluate_tasks.py",
"license": "MIT License",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_tools.py | import asyncio
import json
import os
import tempfile
import time
import anyio
import pytest
from pydantic import BaseModel, Field
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.filesystem.file_system import FileSystem
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
    """Session-scoped HTTP server fixture serving fixed static pages."""
    server = HTTPServer()
    server.start()
    # Single-line pages, registered in one pass (insertion order preserved).
    simple_pages = {
        '/': '<html><head><title>Test Home Page</title></head><body><h1>Test Home Page</h1><p>Welcome to the test site</p></body></html>',
        '/page1': '<html><head><title>Test Page 1</title></head><body><h1>Test Page 1</h1><p>This is test page 1</p></body></html>',
        '/page2': '<html><head><title>Test Page 2</title></head><body><h1>Test Page 2</h1><p>This is test page 2</p></body></html>',
    }
    for path, html in simple_pages.items():
        server.expect_request(path).respond_with_data(html, content_type='text/html')
    # Multi-line search-results page.
    server.expect_request('/search').respond_with_data(
        """
<html>
<head><title>Search Results</title></head>
<body>
<h1>Search Results</h1>
<div class="results">
<div class="result">Result 1</div>
<div class="result">Result 2</div>
<div class="result">Result 3</div>
</div>
</body>
</html>
""",
        content_type='text/html',
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Base URL of the session-scoped test HTTP server."""
    host, port = http_server.host, http_server.port
    return f'http://{host}:{port}'
@pytest.fixture(scope='module')
async def browser_session():
    """Module-scoped headless BrowserSession, killed at module teardown."""
    # keep_alive=True so the session survives across all tests in the module.
    profile = BrowserProfile(
        headless=True,
        user_data_dir=None,
        keep_alive=True,
    )
    session = BrowserSession(browser_profile=profile)
    await session.start()
    yield session
    await session.kill()
@pytest.fixture(scope='function')
def tools():
    """Fresh Tools instance per test, so registrations never leak across tests."""
    instance = Tools()
    return instance
class TestToolsIntegration:
    """Integration tests for Tools using actual browser instances.

    Relies on the module-level ``tools``, ``browser_session``, ``base_url``
    and ``http_server`` fixtures; every test drives a real headless browser.
    """

    async def test_registry_actions(self, tools, browser_session):
        """Test that the registry contains the expected default actions."""
        # Check that common actions are registered
        common_actions = [
            'navigate',
            'search',
            'click',
            'input',
            'scroll',
            'go_back',
            'switch',
            'close',
            'wait',
        ]
        # Each registered action must expose both a callable and a description.
        for action in common_actions:
            assert action in tools.registry.registry.actions
            assert tools.registry.registry.actions[action].function is not None
            assert tools.registry.registry.actions[action].description is not None

    async def test_custom_action_registration(self, tools, browser_session, base_url):
        """Test registering a custom action and executing it."""

        # Define a custom action
        class CustomParams(BaseModel):
            text: str

        @tools.action('Test custom action', param_model=CustomParams)
        async def custom_action(params: CustomParams, browser_session):
            current_url = await browser_session.get_current_page_url()
            return ActionResult(extracted_content=f'Custom action executed with: {params.text} on {current_url}')

        # Navigate to a page first
        await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
        # Execute the custom action directly (registered actions become
        # attributes on the Tools instance).
        result = await tools.custom_action(text='test_value', browser_session=browser_session)
        # Verify the result
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'Custom action executed with: test_value on' in result.extracted_content
        assert f'{base_url}/page1' in result.extracted_content

    async def test_wait_action(self, tools, browser_session):
        """Test that the wait action correctly waits for the specified duration."""
        # verify that it's in the default action set
        wait_action = None
        for action_name, action in tools.registry.registry.actions.items():
            if 'wait' in action_name.lower() and 'seconds' in str(action.param_model.model_fields):
                wait_action = action
                break
        assert wait_action is not None, 'Could not find wait action in tools'
        # Check that it has seconds parameter with default
        assert 'seconds' in wait_action.param_model.model_fields
        schema = wait_action.param_model.model_json_schema()
        assert schema['properties']['seconds']['default'] == 3
        # Record start time
        start_time = time.time()
        # Execute wait action
        result = await tools.wait(seconds=3, browser_session=browser_session)
        # Record end time
        end_time = time.time()
        # Verify the result
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'Waited for' in result.extracted_content or 'Waiting for' in result.extracted_content
        # Verify that approximately 1 second has passed (allowing some margin)
        # NOTE(review): bounds assume tools.wait shaves ~1s off the requested
        # duration to account for LLM latency — confirm against Tools.wait.
        assert end_time - start_time <= 2.5  # We wait 3-1 seconds for LLM call
        # longer wait
        # Record start time
        start_time = time.time()
        # Execute wait action
        result = await tools.wait(seconds=5, browser_session=browser_session)
        # Record end time
        end_time = time.time()
        # Verify the result
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'Waited for' in result.extracted_content or 'Waiting for' in result.extracted_content
        assert 3.5 <= end_time - start_time <= 4.5  # We wait 5-1 seconds for LLM call

    async def test_go_back_action(self, tools, browser_session, base_url):
        """Test that go_back action navigates to the previous page."""
        # Navigate to first page
        await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
        # Store the first page URL
        first_url = await browser_session.get_current_page_url()
        print(f'First page URL: {first_url}')
        # Navigate to second page
        await tools.navigate(url=f'{base_url}/page2', new_tab=False, browser_session=browser_session)
        # Verify we're on the second page
        second_url = await browser_session.get_current_page_url()
        print(f'Second page URL: {second_url}')
        assert f'{base_url}/page2' in second_url
        # Execute go back action
        result = await tools.go_back(browser_session=browser_session)
        # Verify the result
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'Navigated back' in result.extracted_content
        # Add another delay to allow the navigation to complete
        await asyncio.sleep(1)
        # Verify we're back on a different page than before
        final_url = await browser_session.get_current_page_url()
        print(f'Final page URL after going back: {final_url}')
        # Try to verify we're back on the first page, but don't fail the test if not
        assert f'{base_url}/page1' in final_url, f'Expected to return to page1 but got {final_url}'

    async def test_navigation_chain(self, tools, browser_session, base_url):
        """Test navigating through multiple pages and back through history."""
        # Set up a chain of navigation: Home -> Page1 -> Page2
        urls = [f'{base_url}/', f'{base_url}/page1', f'{base_url}/page2']
        # Navigate to each page in sequence
        for url in urls:
            await tools.navigate(url=url, new_tab=False, browser_session=browser_session)
            # Verify current page
            current_url = await browser_session.get_current_page_url()
            assert url in current_url
        # Go back twice and verify each step
        for expected_url in reversed(urls[:-1]):
            await tools.go_back(browser_session=browser_session)
            await asyncio.sleep(1)  # Wait for navigation to complete
            current_url = await browser_session.get_current_page_url()
            assert expected_url in current_url

    async def test_excluded_actions(self, browser_session):
        """Test that excluded actions are not registered."""
        # Create tools with excluded actions
        excluded_tools = Tools(exclude_actions=['search', 'scroll'])
        # Verify excluded actions are not in the registry
        assert 'search' not in excluded_tools.registry.registry.actions
        assert 'scroll' not in excluded_tools.registry.registry.actions
        # But other actions are still there
        assert 'navigate' in excluded_tools.registry.registry.actions
        assert 'click' in excluded_tools.registry.registry.actions

    async def test_search_action(self, tools, browser_session, base_url):
        """Test the search action."""
        await browser_session.get_current_page_url()
        # Execute search action - it will actually navigate to our search results page
        result = await tools.search(query='Python web automation', browser_session=browser_session)
        # Verify the result
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'Searched' in result.extracted_content and 'Python web automation' in result.extracted_content
        # For our test purposes, we just verify we're on some URL
        current_url = await browser_session.get_current_page_url()
        assert current_url is not None and 'Python' in current_url

    async def test_done_action(self, tools, browser_session, base_url):
        """Test that DoneAction completes a task and reports success or failure."""
        # Create a temporary directory for the file system
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            # First navigate to a page
            await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
            success_done_message = 'Successfully completed task'
            # Execute done action with file_system
            result = await tools.done(
                text=success_done_message, success=True, browser_session=browser_session, file_system=file_system
            )
            # Verify the result
            assert isinstance(result, ActionResult)
            assert result.extracted_content is not None
            assert success_done_message in result.extracted_content
            assert result.success is True
            assert result.is_done is True
            assert result.error is None
            failed_done_message = 'Failed to complete task'
            # Execute failed done action with file_system
            result = await tools.done(
                text=failed_done_message, success=False, browser_session=browser_session, file_system=file_system
            )
            # Verify the result
            assert isinstance(result, ActionResult)
            assert result.extracted_content is not None
            assert failed_done_message in result.extracted_content
            assert result.success is False
            assert result.is_done is True
            assert result.error is None

    async def test_get_dropdown_options(self, tools, browser_session, base_url, http_server):
        """Test that get_dropdown_options correctly retrieves options from a dropdown."""
        # Add route for dropdown test page
        http_server.expect_request('/dropdown1').respond_with_data(
            """
<!DOCTYPE html>
<html>
<head>
<title>Dropdown Test</title>
</head>
<body>
<h1>Dropdown Test</h1>
<select id="test-dropdown" name="test-dropdown">
<option value="">Please select</option>
<option value="option1">First Option</option>
<option value="option2">Second Option</option>
<option value="option3">Third Option</option>
</select>
</body>
</html>
""",
            content_type='text/html',
        )
        # Navigate to the dropdown test page
        await tools.navigate(url=f'{base_url}/dropdown1', new_tab=False, browser_session=browser_session)
        # Wait for the page to load using CDP
        cdp_session = await browser_session.get_or_create_cdp_session()
        assert cdp_session is not None, 'CDP session not initialized'
        # Wait for page load by checking document ready state
        await asyncio.sleep(0.5)  # Brief wait for navigation to start
        ready_state = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': 'document.readyState'}, session_id=cdp_session.session_id
        )
        # If not complete, wait a bit more
        if ready_state.get('result', {}).get('value') != 'complete':
            await asyncio.sleep(1.0)
        # Initialize the DOM state to populate the selector map
        await browser_session.get_browser_state_summary()
        # Get the selector map
        selector_map = await browser_session.get_selector_map()
        # Find the dropdown element in the selector map
        dropdown_index = None
        for idx, element in selector_map.items():
            if element.tag_name.lower() == 'select':
                dropdown_index = idx
                break
        assert dropdown_index is not None, (
            f'Could not find select element in selector map. Available elements: {[f"{idx}: {element.tag_name}" for idx, element in selector_map.items()]}'
        )
        # Execute the action with the dropdown index
        result = await tools.dropdown_options(index=dropdown_index, browser_session=browser_session)
        expected_options = [
            {'index': 0, 'text': 'Please select', 'value': ''},
            {'index': 1, 'text': 'First Option', 'value': 'option1'},
            {'index': 2, 'text': 'Second Option', 'value': 'option2'},
            {'index': 3, 'text': 'Third Option', 'value': 'option3'},
        ]
        # Verify the result structure
        assert isinstance(result, ActionResult)
        # Core logic validation: Verify all options are returned
        assert result.extracted_content is not None
        for option in expected_options[1:]:  # Skip the placeholder option
            assert option['text'] in result.extracted_content, f"Option '{option['text']}' not found in result content"
        # Verify the instruction for using the text in select_dropdown is included
        assert 'Use the exact text or value string' in result.extracted_content and 'select_dropdown' in result.extracted_content
        # Verify the actual dropdown options in the DOM using CDP
        dropdown_options_result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={
                'expression': """
JSON.stringify((() => {
const select = document.getElementById('test-dropdown');
return Array.from(select.options).map(opt => ({
text: opt.text,
value: opt.value
}));
})())
""",
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        dropdown_options_json = dropdown_options_result.get('result', {}).get('value', '[]')
        import json

        # CDP may hand back either the JSON string or an already-decoded value.
        dropdown_options = json.loads(dropdown_options_json) if isinstance(dropdown_options_json, str) else dropdown_options_json
        # Verify the dropdown has the expected options
        assert len(dropdown_options) == len(expected_options), (
            f'Expected {len(expected_options)} options, got {len(dropdown_options)}'
        )
        for i, expected in enumerate(expected_options):
            actual = dropdown_options[i]
            assert actual['text'] == expected['text'], (
                f"Option at index {i} has wrong text: expected '{expected['text']}', got '{actual['text']}'"
            )
            assert actual['value'] == expected['value'], (
                f"Option at index {i} has wrong value: expected '{expected['value']}', got '{actual['value']}'"
            )

    async def test_select_dropdown_option(self, tools, browser_session, base_url, http_server):
        """Test that select_dropdown_option correctly selects an option from a dropdown."""
        # Add route for dropdown test page
        http_server.expect_request('/dropdown2').respond_with_data(
            """
<!DOCTYPE html>
<html>
<head>
<title>Dropdown Test</title>
</head>
<body>
<h1>Dropdown Test</h1>
<select id="test-dropdown" name="test-dropdown">
<option value="">Please select</option>
<option value="option1">First Option</option>
<option value="option2">Second Option</option>
<option value="option3">Third Option</option>
</select>
</body>
</html>
""",
            content_type='text/html',
        )
        # Navigate to the dropdown test page
        await tools.navigate(url=f'{base_url}/dropdown2', new_tab=False, browser_session=browser_session)
        # Wait for the page to load using CDP
        cdp_session = await browser_session.get_or_create_cdp_session()
        assert cdp_session is not None, 'CDP session not initialized'
        # Wait for page load by checking document ready state
        await asyncio.sleep(0.5)  # Brief wait for navigation to start
        ready_state = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': 'document.readyState'}, session_id=cdp_session.session_id
        )
        # If not complete, wait a bit more
        if ready_state.get('result', {}).get('value') != 'complete':
            await asyncio.sleep(1.0)
        # populate the selector map with highlight indices
        await browser_session.get_browser_state_summary()
        # Now get the selector map which should contain our dropdown
        selector_map = await browser_session.get_selector_map()
        # Find the dropdown element in the selector map
        dropdown_index = None
        for idx, element in selector_map.items():
            if element.tag_name.lower() == 'select':
                dropdown_index = idx
                break
        assert dropdown_index is not None, (
            f'Could not find select element in selector map. Available elements: {[f"{idx}: {element.tag_name}" for idx, element in selector_map.items()]}'
        )
        # Execute the action with the dropdown index
        result = await tools.select_dropdown(index=dropdown_index, text='Second Option', browser_session=browser_session)
        # Verify the result structure
        assert isinstance(result, ActionResult)
        # Core logic validation: Verify selection was successful
        assert result.extracted_content is not None
        assert 'selected option' in result.extracted_content.lower()
        assert 'Second Option' in result.extracted_content
        # Verify the actual dropdown selection was made by checking the DOM using CDP
        selected_value_result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': "document.getElementById('test-dropdown').value"}, session_id=cdp_session.session_id
        )
        selected_value = selected_value_result.get('result', {}).get('value')
        assert selected_value == 'option2'  # Second Option has value "option2"
class TestStructuredOutputDoneWithFiles:
    """Tests for file handling in structured output done action.

    Each test builds a Tools instance with a pydantic ``output_model`` so the
    ``done`` action takes structured ``data`` instead of free text.
    """

    async def test_structured_output_done_without_files(self, browser_session, base_url):
        """Structured output done action works without files (backward compat)."""

        class MyOutput(BaseModel):
            answer: str = Field(description='The answer')

        tools = Tools(output_model=MyOutput)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.done(
                data={'answer': 'hello'},
                success=True,
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            assert result.is_done is True
            assert result.success is True
            assert result.extracted_content is not None
            # extracted_content carries the structured data serialized as JSON.
            output = json.loads(result.extracted_content)
            assert output == {'answer': 'hello'}
            assert result.attachments == []

    async def test_structured_output_done_with_files_to_display(self, browser_session, base_url):
        """Structured output done action resolves files_to_display into attachments."""

        class MyOutput(BaseModel):
            summary: str

        tools = Tools(output_model=MyOutput)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            await file_system.write_file('report.txt', 'some report content')
            result = await tools.done(
                data={'summary': 'done'},
                success=True,
                files_to_display=['report.txt'],
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            assert result.is_done is True
            assert result.success is True
            assert result.extracted_content is not None
            output = json.loads(result.extracted_content)
            assert output == {'summary': 'done'}
            # Attachment path is the resolved FileSystem location of report.txt.
            assert result.attachments is not None
            assert len(result.attachments) == 1
            assert result.attachments[0].endswith('report.txt')

    async def test_structured_output_done_auto_attaches_downloads(self, browser_session, base_url):
        """Session downloads are auto-attached even without files_to_display."""

        class MyOutput(BaseModel):
            url: str

        tools = Tools(output_model=MyOutput)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            # Simulate a CDP-tracked browser download
            fake_download = os.path.join(temp_dir, 'tax-bill.pdf')
            await anyio.Path(fake_download).write_bytes(b'%PDF-1.4 fake pdf content')
            # NOTE(review): _downloaded_files is a private BrowserSession attr;
            # saved and restored in finally to avoid leaking into other tests.
            saved_downloads = browser_session._downloaded_files.copy()
            browser_session._downloaded_files.append(fake_download)
            try:
                result = await tools.done(
                    data={'url': f'{base_url}/bill.pdf'},
                    success=True,
                    browser_session=browser_session,
                    file_system=file_system,
                )
                assert isinstance(result, ActionResult)
                assert result.is_done is True
                assert result.extracted_content is not None
                output = json.loads(result.extracted_content)
                assert output == {'url': f'{base_url}/bill.pdf'}
                # The download should be auto-attached
                assert result.attachments is not None
                assert len(result.attachments) == 1
                assert result.attachments[0] == fake_download
            finally:
                browser_session._downloaded_files = saved_downloads

    async def test_structured_output_done_deduplicates_attachments(self, browser_session):
        """Downloads already covered by files_to_display are not duplicated."""

        class MyOutput(BaseModel):
            status: str

        tools = Tools(output_model=MyOutput)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            await file_system.write_file('report.txt', 'content here')
            # The same file appears in both files_to_display and session downloads
            fs_path = str(file_system.get_dir() / 'report.txt')
            saved_downloads = browser_session._downloaded_files.copy()
            browser_session._downloaded_files.append(fs_path)
            try:
                result = await tools.done(
                    data={'status': 'ok'},
                    success=True,
                    files_to_display=['report.txt'],
                    browser_session=browser_session,
                    file_system=file_system,
                )
                assert isinstance(result, ActionResult)
                # Should have exactly 1 attachment, not 2
                assert result.attachments is not None
                assert len(result.attachments) == 1
                assert result.attachments[0] == fs_path
            finally:
                browser_session._downloaded_files = saved_downloads

    async def test_structured_output_done_nonexistent_file_ignored(self, browser_session):
        """Files that don't exist in FileSystem are not included via files_to_display."""

        class MyOutput(BaseModel):
            value: int

        tools = Tools(output_model=MyOutput)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.done(
                data={'value': 42},
                success=True,
                files_to_display=['nonexistent.txt'],
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            assert result.is_done is True
            assert result.extracted_content is not None
            output = json.loads(result.extracted_content)
            assert output == {'value': 42}
            # nonexistent file should not appear in attachments
            assert result.attachments == []

    async def test_structured_output_schema_hides_internal_fields(self):
        """The JSON schema for StructuredOutputAction hides success and files_to_display."""
        from browser_use.tools.views import StructuredOutputAction

        class MyOutput(BaseModel):
            name: str

        # Internal control fields must not be exposed to the LLM-facing schema.
        schema = StructuredOutputAction[MyOutput].model_json_schema()
        top_level_props = schema.get('properties', {})
        assert 'success' not in top_level_props
        assert 'files_to_display' not in top_level_props
        # data should still be present
        assert 'data' in top_level_props
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_tools.py",
"license": "MIT License",
"lines": 535,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.