repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/dom_watchdog.py | browser_use/browser/watchdogs/dom_watchdog.py | """DOM watchdog for browser DOM tree management using CDP."""
import asyncio
import time
from typing import TYPE_CHECKING
from browser_use.browser.events import (
BrowserErrorEvent,
BrowserStateRequestEvent,
ScreenshotEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.dom.service import DomService
from browser_use.dom.views import (
EnhancedDOMTreeNode,
SerializedDOMState,
)
from browser_use.observability import observe_debug
from browser_use.utils import create_task_with_error_handling, time_execution_async
if TYPE_CHECKING:
from browser_use.browser.views import BrowserStateSummary, NetworkRequest, PageInfo, PaginationButton
class DOMWatchdog(BaseWatchdog):
"""Handles DOM tree building, serialization, and element access via CDP.
This watchdog acts as a bridge between the event-driven browser session
and the DomService implementation, maintaining cached state and providing
helper methods for other watchdogs.
"""
LISTENS_TO = [TabCreatedEvent, BrowserStateRequestEvent]
EMITS = [BrowserErrorEvent]
# Public properties for other watchdogs
selector_map: dict[int, EnhancedDOMTreeNode] | None = None
current_dom_state: SerializedDOMState | None = None
enhanced_dom_tree: EnhancedDOMTreeNode | None = None
# Internal DOM service
_dom_service: DomService | None = None
# Network tracking - maps request_id to (url, start_time, method, resource_type)
_pending_requests: dict[str, tuple[str, float, str, str | None]] = {}
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
# self.logger.debug('Setting up init scripts in browser')
return None
def _get_recent_events_str(self, limit: int = 10) -> str | None:
"""Get the most recent events from the event bus as JSON.
Args:
limit: Maximum number of recent events to include
Returns:
JSON string of recent events or None if not available
"""
import json
try:
# Get all events from history, sorted by creation time (most recent first)
all_events = sorted(
self.browser_session.event_bus.event_history.values(), key=lambda e: e.event_created_at.timestamp(), reverse=True
)
# Take the most recent events and create JSON-serializable data
recent_events_data = []
for event in all_events[:limit]:
event_data = {
'event_type': event.event_type,
'timestamp': event.event_created_at.isoformat(),
}
# Add specific fields for certain event types
if hasattr(event, 'url'):
event_data['url'] = getattr(event, 'url')
if hasattr(event, 'error_message'):
event_data['error_message'] = getattr(event, 'error_message')
if hasattr(event, 'target_id'):
event_data['target_id'] = getattr(event, 'target_id')
recent_events_data.append(event_data)
return json.dumps(recent_events_data) # Return empty array if no events
except Exception as e:
self.logger.debug(f'Failed to get recent events: {e}')
return json.dumps([]) # Return empty JSON array on error
async def _get_pending_network_requests(self) -> list['NetworkRequest']:
"""Get list of currently pending network requests.
Uses document.readyState and performance API to detect pending requests.
Filters out ads, tracking, and other noise.
Returns:
List of NetworkRequest objects representing currently loading resources
"""
from browser_use.browser.views import NetworkRequest
try:
# get_or_create_cdp_session() now handles focus validation automatically
cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
# Use performance API to get pending requests
js_code = """
(function() {
const now = performance.now();
const resources = performance.getEntriesByType('resource');
const pending = [];
// Check document readyState
const docLoading = document.readyState !== 'complete';
// Common ad/tracking domains and patterns to filter out
const adDomains = [
// Standard ad/tracking networks
'doubleclick.net', 'googlesyndication.com', 'googletagmanager.com',
'facebook.net', 'analytics', 'ads', 'tracking', 'pixel',
'hotjar.com', 'clarity.ms', 'mixpanel.com', 'segment.com',
// Analytics platforms
'demdex.net', 'omtrdc.net', 'adobedtm.com', 'ensighten.com',
'newrelic.com', 'nr-data.net', 'google-analytics.com',
// Social media trackers
'connect.facebook.net', 'platform.twitter.com', 'platform.linkedin.com',
// CDN/image hosts (usually not critical for functionality)
'.cloudfront.net/image/', '.akamaized.net/image/',
// Common tracking paths
'/tracker/', '/collector/', '/beacon/', '/telemetry/', '/log/',
'/events/', '/eventBatch', '/track.', '/metrics/'
];
// Get resources that are still loading (responseEnd is 0)
let totalResourcesChecked = 0;
let filteredByResponseEnd = 0;
const allDomains = new Set();
for (const entry of resources) {
totalResourcesChecked++;
// Track all domains from recent resources (for logging)
try {
const hostname = new URL(entry.name).hostname;
if (hostname) allDomains.add(hostname);
} catch (e) {}
if (entry.responseEnd === 0) {
filteredByResponseEnd++;
const url = entry.name;
// Filter out ads and tracking
const isAd = adDomains.some(domain => url.includes(domain));
if (isAd) continue;
// Filter out data: URLs and very long URLs (often inline resources)
if (url.startsWith('data:') || url.length > 500) continue;
const loadingDuration = now - entry.startTime;
// Skip requests that have been loading for >10 seconds (likely stuck/polling)
if (loadingDuration > 10000) continue;
const resourceType = entry.initiatorType || 'unknown';
// Filter out non-critical resources (images, fonts, icons) if loading >3 seconds
const nonCriticalTypes = ['img', 'image', 'icon', 'font'];
if (nonCriticalTypes.includes(resourceType) && loadingDuration > 3000) continue;
// Filter out image URLs even if type is unknown
const isImageUrl = /\\.(jpg|jpeg|png|gif|webp|svg|ico)(\\?|$)/i.test(url);
if (isImageUrl && loadingDuration > 3000) continue;
pending.push({
url: url,
method: 'GET',
loading_duration_ms: Math.round(loadingDuration),
resource_type: resourceType
});
}
}
return {
pending_requests: pending,
document_loading: docLoading,
document_ready_state: document.readyState,
debug: {
total_resources: totalResourcesChecked,
with_response_end_zero: filteredByResponseEnd,
after_all_filters: pending.length,
all_domains: Array.from(allDomains)
}
};
})()
"""
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': js_code, 'returnByValue': True}, session_id=cdp_session.session_id
)
if result.get('result', {}).get('type') == 'object':
data = result['result'].get('value', {})
pending = data.get('pending_requests', [])
doc_state = data.get('document_ready_state', 'unknown')
doc_loading = data.get('document_loading', False)
debug_info = data.get('debug', {})
# Get all domains that had recent activity (from JS)
all_domains = debug_info.get('all_domains', [])
all_domains_str = ', '.join(sorted(all_domains)[:5]) if all_domains else 'none'
if len(all_domains) > 5:
all_domains_str += f' +{len(all_domains) - 5} more'
# Debug logging
self.logger.debug(
f'🔍 Network check: document.readyState={doc_state}, loading={doc_loading}, '
f'total_resources={debug_info.get("total_resources", 0)}, '
f'responseEnd=0: {debug_info.get("with_response_end_zero", 0)}, '
f'after_filters={len(pending)}, domains=[{all_domains_str}]'
)
# Convert to NetworkRequest objects
network_requests = []
for req in pending[:20]: # Limit to 20 to avoid overwhelming the context
network_requests.append(
NetworkRequest(
url=req['url'],
method=req.get('method', 'GET'),
loading_duration_ms=req.get('loading_duration_ms', 0.0),
resource_type=req.get('resource_type'),
)
)
return network_requests
except Exception as e:
self.logger.debug(f'Failed to get pending network requests: {e}')
return []
@observe_debug(ignore_input=True, ignore_output=True, name='browser_state_request_event')
async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> 'BrowserStateSummary':
"""Handle browser state request by coordinating DOM building and screenshot capture.
This is the main entry point for getting the complete browser state.
Args:
event: The browser state request event with options
Returns:
Complete BrowserStateSummary with DOM, screenshot, and target info
"""
from browser_use.browser.views import BrowserStateSummary, PageInfo
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: STARTING browser state request')
page_url = await self.browser_session.get_current_page_url()
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got page URL: {page_url}')
# Get focused session for logging (validation already done by get_current_page_url)
if self.browser_session.agent_focus_target_id:
self.logger.debug(f'Current page URL: {page_url}, target_id: {self.browser_session.agent_focus_target_id}')
# check if we should skip DOM tree build for pointless pages
not_a_meaningful_website = page_url.lower().split(':', 1)[0] not in ('http', 'https')
# Check for pending network requests BEFORE waiting (so we can see what's loading)
pending_requests_before_wait = []
if not not_a_meaningful_website:
try:
pending_requests_before_wait = await self._get_pending_network_requests()
if pending_requests_before_wait:
self.logger.debug(f'🔍 Found {len(pending_requests_before_wait)} pending requests before stability wait')
except Exception as e:
self.logger.debug(f'Failed to get pending requests before wait: {e}')
pending_requests = pending_requests_before_wait
# Wait for page stability using browser profile settings (main branch pattern)
if not not_a_meaningful_website:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ⏳ Waiting for page stability...')
try:
if pending_requests_before_wait:
# Reduced from 1s to 0.3s for faster DOM builds while still allowing critical resources to load
await asyncio.sleep(0.3)
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Page stability complete')
except Exception as e:
self.logger.warning(
f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Network waiting failed: {e}, continuing anyway...'
)
# Get tabs info once at the beginning for all paths
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting tabs info...')
tabs_info = await self.browser_session.get_tabs()
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got {len(tabs_info)} tabs')
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Tabs info: {tabs_info}')
# Get viewport / scroll position info, remember changing scroll position should invalidate selector_map cache because it only includes visible elements
# cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
# scroll_info = await cdp_session.cdp_client.send.Runtime.evaluate(
# params={'expression': 'JSON.stringify({y: document.body.scrollTop, x: document.body.scrollLeft, width: document.documentElement.clientWidth, height: document.documentElement.clientHeight})'},
# session_id=cdp_session.session_id,
# )
# self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got scroll info: {scroll_info["result"]}')
try:
# Fast path for empty pages
if not_a_meaningful_website:
self.logger.debug(f'⚡ Skipping BuildDOMTree for empty target: {page_url}')
self.logger.debug(f'📸 Not taking screenshot for empty page: {page_url} (non-http/https URL)')
# Create minimal DOM state
content = SerializedDOMState(_root=None, selector_map={})
# Skip screenshot for empty pages
screenshot_b64 = None
# Try to get page info from CDP, fall back to defaults if unavailable
try:
page_info = await self._get_page_info()
except Exception as e:
self.logger.debug(f'Failed to get page info from CDP for empty page: {e}, using fallback')
# Use default viewport dimensions
viewport = self.browser_session.browser_profile.viewport or {'width': 1280, 'height': 720}
page_info = PageInfo(
viewport_width=viewport['width'],
viewport_height=viewport['height'],
page_width=viewport['width'],
page_height=viewport['height'],
scroll_x=0,
scroll_y=0,
pixels_above=0,
pixels_below=0,
pixels_left=0,
pixels_right=0,
)
return BrowserStateSummary(
dom_state=content,
url=page_url,
title='Empty Tab',
tabs=tabs_info,
screenshot=screenshot_b64,
page_info=page_info,
pixels_above=0,
pixels_below=0,
browser_errors=[],
is_pdf_viewer=False,
recent_events=self._get_recent_events_str() if event.include_recent_events else None,
pending_network_requests=[], # Empty page has no pending requests
pagination_buttons=[], # Empty page has no pagination
closed_popup_messages=self.browser_session._closed_popup_messages.copy(),
)
# Execute DOM building and screenshot capture in parallel
dom_task = None
screenshot_task = None
# Start DOM building task if requested
if event.include_dom:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 🌳 Starting DOM tree build task...')
previous_state = (
self.browser_session._cached_browser_state_summary.dom_state
if self.browser_session._cached_browser_state_summary
else None
)
dom_task = create_task_with_error_handling(
self._build_dom_tree_without_highlights(previous_state),
name='build_dom_tree',
logger_instance=self.logger,
suppress_exceptions=True,
)
# Start clean screenshot task if requested (without JS highlights)
if event.include_screenshot:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Starting clean screenshot task...')
screenshot_task = create_task_with_error_handling(
self._capture_clean_screenshot(),
name='capture_screenshot',
logger_instance=self.logger,
suppress_exceptions=True,
)
# Wait for both tasks to complete
content = None
screenshot_b64 = None
if dom_task:
try:
content = await dom_task
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ DOM tree build completed')
except Exception as e:
self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: DOM build failed: {e}, using minimal state')
content = SerializedDOMState(_root=None, selector_map={})
else:
content = SerializedDOMState(_root=None, selector_map={})
if screenshot_task:
try:
screenshot_b64 = await screenshot_task
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Clean screenshot captured')
except Exception as e:
self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Clean screenshot failed: {e}')
screenshot_b64 = None
# Add browser-side highlights for user visibility
if content and content.selector_map and self.browser_session.browser_profile.dom_highlight_elements:
try:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: 🎨 Adding browser-side highlights...')
await self.browser_session.add_highlights(content.selector_map)
self.logger.debug(
f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ Added browser highlights for {len(content.selector_map)} elements'
)
except Exception as e:
self.logger.warning(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Browser highlighting failed: {e}')
# Ensure we have valid content
if not content:
content = SerializedDOMState(_root=None, selector_map={})
# Tabs info already fetched at the beginning
# Get target title safely
try:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting page title...')
title = await asyncio.wait_for(self.browser_session.get_current_page_title(), timeout=1.0)
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got title: {title}')
except Exception as e:
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Failed to get title: {e}')
title = 'Page'
# Get comprehensive page info from CDP with timeout
try:
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: Getting page info from CDP...')
page_info = await asyncio.wait_for(self._get_page_info(), timeout=1.0)
self.logger.debug(f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Got page info from CDP: {page_info}')
except Exception as e:
self.logger.debug(
f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: Failed to get page info from CDP: {e}, using fallback'
)
# Fallback to default viewport dimensions
viewport = self.browser_session.browser_profile.viewport or {'width': 1280, 'height': 720}
page_info = PageInfo(
viewport_width=viewport['width'],
viewport_height=viewport['height'],
page_width=viewport['width'],
page_height=viewport['height'],
scroll_x=0,
scroll_y=0,
pixels_above=0,
pixels_below=0,
pixels_left=0,
pixels_right=0,
)
# Check for PDF viewer
is_pdf_viewer = page_url.endswith('.pdf') or '/pdf/' in page_url
# Detect pagination buttons from the DOM
pagination_buttons_data = []
if content and content.selector_map:
pagination_buttons_data = self._detect_pagination_buttons(content.selector_map)
# Build and cache the browser state summary
if screenshot_b64:
self.logger.debug(
f'🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Creating BrowserStateSummary with screenshot, length: {len(screenshot_b64)}'
)
else:
self.logger.debug(
'🔍 DOMWatchdog.on_BrowserStateRequestEvent: 📸 Creating BrowserStateSummary WITHOUT screenshot'
)
browser_state = BrowserStateSummary(
dom_state=content,
url=page_url,
title=title,
tabs=tabs_info,
screenshot=screenshot_b64,
page_info=page_info,
pixels_above=0,
pixels_below=0,
browser_errors=[],
is_pdf_viewer=is_pdf_viewer,
recent_events=self._get_recent_events_str() if event.include_recent_events else None,
pending_network_requests=pending_requests,
pagination_buttons=pagination_buttons_data,
closed_popup_messages=self.browser_session._closed_popup_messages.copy(),
)
# Cache the state
self.browser_session._cached_browser_state_summary = browser_state
# Cache viewport size for coordinate conversion (if llm_screenshot_size is enabled)
if page_info:
self.browser_session._original_viewport_size = (page_info.viewport_width, page_info.viewport_height)
self.logger.debug('🔍 DOMWatchdog.on_BrowserStateRequestEvent: ✅ COMPLETED - Returning browser state')
return browser_state
except Exception as e:
self.logger.error(f'Failed to get browser state: {e}')
# Return minimal recovery state
return BrowserStateSummary(
dom_state=SerializedDOMState(_root=None, selector_map={}),
url=page_url if 'page_url' in locals() else '',
title='Error',
tabs=[],
screenshot=None,
page_info=PageInfo(
viewport_width=1280,
viewport_height=720,
page_width=1280,
page_height=720,
scroll_x=0,
scroll_y=0,
pixels_above=0,
pixels_below=0,
pixels_left=0,
pixels_right=0,
),
pixels_above=0,
pixels_below=0,
browser_errors=[str(e)],
is_pdf_viewer=False,
recent_events=None,
pending_network_requests=[], # Error state has no pending requests
pagination_buttons=[], # Error state has no pagination
closed_popup_messages=self.browser_session._closed_popup_messages.copy()
if hasattr(self, 'browser_session') and self.browser_session is not None
else [],
)
@time_execution_async('build_dom_tree_without_highlights')
@observe_debug(ignore_input=True, ignore_output=True, name='build_dom_tree_without_highlights')
async def _build_dom_tree_without_highlights(self, previous_state: SerializedDOMState | None = None) -> SerializedDOMState:
"""Build DOM tree without injecting JavaScript highlights (for parallel execution)."""
try:
self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: STARTING DOM tree build')
# Create or reuse DOM service
if self._dom_service is None:
self._dom_service = DomService(
browser_session=self.browser_session,
logger=self.logger,
cross_origin_iframes=self.browser_session.browser_profile.cross_origin_iframes,
paint_order_filtering=self.browser_session.browser_profile.paint_order_filtering,
max_iframes=self.browser_session.browser_profile.max_iframes,
max_iframe_depth=self.browser_session.browser_profile.max_iframe_depth,
)
# Get serialized DOM tree using the service
self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: Calling DomService.get_serialized_dom_tree...')
start = time.time()
self.current_dom_state, self.enhanced_dom_tree, timing_info = await self._dom_service.get_serialized_dom_tree(
previous_cached_state=previous_state,
)
end = time.time()
total_time_ms = (end - start) * 1000
self.logger.debug(
'🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ DomService.get_serialized_dom_tree completed'
)
# Build hierarchical timing breakdown as single multi-line string
timing_lines = [f'⏱️ Total DOM tree time: {total_time_ms:.2f}ms', '📊 Timing breakdown:']
# get_all_trees breakdown
get_all_trees_ms = timing_info.get('get_all_trees_total_ms', 0)
if get_all_trees_ms > 0:
timing_lines.append(f' ├─ get_all_trees: {get_all_trees_ms:.2f}ms')
iframe_scroll_ms = timing_info.get('iframe_scroll_detection_ms', 0)
cdp_parallel_ms = timing_info.get('cdp_parallel_calls_ms', 0)
snapshot_proc_ms = timing_info.get('snapshot_processing_ms', 0)
if iframe_scroll_ms > 0.01:
timing_lines.append(f' │ ├─ iframe_scroll_detection: {iframe_scroll_ms:.2f}ms')
if cdp_parallel_ms > 0.01:
timing_lines.append(f' │ ├─ cdp_parallel_calls: {cdp_parallel_ms:.2f}ms')
if snapshot_proc_ms > 0.01:
timing_lines.append(f' │ └─ snapshot_processing: {snapshot_proc_ms:.2f}ms')
# build_ax_lookup
build_ax_ms = timing_info.get('build_ax_lookup_ms', 0)
if build_ax_ms > 0.01:
timing_lines.append(f' ├─ build_ax_lookup: {build_ax_ms:.2f}ms')
# build_snapshot_lookup
build_snapshot_ms = timing_info.get('build_snapshot_lookup_ms', 0)
if build_snapshot_ms > 0.01:
timing_lines.append(f' ├─ build_snapshot_lookup: {build_snapshot_ms:.2f}ms')
# construct_enhanced_tree
construct_tree_ms = timing_info.get('construct_enhanced_tree_ms', 0)
if construct_tree_ms > 0.01:
timing_lines.append(f' ├─ construct_enhanced_tree: {construct_tree_ms:.2f}ms')
# serialize_accessible_elements breakdown
serialize_total_ms = timing_info.get('serialize_accessible_elements_total_ms', 0)
if serialize_total_ms > 0.01:
timing_lines.append(f' ├─ serialize_accessible_elements: {serialize_total_ms:.2f}ms')
create_simp_ms = timing_info.get('create_simplified_tree_ms', 0)
paint_order_ms = timing_info.get('calculate_paint_order_ms', 0)
optimize_ms = timing_info.get('optimize_tree_ms', 0)
bbox_ms = timing_info.get('bbox_filtering_ms', 0)
assign_idx_ms = timing_info.get('assign_interactive_indices_ms', 0)
clickable_ms = timing_info.get('clickable_detection_time_ms', 0)
if create_simp_ms > 0.01:
timing_lines.append(f' │ ├─ create_simplified_tree: {create_simp_ms:.2f}ms')
if clickable_ms > 0.01:
timing_lines.append(f' │ │ └─ clickable_detection: {clickable_ms:.2f}ms')
if paint_order_ms > 0.01:
timing_lines.append(f' │ ├─ calculate_paint_order: {paint_order_ms:.2f}ms')
if optimize_ms > 0.01:
timing_lines.append(f' │ ├─ optimize_tree: {optimize_ms:.2f}ms')
if bbox_ms > 0.01:
timing_lines.append(f' │ ├─ bbox_filtering: {bbox_ms:.2f}ms')
if assign_idx_ms > 0.01:
timing_lines.append(f' │ └─ assign_interactive_indices: {assign_idx_ms:.2f}ms')
# Overheads
get_dom_overhead_ms = timing_info.get('get_dom_tree_overhead_ms', 0)
serialize_overhead_ms = timing_info.get('serialization_overhead_ms', 0)
get_serialized_overhead_ms = timing_info.get('get_serialized_dom_tree_overhead_ms', 0)
if get_dom_overhead_ms > 0.1:
timing_lines.append(f' ├─ get_dom_tree_overhead: {get_dom_overhead_ms:.2f}ms')
if serialize_overhead_ms > 0.1:
timing_lines.append(f' ├─ serialization_overhead: {serialize_overhead_ms:.2f}ms')
if get_serialized_overhead_ms > 0.1:
timing_lines.append(f' └─ get_serialized_dom_tree_overhead: {get_serialized_overhead_ms:.2f}ms')
# Calculate total tracked time for validation
main_operations_ms = (
get_all_trees_ms
+ build_ax_ms
+ build_snapshot_ms
+ construct_tree_ms
+ serialize_total_ms
+ get_dom_overhead_ms
+ serialize_overhead_ms
+ get_serialized_overhead_ms
)
untracked_time_ms = total_time_ms - main_operations_ms
if untracked_time_ms > 1.0: # Only log if significant
timing_lines.append(f' ⚠️ untracked_time: {untracked_time_ms:.2f}ms')
# Single log call with all timing info
self.logger.debug('\n'.join(timing_lines))
# Update selector map for other watchdogs
self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: Updating selector maps...')
self.selector_map = self.current_dom_state.selector_map
# Update BrowserSession's cached selector map
if self.browser_session:
self.browser_session.update_cached_selector_map(self.selector_map)
self.logger.debug(
f'🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ Selector maps updated, {len(self.selector_map)} elements'
)
# Skip JavaScript highlighting injection - Python highlighting will be applied later
self.logger.debug('🔍 DOMWatchdog._build_dom_tree_without_highlights: ✅ COMPLETED DOM tree build (no JS highlights)')
return self.current_dom_state
except Exception as e:
self.logger.error(f'Failed to build DOM tree without highlights: {e}')
self.event_bus.dispatch(
BrowserErrorEvent(
error_type='DOMBuildFailed',
message=str(e),
)
)
raise
@time_execution_async('capture_clean_screenshot')
@observe_debug(ignore_input=True, ignore_output=True, name='capture_clean_screenshot')
async def _capture_clean_screenshot(self) -> str:
"""Capture a clean screenshot without JavaScript highlights."""
try:
self.logger.debug('🔍 DOMWatchdog._capture_clean_screenshot: Capturing clean screenshot...')
await self.browser_session.get_or_create_cdp_session(target_id=self.browser_session.agent_focus_target_id, focus=True)
# Check if handler is registered
handlers = self.event_bus.handlers.get('ScreenshotEvent', [])
handler_names = [getattr(h, '__name__', str(h)) for h in handlers]
self.logger.debug(f'📸 ScreenshotEvent handlers registered: {len(handlers)} - {handler_names}')
screenshot_event = self.event_bus.dispatch(ScreenshotEvent(full_page=False))
self.logger.debug('📸 Dispatched ScreenshotEvent, waiting for event to complete...')
# Wait for the event itself to complete (this waits for all handlers)
await screenshot_event
# Get the single handler result
screenshot_b64 = await screenshot_event.event_result(raise_if_any=True, raise_if_none=True)
if screenshot_b64 is None:
raise RuntimeError('Screenshot handler returned None')
self.logger.debug('🔍 DOMWatchdog._capture_clean_screenshot: ✅ Clean screenshot captured successfully')
return str(screenshot_b64)
except TimeoutError:
self.logger.warning('📸 Clean screenshot timed out after 6 seconds - no handler registered or slow page?')
raise
except Exception as e:
self.logger.warning(f'📸 Clean screenshot failed: {type(e).__name__}: {e}')
raise
def _detect_pagination_buttons(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> list['PaginationButton']:
"""Detect pagination buttons from the DOM selector map.
Args:
selector_map: Dictionary mapping element indices to DOM tree nodes
Returns:
List of PaginationButton instances found in the DOM
"""
from browser_use.browser.views import PaginationButton
pagination_buttons_data = []
try:
self.logger.debug('🔍 DOMWatchdog._detect_pagination_buttons: Detecting pagination buttons...')
pagination_buttons_raw = DomService.detect_pagination_buttons(selector_map)
# Convert to PaginationButton instances
pagination_buttons_data = [
PaginationButton(
button_type=btn['button_type'], # type: ignore
backend_node_id=btn['backend_node_id'], # type: ignore
text=btn['text'], # type: ignore
selector=btn['selector'], # type: ignore
is_disabled=btn['is_disabled'], # type: ignore
)
for btn in pagination_buttons_raw
]
if pagination_buttons_data:
self.logger.debug(
f'🔍 DOMWatchdog._detect_pagination_buttons: Found {len(pagination_buttons_data)} pagination buttons'
)
except Exception as e:
self.logger.warning(f'🔍 DOMWatchdog._detect_pagination_buttons: Pagination detection failed: {e}')
return pagination_buttons_data
async def _get_page_info(self) -> 'PageInfo':
"""Get comprehensive page information using a single CDP call.
TODO: should we make this an event as well?
Returns:
PageInfo with all viewport, page dimensions, and scroll information
"""
from browser_use.browser.views import PageInfo
# get_or_create_cdp_session() handles focus validation automatically
cdp_session = await self.browser_session.get_or_create_cdp_session(
target_id=self.browser_session.agent_focus_target_id, focus=True
)
# Get layout metrics which includes all the information we need
metrics = await asyncio.wait_for(
cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id), timeout=10.0
)
# Extract different viewport types
layout_viewport = metrics.get('layoutViewport', {})
visual_viewport = metrics.get('visualViewport', {})
css_visual_viewport = metrics.get('cssVisualViewport', {})
css_layout_viewport = metrics.get('cssLayoutViewport', {})
content_size = metrics.get('contentSize', {})
# Calculate device pixel ratio to convert between device pixels and CSS pixels
# This matches the approach in dom/service.py _get_viewport_ratio method
css_width = css_visual_viewport.get('clientWidth', css_layout_viewport.get('clientWidth', 1280.0))
device_width = visual_viewport.get('clientWidth', css_width)
device_pixel_ratio = device_width / css_width if css_width > 0 else 1.0
# For viewport dimensions, use CSS pixels (what JavaScript sees)
# Prioritize CSS layout viewport, then fall back to layout viewport
viewport_width = int(css_layout_viewport.get('clientWidth') or layout_viewport.get('clientWidth', 1280))
viewport_height = int(css_layout_viewport.get('clientHeight') or layout_viewport.get('clientHeight', 720))
# For total page dimensions, content size is typically in device pixels, so convert to CSS pixels
# by dividing by device pixel ratio
raw_page_width = content_size.get('width', viewport_width * device_pixel_ratio)
raw_page_height = content_size.get('height', viewport_height * device_pixel_ratio)
page_width = int(raw_page_width / device_pixel_ratio)
page_height = int(raw_page_height / device_pixel_ratio)
# For scroll position, use CSS visual viewport if available, otherwise CSS layout viewport
# These should already be in CSS pixels
scroll_x = int(css_visual_viewport.get('pageX') or css_layout_viewport.get('pageX', 0))
scroll_y = int(css_visual_viewport.get('pageY') or css_layout_viewport.get('pageY', 0))
# Calculate scroll information - pixels that are above/below/left/right of current viewport
pixels_above = scroll_y
pixels_below = max(0, page_height - viewport_height - scroll_y)
pixels_left = scroll_x
pixels_right = max(0, page_width - viewport_width - scroll_x)
page_info = PageInfo(
viewport_width=viewport_width,
viewport_height=viewport_height,
page_width=page_width,
page_height=page_height,
scroll_x=scroll_x,
scroll_y=scroll_y,
pixels_above=pixels_above,
pixels_below=pixels_below,
pixels_left=pixels_left,
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/recording_watchdog.py | browser_use/browser/watchdogs/recording_watchdog.py | """Recording Watchdog for Browser Use Sessions."""
import asyncio
from pathlib import Path
from typing import ClassVar
from bubus import BaseEvent
from cdp_use.cdp.page.events import ScreencastFrameEvent
from uuid_extensions import uuid7str
from browser_use.browser.events import BrowserConnectedEvent, BrowserStopEvent
from browser_use.browser.profile import ViewportSize
from browser_use.browser.video_recorder import VideoRecorderService
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
class RecordingWatchdog(BaseWatchdog):
    """Manages video recording of a browser session using CDP screencasting.

    Starts a Page.startScreencast when the browser connects (if the profile
    configures ``record_video_dir``) and finalizes the video file when the
    browser stops.
    """

    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [BrowserConnectedEvent, BrowserStopEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []

    # Active recorder while a screencast is running; None when not recording.
    _recorder: VideoRecorderService | None = None

    async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
        """Starts video recording if it is configured in the browser profile."""
        profile = self.browser_session.browser_profile
        if not profile.record_video_dir:
            return  # Recording not requested

        # Dynamically determine video size when the profile does not fix one.
        size = profile.record_video_size
        if not size:
            self.logger.debug('record_video_size not specified, detecting viewport size...')
            size = await self._get_current_viewport_size()
            if not size:
                self.logger.warning('Cannot start video recording: viewport size could not be determined.')
                return

        video_format = getattr(profile, 'record_video_format', 'mp4').strip('.')
        output_path = Path(profile.record_video_dir) / f'{uuid7str()}.{video_format}'

        self.logger.debug(f'Initializing video recorder for format: {video_format}')
        self._recorder = VideoRecorderService(output_path=output_path, size=size, framerate=profile.record_video_framerate)
        self._recorder.start()
        if not self._recorder._is_active:
            # Recorder failed to initialize (e.g. encoder unavailable); abort quietly.
            self._recorder = None
            return

        # Register the frame handler before starting the screencast so no frame is missed.
        self.browser_session.cdp_client.register.Page.screencastFrame(self.on_screencastFrame)

        try:
            cdp_session = await self.browser_session.get_or_create_cdp_session()
            await cdp_session.cdp_client.send.Page.startScreencast(
                params={
                    'format': 'png',
                    'quality': 90,
                    'maxWidth': size['width'],
                    'maxHeight': size['height'],
                    'everyNthFrame': 1,
                },
                session_id=cdp_session.session_id,
            )
            self.logger.info(f'📹 Started video recording to {output_path}')
        except Exception as e:
            self.logger.error(f'Failed to start screencast via CDP: {e}')
            # Roll back the recorder so on_screencastFrame becomes a no-op.
            if self._recorder:
                self._recorder.stop_and_save()
                self._recorder = None

    async def _get_current_viewport_size(self) -> ViewportSize | None:
        """Gets the current viewport size directly from the browser via CDP.

        Returns:
            The detected viewport size, or None when CDP metrics are unavailable.
        """
        try:
            cdp_session = await self.browser_session.get_or_create_cdp_session()
            metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)
            # Use cssVisualViewport for the most accurate representation of the visible area
            viewport = metrics.get('cssVisualViewport', {})
            width = viewport.get('clientWidth')
            height = viewport.get('clientHeight')
            if width and height:
                self.logger.debug(f'Detected viewport size: {width}x{height}')
                return ViewportSize(width=int(width), height=int(height))
        except Exception as e:
            self.logger.warning(f'Failed to get viewport size from browser: {e}')
        return None

    def on_screencastFrame(self, event: ScreencastFrameEvent, session_id: str | None) -> None:
        """Synchronous handler for incoming screencast frames.

        Feeds the frame to the recorder and acknowledges it asynchronously
        (CDP stops sending frames until each one is acked).
        """
        if not self._recorder:
            return
        self._recorder.add_frame(event['data'])
        create_task_with_error_handling(
            self._ack_screencast_frame(event, session_id),
            name='ack_screencast_frame',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )

    async def _ack_screencast_frame(self, event: ScreencastFrameEvent, session_id: str | None) -> None:
        """Asynchronously acknowledges a screencast frame."""
        try:
            await self.browser_session.cdp_client.send.Page.screencastFrameAck(
                params={'sessionId': event['sessionId']}, session_id=session_id
            )
        except Exception as e:
            self.logger.debug(f'Failed to acknowledge screencast frame: {e}')

    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """Stops the video recording and finalizes the video file.

        Encoding/finalization is blocking, so it is offloaded to the default
        thread-pool executor.
        """
        if self._recorder:
            recorder = self._recorder
            self._recorder = None  # Detach first so late frames are dropped, not recorded
            self.logger.debug('Stopping video recording and saving file...')
            # get_running_loop() is the correct call inside a coroutine;
            # get_event_loop() is deprecated for this use since Python 3.10.
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, recorder.stop_and_save)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/downloads_watchdog.py | browser_use/browser/watchdogs/downloads_watchdog.py | """Downloads watchdog for monitoring and handling file downloads."""
import asyncio
import json
import os
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar
from urllib.parse import urlparse
import anyio
from bubus import BaseEvent
from cdp_use.cdp.browser import DownloadProgressEvent, DownloadWillBeginEvent
from cdp_use.cdp.network import ResponseReceivedEvent
from cdp_use.cdp.target import SessionID, TargetID
from pydantic import PrivateAttr
from browser_use.browser.events import (
BrowserLaunchEvent,
BrowserStateRequestEvent,
BrowserStoppedEvent,
FileDownloadedEvent,
NavigationCompleteEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
pass
class DownloadsWatchdog(BaseWatchdog):
    """Monitors downloads and handles file download events.

    Tracks CDP Browser.download* events, sniffs Network.responseReceived for
    PDFs / Content-Disposition attachments, and dispatches FileDownloadedEvent
    when a file lands in the configured downloads directory.
    """

    # Events this watchdog listens to (for documentation)
    LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [
        BrowserLaunchEvent,
        BrowserStateRequestEvent,
        BrowserStoppedEvent,
        TabCreatedEvent,
        TabClosedEvent,
        NavigationCompleteEvent,
    ]
    # Events this watchdog emits
    EMITS: ClassVar[list[type[BaseEvent[Any]]]] = [
        FileDownloadedEvent,
    ]

    # Private state
    _sessions_with_listeners: set[str] = PrivateAttr(default_factory=set)  # Track sessions that already have download listeners
    _active_downloads: dict[str, Any] = PrivateAttr(default_factory=dict)
    _pdf_viewer_cache: dict[str, bool] = PrivateAttr(default_factory=dict)  # Cache PDF viewer status by target URL
    _download_cdp_session_setup: bool = PrivateAttr(default=False)  # Track if CDP session is set up
    _download_cdp_session: Any = PrivateAttr(default=None)  # Store CDP session reference
    _cdp_event_tasks: set[asyncio.Task] = PrivateAttr(default_factory=set)  # Track CDP event handler tasks
    _cdp_downloads_info: dict[str, dict[str, Any]] = PrivateAttr(default_factory=dict)  # Map guid -> info
    _use_js_fetch_for_local: bool = PrivateAttr(default=False)  # Guard JS fetch path for local regular downloads
    _session_pdf_urls: dict[str, str] = PrivateAttr(default_factory=dict)  # URL -> path for PDFs downloaded this session
    _network_monitored_targets: set[str] = PrivateAttr(default_factory=set)  # Track targets with network monitoring enabled
    _detected_downloads: set[str] = PrivateAttr(default_factory=set)  # Track detected download URLs to avoid duplicates
    _network_callback_registered: bool = PrivateAttr(default=False)  # Track if global network callback is registered
async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> None:
    """Ensure the configured downloads directory exists when the browser launches."""
    self.logger.debug(f'[DownloadsWatchdog] Received BrowserLaunchEvent, EventBus ID: {id(self.event_bus)}')
    configured = self.browser_session.browser_profile.downloads_path
    if not configured:
        return
    expanded_path = Path(configured).expanduser().resolve()
    expanded_path.mkdir(parents=True, exist_ok=True)
    self.logger.debug(f'[DownloadsWatchdog] Ensured downloads directory exists: {expanded_path}')
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
    """Monitor new tabs for downloads."""
    # Downloads path is expected to always be set by the BrowserProfile default.
    assert self.browser_session.browser_profile.downloads_path is not None, 'Downloads path must be configured'

    if not event.target_id:
        self.logger.warning(f'[DownloadsWatchdog] No target found for tab {event.target_id}')
        return
    await self.attach_to_target(event.target_id)
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
    """Stop monitoring closed tabs (no-op: the browser context owns target lifecycle)."""
    return
async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> None:
    """Handle browser state request events.

    Resolves the currently focused target's URL and re-emits it as a
    NavigationCompleteEvent (parented to the requesting event) so PDF
    auto-download checks also run whenever browser state is requested.
    """
    # Use public API - automatically validates and waits for recovery if needed
    self.logger.debug(f'[DownloadsWatchdog] on_BrowserStateRequestEvent started, event_id={event.event_id[-4:]}')
    try:
        cdp_session = await self.browser_session.get_or_create_cdp_session()
    except ValueError:
        self.logger.warning(f'[DownloadsWatchdog] No valid focus, skipping BrowserStateRequestEvent {event.event_id[-4:]}')
        return  # No valid focus, skip
    self.logger.debug(
        f'[DownloadsWatchdog] About to call get_current_page_url(), target_id={cdp_session.target_id[-4:] if cdp_session.target_id else "None"}'
    )
    url = await self.browser_session.get_current_page_url()
    self.logger.debug(f'[DownloadsWatchdog] Got URL: {url[:80] if url else "None"}')
    if not url:
        self.logger.warning(f'[DownloadsWatchdog] No URL found for BrowserStateRequestEvent {event.event_id[-4:]}')
        return
    target_id = cdp_session.target_id
    self.logger.debug(f'[DownloadsWatchdog] About to dispatch NavigationCompleteEvent for target {target_id[-4:]}')
    # Parent the synthetic navigation event to this request for traceability.
    self.event_bus.dispatch(
        NavigationCompleteEvent(
            event_type='NavigationCompleteEvent',
            url=url,
            target_id=target_id,
            event_parent_id=event.event_id,
        )
    )
    self.logger.debug('[DownloadsWatchdog] Successfully completed BrowserStateRequestEvent')
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
    """Clean up when browser stops."""
    # Cancel any still-running CDP event handler tasks, then wait for them to unwind.
    for pending in list(self._cdp_event_tasks):
        if not pending.done():
            pending.cancel()
    if self._cdp_event_tasks:
        await asyncio.gather(*self._cdp_event_tasks, return_exceptions=True)
    self._cdp_event_tasks.clear()

    # CDP sessions are cached and managed by BrowserSession; just drop our reference.
    self._download_cdp_session = None
    self._download_cdp_session_setup = False

    # Reset all remaining per-session bookkeeping.
    for container in (
        self._sessions_with_listeners,
        self._active_downloads,
        self._pdf_viewer_cache,
        self._session_pdf_urls,
        self._network_monitored_targets,
        self._detected_downloads,
    ):
        container.clear()
    self._network_callback_registered = False
async def on_NavigationCompleteEvent(self, event: NavigationCompleteEvent) -> None:
    """Check for PDFs after navigation completes."""
    self.logger.debug(f'[DownloadsWatchdog] NavigationCompleteEvent received for {event.url}, tab #{event.target_id[-4:]}')

    # Content may have changed, so drop any cached PDF-viewer verdict for this URL.
    self._pdf_viewer_cache.pop(event.url, None)

    if not self._is_auto_download_enabled():
        return

    # Note: Using network-based PDF detection that doesn't require JavaScript
    target_id = event.target_id
    self.logger.debug(f'[DownloadsWatchdog] Got target_id={target_id} for tab #{event.target_id[-4:]}')
    if not await self.check_for_pdf_viewer(target_id):
        return
    self.logger.debug(f'[DownloadsWatchdog] 📄 PDF detected at {event.url}, triggering auto-download...')
    download_path = await self.trigger_pdf_download(target_id)
    if not download_path:
        self.logger.warning(f'[DownloadsWatchdog] ⚠️ PDF download failed for {event.url}')
def _is_auto_download_enabled(self) -> bool:
    """Check if auto-download PDFs is enabled in browser profile."""
    profile = self.browser_session.browser_profile
    return profile.auto_download_pdfs
async def attach_to_target(self, target_id: TargetID) -> None:
    """Set up download monitoring for a specific target.

    Registers browser-level CDP download handlers exactly once per browser
    session (Browser.downloadWillBegin / downloadProgress), then enables
    per-target network monitoring. Safe to call for every new tab.
    """

    # Define CDP event handlers outside of try to avoid indentation/scope issues
    def download_will_begin_handler(event: DownloadWillBeginEvent, session_id: SessionID | None) -> None:
        # Fires when Chromium decides a navigation/click is a download.
        self.logger.debug(f'[DownloadsWatchdog] Download will begin: {event}')
        # Cache info for later completion event handling (esp. remote browsers)
        guid = event.get('guid', '')
        try:
            suggested_filename = event.get('suggestedFilename')
            assert suggested_filename, 'CDP DownloadWillBegin missing suggestedFilename'
            self._cdp_downloads_info[guid] = {
                'url': event.get('url', ''),
                'suggested_filename': suggested_filename,
                'handled': False,
            }
        except (AssertionError, KeyError):
            pass
        # Create and track the task (handler itself must stay synchronous)
        task = create_task_with_error_handling(
            self._handle_cdp_download(event, target_id, session_id),
            name='handle_cdp_download',
            logger_instance=self.logger,
            suppress_exceptions=True,
        )
        self._cdp_event_tasks.add(task)
        # Remove from set when done
        task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))

    def download_progress_handler(event: DownloadProgressEvent, session_id: SessionID | None) -> None:
        # Check if download is complete
        if event.get('state') == 'completed':
            file_path = event.get('filePath')
            guid = event.get('guid', '')
            if self.browser_session.is_local:
                if file_path:
                    self.logger.debug(f'[DownloadsWatchdog] Download completed: {file_path}')
                    # Track the download
                    self._track_download(file_path)
                    # Mark as handled to prevent fallback duplicate dispatch
                    try:
                        if guid in self._cdp_downloads_info:
                            self._cdp_downloads_info[guid]['handled'] = True
                    except (KeyError, AttributeError):
                        pass
                else:
                    # No local file path provided, local polling in _handle_cdp_download will handle it
                    self.logger.debug(
                        '[DownloadsWatchdog] No filePath in progress event (local); polling will handle detection'
                    )
            else:
                # Remote browser: do not touch local filesystem. Fallback to downloadPath+suggestedFilename
                info = self._cdp_downloads_info.get(guid, {})
                try:
                    suggested_filename = info.get('suggested_filename') or (Path(file_path).name if file_path else 'download')
                    downloads_path = str(self.browser_session.browser_profile.downloads_path or '')
                    effective_path = file_path or str(Path(downloads_path) / suggested_filename)
                    file_name = Path(effective_path).name
                    file_ext = Path(file_name).suffix.lower().lstrip('.')
                    # file_size is unknown for remote downloads; report 0.
                    self.event_bus.dispatch(
                        FileDownloadedEvent(
                            url=info.get('url', ''),
                            path=str(effective_path),
                            file_name=file_name,
                            file_size=0,
                            file_type=file_ext if file_ext else None,
                        )
                    )
                    self.logger.debug(f'[DownloadsWatchdog] ✅ (remote) Download completed: {effective_path}')
                finally:
                    # Always drop the guid entry so the map cannot grow unbounded.
                    if guid in self._cdp_downloads_info:
                        del self._cdp_downloads_info[guid]

    try:
        downloads_path_raw = self.browser_session.browser_profile.downloads_path
        if not downloads_path_raw:
            # logger.info(f'[DownloadsWatchdog] No downloads path configured, skipping target: {target_id}')
            return  # No downloads path configured

        # Check if we already have a download listener on this session
        # to prevent duplicate listeners from being added
        # Note: Since download listeners are set up once per browser session, not per target,
        # we just track if we've set up the browser-level listener
        if self._download_cdp_session_setup:
            self.logger.debug('[DownloadsWatchdog] Download listener already set up for browser session')
            return

        # logger.debug(f'[DownloadsWatchdog] Setting up CDP download listener for target: {target_id}')
        # Use CDP session for download events but store reference in watchdog
        if not self._download_cdp_session_setup:
            # Set up CDP session for downloads (only once per browser session)
            cdp_client = self.browser_session.cdp_client
            # Set download behavior to allow downloads and enable events
            downloads_path = self.browser_session.browser_profile.downloads_path
            if not downloads_path:
                self.logger.warning('[DownloadsWatchdog] No downloads path configured, skipping CDP download setup')
                return
            # Ensure path is properly expanded (~ -> absolute path)
            expanded_downloads_path = Path(downloads_path).expanduser().resolve()
            await cdp_client.send.Browser.setDownloadBehavior(
                params={
                    'behavior': 'allow',
                    'downloadPath': str(expanded_downloads_path),  # Use expanded absolute path
                    'eventsEnabled': True,
                }
            )
            # Register the handlers with CDP
            cdp_client.register.Browser.downloadWillBegin(download_will_begin_handler)  # type: ignore[arg-type]
            cdp_client.register.Browser.downloadProgress(download_progress_handler)  # type: ignore[arg-type]
            self._download_cdp_session_setup = True
            self.logger.debug('[DownloadsWatchdog] Set up CDP download listeners')

        # No need to track individual targets since download listener is browser-level
        # logger.debug(f'[DownloadsWatchdog] Successfully set up CDP download listener for target: {target_id}')
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] Failed to set up CDP download listener for target {target_id}: {e}')

    # Set up network monitoring for this target (catches ALL download variants)
    await self._setup_network_monitoring(target_id)
async def _setup_network_monitoring(self, target_id: TargetID) -> None:
    """Set up network monitoring to detect PDFs and downloads from ALL sources.

    This catches:
    - Direct PDF navigation
    - PDFs in iframes
    - PDFs with embed/object tags
    - JavaScript-triggered downloads
    - Any Content-Disposition: attachment headers
    """
    # Skip if already monitoring this target
    if target_id in self._network_monitored_targets:
        self.logger.debug(f'[DownloadsWatchdog] Network monitoring already enabled for target {target_id[-4:]}')
        return

    # Check if auto-download is enabled
    if not self._is_auto_download_enabled():
        self.logger.debug('[DownloadsWatchdog] Auto-download disabled, skipping network monitoring')
        return

    try:
        cdp_client = self.browser_session.cdp_client

        # Register the global callback once
        if not self._network_callback_registered:

            def on_response_received(event: ResponseReceivedEvent, session_id: str | None) -> None:
                """Handle Network.responseReceived event to detect downloadable content.

                This callback is registered globally and uses session_id to determine the correct target.
                """
                try:
                    # Check if session_manager exists (may be None during browser shutdown)
                    if not self.browser_session.session_manager:
                        self.logger.warning('[DownloadsWatchdog] Session manager not found, skipping network monitoring')
                        return

                    # Look up target_id from session_id
                    event_target_id = self.browser_session.session_manager.get_target_id_from_session_id(session_id)
                    if not event_target_id:
                        # Session not in pool - might be a stale session or not yet tracked
                        return

                    # Only process events for targets we're monitoring
                    if event_target_id not in self._network_monitored_targets:
                        return

                    response = event.get('response', {})
                    url = response.get('url', '')
                    content_type = response.get('mimeType', '').lower()
                    headers = response.get('headers', {})

                    # Skip non-HTTP URLs (data:, about:, chrome-extension:, etc.)
                    if not url.startswith('http'):
                        return

                    # Check if it's a PDF
                    is_pdf = 'application/pdf' in content_type
                    # Check if it's marked as download via Content-Disposition header
                    content_disposition = headers.get('content-disposition', '').lower()
                    is_download_attachment = 'attachment' in content_disposition

                    # Filter out image/video/audio files even if marked as attachment
                    # These are likely resources, not intentional downloads
                    unwanted_content_types = [
                        'image/',
                        'video/',
                        'audio/',
                        'text/css',
                        'text/javascript',
                        'application/javascript',
                        'application/x-javascript',
                        'text/html',
                        'application/json',
                        'font/',
                        'application/font',
                        'application/x-font',
                    ]
                    is_unwanted_type = any(content_type.startswith(prefix) for prefix in unwanted_content_types)
                    if is_unwanted_type:
                        return

                    # Check URL extension to filter out obvious images/resources
                    url_lower = url.lower().split('?')[0]  # Remove query params
                    unwanted_extensions = [
                        '.jpg',
                        '.jpeg',
                        '.png',
                        '.gif',
                        '.webp',
                        '.svg',
                        '.ico',
                        '.css',
                        '.js',
                        '.woff',
                        '.woff2',
                        '.ttf',
                        '.eot',
                        '.mp4',
                        '.webm',
                        '.mp3',
                        '.wav',
                        '.ogg',
                    ]
                    if any(url_lower.endswith(ext) for ext in unwanted_extensions):
                        return

                    # Only process if it's a PDF or download
                    if not (is_pdf or is_download_attachment):
                        return

                    # Check if we've already processed this URL in this session
                    if url in self._detected_downloads:
                        self.logger.debug(f'[DownloadsWatchdog] Already detected download: {url[:80]}...')
                        return

                    # Mark as detected to avoid duplicates
                    self._detected_downloads.add(url)

                    # Extract filename from Content-Disposition if available
                    suggested_filename = None
                    if 'filename=' in content_disposition:
                        # Parse filename from Content-Disposition header
                        import re

                        filename_match = re.search(r'filename[^;=\n]*=(([\'"]).*?\2|[^;\n]*)', content_disposition)
                        if filename_match:
                            suggested_filename = filename_match.group(1).strip('\'"')

                    self.logger.info(f'[DownloadsWatchdog] 🔍 Detected downloadable content via network: {url[:80]}...')
                    self.logger.debug(
                        f'[DownloadsWatchdog] Content-Type: {content_type}, Is PDF: {is_pdf}, Is Attachment: {is_download_attachment}'
                    )

                    # Trigger download asynchronously in background (don't block event handler)
                    async def download_in_background():
                        try:
                            download_path = await self.download_file_from_url(
                                url=url,
                                target_id=event_target_id,  # Use target_id from session_id lookup
                                content_type=content_type,
                                suggested_filename=suggested_filename,
                            )
                            if download_path:
                                self.logger.info(f'[DownloadsWatchdog] ✅ Successfully downloaded: {download_path}')
                            else:
                                self.logger.warning(f'[DownloadsWatchdog] ⚠️ Failed to download: {url[:80]}...')
                        except Exception as e:
                            self.logger.error(f'[DownloadsWatchdog] Error downloading in background: {type(e).__name__}: {e}')

                    # Create background task (tracked so it can be cancelled on browser stop)
                    task = create_task_with_error_handling(
                        download_in_background(),
                        name='download_in_background',
                        logger_instance=self.logger,
                        suppress_exceptions=True,
                    )
                    self._cdp_event_tasks.add(task)
                    task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))

                except Exception as e:
                    self.logger.error(f'[DownloadsWatchdog] Error in network response handler: {type(e).__name__}: {e}')

            # Register the callback globally (once)
            cdp_client.register.Network.responseReceived(on_response_received)
            self._network_callback_registered = True
            self.logger.debug('[DownloadsWatchdog] ✅ Registered global network response callback')

        # Get or create CDP session for this target
        cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Enable Network domain to monitor HTTP responses (per-target/per-session)
        await cdp_client.send.Network.enable(session_id=cdp_session.session_id)
        self.logger.debug(f'[DownloadsWatchdog] Enabled Network domain for target {target_id[-4:]}')

        # Mark this target as monitored
        self._network_monitored_targets.add(target_id)
        self.logger.debug(f'[DownloadsWatchdog] ✅ Network monitoring enabled for target {target_id[-4:]}')
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] Failed to set up network monitoring for target {target_id}: {e}')
async def download_file_from_url(
    self, url: str, target_id: TargetID, content_type: str | None = None, suggested_filename: str | None = None
) -> str | None:
    """Generic method to download any file from a URL.

    Fetches the resource inside the page via JavaScript (to reuse the
    browser's cache and cookies), writes it into the configured downloads
    directory, and dispatches a FileDownloadedEvent.

    Args:
        url: The URL to download
        target_id: The target ID for CDP session
        content_type: Optional content type (e.g., 'application/pdf')
        suggested_filename: Optional filename from Content-Disposition header

    Returns:
        Path to downloaded file, or None if download failed
    """
    if not self.browser_session.browser_profile.downloads_path:
        self.logger.warning('[DownloadsWatchdog] No downloads path configured')
        return None

    # Check if already downloaded in this session
    if url in self._session_pdf_urls:
        existing_path = self._session_pdf_urls[url]
        self.logger.debug(f'[DownloadsWatchdog] File already downloaded in session: {existing_path}')
        return existing_path

    try:
        # Get or create CDP session for this target
        temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

        # Determine filename
        if suggested_filename:
            filename = suggested_filename
        else:
            # Extract from URL
            filename = os.path.basename(url.split('?')[0])  # Remove query params
            if not filename or '.' not in filename:
                # Fallback: use content type to determine extension
                if content_type and 'pdf' in content_type:
                    filename = 'document.pdf'
                else:
                    filename = 'download'

        # SECURITY: Content-Disposition filenames (and URL paths) are attacker-controlled.
        # Strip any directory components so a name like '../../evil' or 'a\\..\\evil'
        # cannot escape the downloads directory.
        filename = os.path.basename(filename.replace('\\', '/')).strip()
        if not filename or filename in ('.', '..'):
            filename = 'download'

        # Ensure downloads directory exists
        downloads_dir = str(self.browser_session.browser_profile.downloads_path)
        os.makedirs(downloads_dir, exist_ok=True)

        # Generate unique filename if file exists ("name (1).ext", "name (2).ext", ...)
        final_filename = filename
        existing_files = os.listdir(downloads_dir)
        if filename in existing_files:
            base, ext = os.path.splitext(filename)
            counter = 1
            while f'{base} ({counter}){ext}' in existing_files:
                counter += 1
            final_filename = f'{base} ({counter}){ext}'
            self.logger.debug(f'[DownloadsWatchdog] File exists, using: {final_filename}')

        self.logger.debug(f'[DownloadsWatchdog] Downloading from: {url[:100]}...')

        # Download using JavaScript fetch to leverage browser cache
        escaped_url = json.dumps(url)
        result = await asyncio.wait_for(
            temp_session.cdp_client.send.Runtime.evaluate(
                params={
                    'expression': f"""
                        (async () => {{
                            try {{
                                const response = await fetch({escaped_url}, {{
                                    cache: 'force-cache'
                                }});
                                if (!response.ok) {{
                                    throw new Error(`HTTP error! status: ${{response.status}}`);
                                }}
                                const blob = await response.blob();
                                const arrayBuffer = await blob.arrayBuffer();
                                const uint8Array = new Uint8Array(arrayBuffer);
                                return {{
                                    data: Array.from(uint8Array),
                                    responseSize: uint8Array.length
                                }};
                            }} catch (error) {{
                                throw new Error(`Fetch failed: ${{error.message}}`);
                            }}
                        }})()
                    """,
                    'awaitPromise': True,
                    'returnByValue': True,
                },
                session_id=temp_session.session_id,
            ),
            timeout=15.0,  # 15 second timeout
        )

        download_result = result.get('result', {}).get('value', {})
        if download_result and download_result.get('data') and len(download_result['data']) > 0:
            download_path = os.path.join(downloads_dir, final_filename)

            # Save the file asynchronously
            async with await anyio.open_file(download_path, 'wb') as f:
                await f.write(bytes(download_result['data']))

            # Verify file was written successfully
            if os.path.exists(download_path):
                actual_size = os.path.getsize(download_path)
                self.logger.debug(f'[DownloadsWatchdog] File written: {download_path} ({actual_size} bytes)')

                # Determine file type
                file_ext = Path(final_filename).suffix.lower().lstrip('.')
                mime_type = content_type or f'application/{file_ext}'

                # Store URL->path mapping for this session
                self._session_pdf_urls[url] = download_path

                # Emit file downloaded event
                self.logger.debug(f'[DownloadsWatchdog] Dispatching FileDownloadedEvent for {final_filename}')
                self.event_bus.dispatch(
                    FileDownloadedEvent(
                        url=url,
                        path=download_path,
                        file_name=final_filename,
                        file_size=actual_size,
                        file_type=file_ext if file_ext else None,
                        mime_type=mime_type,
                        auto_download=True,
                    )
                )
                return download_path
            else:
                self.logger.error(f'[DownloadsWatchdog] Failed to write file: {download_path}')
                return None
        else:
            self.logger.warning(f'[DownloadsWatchdog] No data received when downloading from {url}')
            return None

    except TimeoutError:
        self.logger.warning(f'[DownloadsWatchdog] Download timed out: {url[:80]}...')
        return None
    except Exception as e:
        self.logger.warning(f'[DownloadsWatchdog] Download failed: {type(e).__name__}: {e}')
        return None
def _track_download(self, file_path: str) -> None:
    """Track a completed download and dispatch the appropriate event.

    Args:
        file_path: The path to the downloaded file
    """
    try:
        path = Path(file_path)
        if not path.exists():
            self.logger.warning(f'[DownloadsWatchdog] Downloaded file not found: {file_path}')
            return

        file_size = path.stat().st_size
        self.logger.debug(f'[DownloadsWatchdog] Tracked download: {path.name} ({file_size} bytes)')

        # Dispatch download event
        from browser_use.browser.events import FileDownloadedEvent

        self.event_bus.dispatch(
            FileDownloadedEvent(
                url=str(path),  # Use the file path as URL for local files
                path=str(path),
                file_name=path.name,
                file_size=file_size,
            )
        )
    except Exception as e:
        self.logger.error(f'[DownloadsWatchdog] Error tracking download: {e}')
async def _handle_cdp_download(
self, event: DownloadWillBeginEvent, target_id: TargetID, session_id: SessionID | None
) -> None:
"""Handle a CDP Page.downloadWillBegin event."""
downloads_dir = (
Path(
self.browser_session.browser_profile.downloads_path
or f'{tempfile.gettempdir()}/browser_use_downloads.{str(self.browser_session.id)[-4:]}'
)
.expanduser()
.resolve()
) # Ensure path is properly expanded
# Initialize variables that may be used outside try blocks
unique_filename = None
file_size = 0
expected_path = None
download_result = None
download_url = event.get('url', '')
suggested_filename = event.get('suggestedFilename', 'download')
guid = event.get('guid', '')
try:
self.logger.debug(f'[DownloadsWatchdog] ⬇️ File download starting: {suggested_filename} from {download_url[:100]}...')
self.logger.debug(f'[DownloadsWatchdog] Full CDP event: {event}')
# Since Browser.setDownloadBehavior is already configured, the browser will download the file
# We just need to wait for it to appear in the downloads directory
expected_path = downloads_dir / suggested_filename
# Debug: List current directory contents
self.logger.debug(f'[DownloadsWatchdog] Downloads directory: {downloads_dir}')
if downloads_dir.exists():
files_before = list(downloads_dir.iterdir())
self.logger.debug(f'[DownloadsWatchdog] Files before download: {[f.name for f in files_before]}')
# Try manual JavaScript fetch as a fallback for local browsers (disabled for regular local downloads)
if self.browser_session.is_local and self._use_js_fetch_for_local:
self.logger.debug(f'[DownloadsWatchdog] Attempting JS fetch fallback for {download_url}')
unique_filename = None
file_size = None
download_result = None
try:
# Escape the URL for JavaScript
import json
escaped_url = json.dumps(download_url)
# Get the proper session for the frame that initiated the download
cdp_session = await self.browser_session.cdp_client_for_frame(event.get('frameId'))
assert cdp_session
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': f"""
(async () => {{
try {{
const response = await fetch({escaped_url});
if (!response.ok) {{
throw new Error(`HTTP error! status: ${{response.status}}`);
}}
const blob = await response.blob();
const arrayBuffer = await blob.arrayBuffer();
const uint8Array = new Uint8Array(arrayBuffer);
return {{
data: Array.from(uint8Array),
size: uint8Array.length,
contentType: response.headers.get('content-type') || 'application/octet-stream'
}};
}} catch (error) {{
throw new Error(`Fetch failed: ${{error.message}}`);
}}
}})()
""",
'awaitPromise': True,
'returnByValue': True,
},
session_id=cdp_session.session_id,
)
download_result = result.get('result', {}).get('value')
if download_result and download_result.get('data'):
# Save the file
file_data = bytes(download_result['data'])
file_size = len(file_data)
# Ensure unique filename
unique_filename = await self._get_unique_filename(str(downloads_dir), suggested_filename)
final_path = downloads_dir / unique_filename
# Write the file
import anyio
async with await anyio.open_file(final_path, 'wb') as f:
await f.write(file_data)
self.logger.debug(f'[DownloadsWatchdog] ✅ Downloaded and saved file: {final_path} ({file_size} bytes)')
expected_path = final_path
# Emit download event immediately
file_ext = expected_path.suffix.lower().lstrip('.')
file_type = file_ext if file_ext else None
self.event_bus.dispatch(
FileDownloadedEvent(
url=download_url,
path=str(expected_path),
file_name=unique_filename or expected_path.name,
file_size=file_size or 0,
file_type=file_type,
mime_type=(download_result.get('contentType') if download_result else None),
from_cache=False,
auto_download=False,
)
)
# Mark as handled to prevent duplicate dispatch from progress/polling paths
try:
if guid in self._cdp_downloads_info:
self._cdp_downloads_info[guid]['handled'] = True
except (KeyError, AttributeError):
pass
self.logger.debug(
f'[DownloadsWatchdog] ✅ File download completed via CDP: {suggested_filename} ({file_size} bytes) saved to {expected_path}'
)
return
else:
self.logger.error('[DownloadsWatchdog] ❌ No data received from fetch')
except Exception as fetch_error:
self.logger.error(f'[DownloadsWatchdog] ❌ Failed to download file via fetch: {fetch_error}')
# For remote browsers, don't poll local filesystem; downloadProgress handler will emit the event
if not self.browser_session.is_local:
return
except Exception as e:
self.logger.error(f'[DownloadsWatchdog] ❌ Error handling CDP download: {type(e).__name__} {e}')
# If we reach here, the fetch method failed, so wait for native download
# Poll the downloads directory for new files
self.logger.debug(f'[DownloadsWatchdog] Checking if browser auto-download saved the file for us: {suggested_filename}')
# Get initial list of files in downloads directory
initial_files = set()
if Path(downloads_dir).exists():
for f in Path(downloads_dir).iterdir():
if f.is_file() and not f.name.startswith('.'):
initial_files.add(f.name)
# Poll for new files
max_wait = 20 # seconds
start_time = asyncio.get_event_loop().time()
while asyncio.get_event_loop().time() - start_time < max_wait:
await asyncio.sleep(5.0) # Check every 5 seconds
if Path(downloads_dir).exists():
for file_path in Path(downloads_dir).iterdir():
# Skip hidden files and files that were already there
if file_path.is_file() and not file_path.name.startswith('.') and file_path.name not in initial_files:
# Check if file has content (> 4 bytes)
try:
file_size = file_path.stat().st_size
if file_size > 4:
# Found a new download!
self.logger.debug(
f'[DownloadsWatchdog] ✅ Found downloaded file: {file_path} ({file_size} bytes)'
)
# Determine file type from extension
file_ext = file_path.suffix.lower().lstrip('.')
file_type = file_ext if file_ext else None
# Dispatch download event
# Skip if already handled by progress/JS fetch
info = self._cdp_downloads_info.get(guid, {})
if info.get('handled'):
return
self.event_bus.dispatch(
FileDownloadedEvent(
url=download_url,
path=str(file_path),
file_name=file_path.name,
file_size=file_size,
file_type=file_type,
)
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/permissions_watchdog.py | browser_use/browser/watchdogs/permissions_watchdog.py | """Permissions watchdog for granting browser permissions on connection."""
from typing import TYPE_CHECKING, ClassVar
from bubus import BaseEvent
from browser_use.browser.events import BrowserConnectedEvent
from browser_use.browser.watchdog_base import BaseWatchdog
if TYPE_CHECKING:
pass
class PermissionsWatchdog(BaseWatchdog):
    """Grants browser permissions when browser connects."""

    # Event contracts
    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [BrowserConnectedEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []

    async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
        """Grant the profile's configured permissions as soon as the browser connects."""
        perms = self.browser_session.browser_profile.permissions
        if not perms:
            self.logger.debug('No permissions to grant')
            return

        self.logger.debug(f'🔓 Granting browser permissions: {perms}')
        try:
            # Browser.grantPermissions is a browser-level CDP command: it takes
            # no session_id, and omitting `origin` grants to all origins.
            await self.browser_session.cdp_client.send.Browser.grantPermissions(
                params={'permissions': perms}  # type: ignore
            )
            self.logger.debug(f'✅ Successfully granted permissions: {perms}')
        except Exception as e:
            # Permissions are not critical to browser operation, so log and continue.
            self.logger.error(f'❌ Failed to grant permissions: {str(e)}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/screenshot_watchdog.py | browser_use/browser/watchdogs/screenshot_watchdog.py | """Screenshot watchdog for handling screenshot requests using CDP."""
from typing import TYPE_CHECKING, Any, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.page import CaptureScreenshotParameters
from browser_use.browser.events import ScreenshotEvent
from browser_use.browser.views import BrowserError
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.observability import observe_debug
if TYPE_CHECKING:
pass
class ScreenshotWatchdog(BaseWatchdog):
    """Handles screenshot requests using CDP."""

    # Events this watchdog listens to
    LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [ScreenshotEvent]
    # Events this watchdog emits
    EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []

    @observe_debug(ignore_input=True, ignore_output=True, name='screenshot_event_handler')
    async def on_ScreenshotEvent(self, event: ScreenshotEvent) -> str:
        """Capture a screenshot of the focused page via CDP.

        Args:
            event: ScreenshotEvent with optional full_page and clip parameters

        Returns:
            Base64-encoded PNG screenshot data.

        Raises:
            BrowserError: if no page target is available or CDP returns no data.
        """
        self.logger.debug('[ScreenshotWatchdog] Handler START - on_ScreenshotEvent called')
        try:
            # Page.captureScreenshot only works on page/tab targets, never on
            # iframe/worker targets, so validate the focused target first.
            focused = self.browser_session.get_focused_target()
            if focused and focused.target_type in ('page', 'tab'):
                target_id = focused.target_id
            else:
                # Focused target is iframe/worker/missing - fall back to any page target
                kind = focused.target_type if focused else 'None'
                self.logger.warning(f'[ScreenshotWatchdog] Focused target is {kind}, falling back to page target')
                pages = self.browser_session.get_page_targets()
                if not pages:
                    raise BrowserError('[ScreenshotWatchdog] No page targets available for screenshot')
                target_id = pages[-1].target_id

            cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=True)

            # Capture the viewport as PNG (no beyond-viewport capture).
            params = CaptureScreenshotParameters(format='png', captureBeyondViewport=False)
            self.logger.debug(f'[ScreenshotWatchdog] Taking screenshot with params: {params}')
            result = await cdp_session.cdp_client.send.Page.captureScreenshot(params=params, session_id=cdp_session.session_id)

            if result and 'data' in result:
                self.logger.debug('[ScreenshotWatchdog] Screenshot captured successfully')
                return result['data']
            raise BrowserError('[ScreenshotWatchdog] Screenshot result missing data')
        except Exception as e:
            self.logger.error(f'[ScreenshotWatchdog] Screenshot failed: {e}')
            raise
        finally:
            # Best-effort highlight cleanup, even when the capture failed.
            try:
                await self.browser_session.remove_highlights()
            except Exception:
                pass
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/aboutblank_watchdog.py | browser_use/browser/watchdogs/aboutblank_watchdog.py | """About:blank watchdog for managing about:blank tabs with DVD screensaver."""
from typing import TYPE_CHECKING, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.target import TargetID
from pydantic import PrivateAttr
from browser_use.browser.events import (
AboutBlankDVDScreensaverShownEvent,
BrowserStopEvent,
BrowserStoppedEvent,
CloseTabEvent,
NavigateToUrlEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
if TYPE_CHECKING:
pass
class AboutBlankWatchdog(BaseWatchdog):
    """Ensures there's always exactly one about:blank tab with DVD screensaver.

    Chrome exits when its last tab closes, so this watchdog keeps a spare
    about:blank tab (decorated with a bouncing-logo animation) alive whenever
    the tab count would otherwise drop to zero.
    """

    # Event contracts
    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
        BrowserStopEvent,
        BrowserStoppedEvent,
        TabCreatedEvent,
        TabClosedEvent,
    ]
    EMITS: ClassVar[list[type[BaseEvent]]] = [
        NavigateToUrlEvent,
        CloseTabEvent,
        AboutBlankDVDScreensaverShownEvent,
    ]

    # Set once browser shutdown starts; suppresses creation of replacement tabs.
    _stopping: bool = PrivateAttr(default=False)

    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """Handle browser stop request - stop creating new tabs."""
        self._stopping = True

    async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
        """Handle browser stopped event - stop creating new tabs."""
        self._stopping = True

    async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
        """Check tabs when a new tab is created."""
        # If an about:blank tab was created, show DVD screensaver on all about:blank tabs
        if event.url == 'about:blank':
            await self._show_dvd_screensaver_on_about_blank_tabs()

    async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
        """Check tabs when a tab is closed and proactively create about:blank if needed.

        NOTE: this event fires BEFORE the tab actually closes, so a page count
        of < 1 here means the last remaining tab is about to disappear.
        """
        # Don't create new tabs if browser is shutting down
        if self._stopping:
            return
        # Use _cdp_get_all_pages for quick check without fetching titles
        page_targets = await self.browser_session._cdp_get_all_pages()
        if len(page_targets) < 1:
            self.logger.debug(
                '[AboutBlankWatchdog] Last tab closing, creating new about:blank tab to avoid closing entire browser'
            )
            # Create the animation tab since no tabs should remain
            navigate_event = self.event_bus.dispatch(NavigateToUrlEvent(url='about:blank', new_tab=True))
            await navigate_event
            # Show DVD screensaver on the new tab
            await self._show_dvd_screensaver_on_about_blank_tabs()
        else:
            # Multiple tabs exist, check after close
            await self._check_and_ensure_about_blank_tab()

    async def attach_to_target(self, target_id: TargetID) -> None:
        """AboutBlankWatchdog doesn't monitor individual targets."""
        pass

    async def _check_and_ensure_about_blank_tab(self) -> None:
        """Check current tabs and ensure exactly one about:blank tab with animation exists."""
        try:
            # For quick checks, just get page targets without titles to reduce noise
            page_targets = await self.browser_session._cdp_get_all_pages()
            # If no tabs exist at all, create one to keep browser alive
            if len(page_targets) == 0:
                # Only create a new tab if there are no tabs at all
                self.logger.debug('[AboutBlankWatchdog] No tabs exist, creating new about:blank DVD screensaver tab')
                navigate_event = self.event_bus.dispatch(NavigateToUrlEvent(url='about:blank', new_tab=True))
                await navigate_event
                # Show DVD screensaver on the new tab
                await self._show_dvd_screensaver_on_about_blank_tabs()
            # Otherwise there are tabs, don't create new ones to avoid interfering
        except Exception as e:
            self.logger.error(f'[AboutBlankWatchdog] Error ensuring about:blank tab: {e}')

    async def _show_dvd_screensaver_on_about_blank_tabs(self) -> None:
        """Show DVD screensaver on all about:blank pages only."""
        try:
            # Get just the page targets without expensive title fetching
            page_targets = await self.browser_session._cdp_get_all_pages()
            # Last 4 chars of the session id label the animation so that
            # parallel sessions are visually distinguishable.
            browser_session_label = str(self.browser_session.id)[-4:]
            for page_target in page_targets:
                target_id = page_target['targetId']
                url = page_target['url']
                # Only target about:blank pages specifically
                if url == 'about:blank':
                    await self._show_dvd_screensaver_loading_animation_cdp(target_id, browser_session_label)
        except Exception as e:
            self.logger.error(f'[AboutBlankWatchdog] Error showing DVD screensaver: {e}')

    async def _show_dvd_screensaver_loading_animation_cdp(self, target_id: TargetID, browser_session_label: str) -> None:
        """
        Injects a DVD screensaver-style bouncing logo loading animation overlay into the target using CDP.
        This is used to visually indicate that the browser is setting up or waiting.
        """
        try:
            # Create temporary session for this target without switching focus
            temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
            # Inject the DVD screensaver script (from main branch with idempotency added).
            # NOTE: this is an f-string, so literal JS braces are doubled ({{ }})
            # and only {browser_session_label} is interpolated from Python.
            script = f"""
            (function(browser_session_label) {{
                // Idempotency check
                if (window.__dvdAnimationRunning) {{
                    return; // Already running, don't add another
                }}
                window.__dvdAnimationRunning = true;
                // Ensure document.body exists before proceeding
                if (!document.body) {{
                    // Try again after DOM is ready
                    window.__dvdAnimationRunning = false; // Reset flag to retry
                    if (document.readyState === 'loading') {{
                        document.addEventListener('DOMContentLoaded', () => arguments.callee(browser_session_label));
                    }}
                    return;
                }}
                const animated_title = `Starting agent ${{browser_session_label}}...`;
                if (document.title === animated_title) {{
                    return; // already run on this tab, dont run again
                }}
                document.title = animated_title;
                // Create the main overlay
                const loadingOverlay = document.createElement('div');
                loadingOverlay.id = 'pretty-loading-animation';
                loadingOverlay.style.position = 'fixed';
                loadingOverlay.style.top = '0';
                loadingOverlay.style.left = '0';
                loadingOverlay.style.width = '100vw';
                loadingOverlay.style.height = '100vh';
                loadingOverlay.style.background = '#000';
                loadingOverlay.style.zIndex = '99999';
                loadingOverlay.style.overflow = 'hidden';
                // Create the image element
                const img = document.createElement('img');
                img.src = 'https://cf.browser-use.com/logo.svg';
                img.alt = 'Browser-Use';
                img.style.width = '200px';
                img.style.height = 'auto';
                img.style.position = 'absolute';
                img.style.left = '0px';
                img.style.top = '0px';
                img.style.zIndex = '2';
                img.style.opacity = '0.8';
                loadingOverlay.appendChild(img);
                document.body.appendChild(loadingOverlay);
                // DVD screensaver bounce logic
                let x = Math.random() * (window.innerWidth - 300);
                let y = Math.random() * (window.innerHeight - 300);
                let dx = 1.2 + Math.random() * 0.4; // px per frame
                let dy = 1.2 + Math.random() * 0.4;
                // Randomize direction
                if (Math.random() > 0.5) dx = -dx;
                if (Math.random() > 0.5) dy = -dy;
                function animate() {{
                    const imgWidth = img.offsetWidth || 300;
                    const imgHeight = img.offsetHeight || 300;
                    x += dx;
                    y += dy;
                    if (x <= 0) {{
                        x = 0;
                        dx = Math.abs(dx);
                    }} else if (x + imgWidth >= window.innerWidth) {{
                        x = window.innerWidth - imgWidth;
                        dx = -Math.abs(dx);
                    }}
                    if (y <= 0) {{
                        y = 0;
                        dy = Math.abs(dy);
                    }} else if (y + imgHeight >= window.innerHeight) {{
                        y = window.innerHeight - imgHeight;
                        dy = -Math.abs(dy);
                    }}
                    img.style.left = `${{x}}px`;
                    img.style.top = `${{y}}px`;
                    requestAnimationFrame(animate);
                }}
                animate();
                // Responsive: update bounds on resize
                window.addEventListener('resize', () => {{
                    x = Math.min(x, window.innerWidth - img.offsetWidth);
                    y = Math.min(y, window.innerHeight - img.offsetHeight);
                }});
                // Add a little CSS for smoothness
                const style = document.createElement('style');
                style.textContent = `
                    #pretty-loading-animation {{
                        /*backdrop-filter: blur(2px) brightness(0.9);*/
                    }}
                    #pretty-loading-animation img {{
                        user-select: none;
                        pointer-events: none;
                    }}
                `;
                document.head.appendChild(style);
            }})('{browser_session_label}');
            """
            await temp_session.cdp_client.send.Runtime.evaluate(params={'expression': script}, session_id=temp_session.session_id)
            # No need to detach - session is cached
            # Dispatch event so listeners know the animation is in place
            self.event_bus.dispatch(AboutBlankDVDScreensaverShownEvent(target_id=target_id))
        except Exception as e:
            self.logger.error(f'[AboutBlankWatchdog] Error injecting DVD screensaver: {e}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/local_browser_watchdog.py | browser_use/browser/watchdogs/local_browser_watchdog.py | """Local browser watchdog for managing browser subprocess lifecycle."""
import asyncio
import os
import shutil
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar
import psutil
from bubus import BaseEvent
from pydantic import PrivateAttr
from browser_use.browser.events import (
BrowserKillEvent,
BrowserLaunchEvent,
BrowserLaunchResult,
BrowserStopEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.observability import observe_debug
if TYPE_CHECKING:
pass
class LocalBrowserWatchdog(BaseWatchdog):
    """Manages local browser subprocess lifecycle.

    Launches a Chrome/Chromium subprocess with a CDP remote-debugging port,
    retries with a temporary user-data-dir when the profile appears locked,
    and tears down the process and any temp directories on shutdown.
    """

    # Events this watchdog listens to
    LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [
        BrowserLaunchEvent,
        BrowserKillEvent,
        BrowserStopEvent,
    ]
    # Events this watchdog emits
    EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []

    # Private state for subprocess management
    _subprocess: psutil.Process | None = PrivateAttr(default=None)
    _owns_browser_resources: bool = PrivateAttr(default=True)
    # Temp user-data-dirs created during launch retries; removed on kill.
    _temp_dirs_to_cleanup: list[Path] = PrivateAttr(default_factory=list)
    # Original profile.user_data_dir, restored after kill / failed launch.
    _original_user_data_dir: str | None = PrivateAttr(default=None)

    @observe_debug(ignore_input=True, ignore_output=True, name='browser_launch_event')
    async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> BrowserLaunchResult:
        """Launch a local browser process.

        Returns:
            BrowserLaunchResult carrying the CDP URL of the launched browser.
        """
        try:
            self.logger.debug('[LocalBrowserWatchdog] Received BrowserLaunchEvent, launching local browser...')
            process, cdp_url = await self._launch_browser()
            self._subprocess = process
            return BrowserLaunchResult(cdp_url=cdp_url)
        except Exception as e:
            self.logger.error(f'[LocalBrowserWatchdog] Exception in on_BrowserLaunchEvent: {e}', exc_info=True)
            raise

    async def on_BrowserKillEvent(self, event: BrowserKillEvent) -> None:
        """Kill the local browser subprocess and clean up temp state."""
        self.logger.debug('[LocalBrowserWatchdog] Killing local browser process')
        if self._subprocess:
            await self._cleanup_process(self._subprocess)
            self._subprocess = None
        # Clean up temp directories if any were created
        for temp_dir in self._temp_dirs_to_cleanup:
            self._cleanup_temp_dir(temp_dir)
        self._temp_dirs_to_cleanup.clear()
        # Restore original user_data_dir if it was modified
        if self._original_user_data_dir is not None:
            self.browser_session.browser_profile.user_data_dir = self._original_user_data_dir
            self._original_user_data_dir = None
        self.logger.debug('[LocalBrowserWatchdog] Browser cleanup completed')

    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """Listen for BrowserStopEvent and dispatch BrowserKillEvent without awaiting it."""
        if self.browser_session.is_local and self._subprocess:
            self.logger.debug('[LocalBrowserWatchdog] BrowserStopEvent received, dispatching BrowserKillEvent')
            # Dispatch BrowserKillEvent without awaiting so it gets processed after all BrowserStopEvent handlers
            self.event_bus.dispatch(BrowserKillEvent())

    @observe_debug(ignore_input=True, ignore_output=True, name='launch_browser_process')
    async def _launch_browser(self, max_retries: int = 3) -> tuple[psutil.Process, str]:
        """Launch browser process and return (process, cdp_url).

        Handles launch errors by falling back to temporary directories if needed.

        Args:
            max_retries: attempts before giving up; retries that look
                profile-lock related get a fresh temporary user-data-dir.

        Returns:
            Tuple of (psutil.Process, cdp_url)
        """
        # Keep track of original user_data_dir to restore if needed
        profile = self.browser_session.browser_profile
        self._original_user_data_dir = str(profile.user_data_dir) if profile.user_data_dir else None
        self._temp_dirs_to_cleanup = []
        for attempt in range(max_retries):
            try:
                # Get launch args from profile
                launch_args = profile.get_args()
                # Add debugging port
                debug_port = self._find_free_port()
                launch_args.extend(
                    [
                        f'--remote-debugging-port={debug_port}',
                    ]
                )
                assert '--user-data-dir' in str(launch_args), (
                    'User data dir must be set somewhere in launch args to a non-default path, otherwise Chrome will not let us attach via CDP'
                )
                # Get browser executable
                # Priority: custom executable > fallback paths > playwright subprocess
                if profile.executable_path:
                    browser_path = profile.executable_path
                    self.logger.debug(f'[LocalBrowserWatchdog] 📦 Using custom local browser executable_path= {browser_path}')
                else:
                    # Try fallback paths first (system browsers preferred)
                    browser_path = self._find_installed_browser_path()
                    if not browser_path:
                        self.logger.error(
                            '[LocalBrowserWatchdog] ⚠️ No local browser binary found, installing browser using playwright subprocess...'
                        )
                        browser_path = await self._install_browser_with_playwright()
                    self.logger.debug(f'[LocalBrowserWatchdog] 📦 Found local browser installed at executable_path= {browser_path}')
                if not browser_path:
                    raise RuntimeError('No local Chrome/Chromium install found, and failed to install with playwright')
                # Launch browser subprocess directly
                self.logger.debug(f'[LocalBrowserWatchdog] 🚀 Launching browser subprocess with {len(launch_args)} args...')
                self.logger.debug(
                    f'[LocalBrowserWatchdog] 📂 user_data_dir={profile.user_data_dir}, profile_directory={profile.profile_directory}'
                )
                # NOTE: local name `subprocess` shadows the stdlib module name,
                # but that module is not imported in this file so it is harmless.
                subprocess = await asyncio.create_subprocess_exec(
                    browser_path,
                    *launch_args,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                self.logger.debug(
                    f'[LocalBrowserWatchdog] 🎭 Browser running with browser_pid= {subprocess.pid} 🔗 listening on CDP port :{debug_port}'
                )
                # Convert to psutil.Process
                process = psutil.Process(subprocess.pid)
                # Wait for CDP to be ready and get the URL
                cdp_url = await self._wait_for_cdp_url(debug_port)
                # Success! Clean up only the temp dirs we created but didn't use
                currently_used_dir = str(profile.user_data_dir)
                unused_temp_dirs = [tmp_dir for tmp_dir in self._temp_dirs_to_cleanup if str(tmp_dir) != currently_used_dir]
                for tmp_dir in unused_temp_dirs:
                    try:
                        shutil.rmtree(tmp_dir, ignore_errors=True)
                    except Exception:
                        pass
                # Keep only the in-use directory for cleanup during browser kill
                if currently_used_dir and 'browseruse-tmp-' in currently_used_dir:
                    self._temp_dirs_to_cleanup = [Path(currently_used_dir)]
                else:
                    self._temp_dirs_to_cleanup = []
                return process, cdp_url
            except Exception as e:
                error_str = str(e).lower()
                # Check if this is a user_data_dir related error
                if any(err in error_str for err in ['singletonlock', 'user data directory', 'cannot create', 'already in use']):
                    self.logger.warning(f'Browser launch failed (attempt {attempt + 1}/{max_retries}): {e}')
                    if attempt < max_retries - 1:
                        # Create a temporary directory for next attempt
                        tmp_dir = Path(tempfile.mkdtemp(prefix='browseruse-tmp-'))
                        self._temp_dirs_to_cleanup.append(tmp_dir)
                        # Update profile to use temp directory
                        profile.user_data_dir = str(tmp_dir)
                        self.logger.debug(f'Retrying with temporary user_data_dir: {tmp_dir}')
                        # Small delay before retry
                        await asyncio.sleep(0.5)
                        continue
                # Not a recoverable error or last attempt failed
                # Restore original user_data_dir before raising
                if self._original_user_data_dir is not None:
                    profile.user_data_dir = self._original_user_data_dir
                # Clean up any temp dirs we created
                for tmp_dir in self._temp_dirs_to_cleanup:
                    try:
                        shutil.rmtree(tmp_dir, ignore_errors=True)
                    except Exception:
                        pass
                raise
        # Should not reach here, but just in case
        if self._original_user_data_dir is not None:
            profile.user_data_dir = self._original_user_data_dir
        raise RuntimeError(f'Failed to launch browser after {max_retries} attempts')

    @staticmethod
    def _find_installed_browser_path() -> str | None:
        """Try to find browser executable from common fallback locations.

        Prioritizes:
        1. System Chrome Stable
        2. Playwright chromium
        3. Other system native browsers (Chromium -> Chrome Canary/Dev -> Brave)
        4. Playwright headless-shell fallback

        Returns:
            Path to browser executable or None if not found
        """
        import glob
        import platform
        from pathlib import Path

        system = platform.system()
        patterns = []
        # Get playwright browsers path from environment variable if set
        playwright_path = os.environ.get('PLAYWRIGHT_BROWSERS_PATH')
        if system == 'Darwin':  # macOS
            if not playwright_path:
                playwright_path = '~/Library/Caches/ms-playwright'
            patterns = [
                '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
                f'{playwright_path}/chromium-*/chrome-mac/Chromium.app/Contents/MacOS/Chromium',
                '/Applications/Chromium.app/Contents/MacOS/Chromium',
                '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary',
                '/Applications/Brave Browser.app/Contents/MacOS/Brave Browser',
                f'{playwright_path}/chromium_headless_shell-*/chrome-mac/Chromium.app/Contents/MacOS/Chromium',
            ]
        elif system == 'Linux':
            if not playwright_path:
                playwright_path = '~/.cache/ms-playwright'
            patterns = [
                '/usr/bin/google-chrome-stable',
                '/usr/bin/google-chrome',
                '/usr/local/bin/google-chrome',
                f'{playwright_path}/chromium-*/chrome-linux*/chrome',
                '/usr/bin/chromium',
                '/usr/bin/chromium-browser',
                '/usr/local/bin/chromium',
                '/snap/bin/chromium',
                '/usr/bin/google-chrome-beta',
                '/usr/bin/google-chrome-dev',
                '/usr/bin/brave-browser',
                f'{playwright_path}/chromium_headless_shell-*/chrome-linux*/chrome',
            ]
        elif system == 'Windows':
            if not playwright_path:
                playwright_path = r'%LOCALAPPDATA%\ms-playwright'
            patterns = [
                r'C:\Program Files\Google\Chrome\Application\chrome.exe',
                r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe',
                r'%LOCALAPPDATA%\Google\Chrome\Application\chrome.exe',
                r'%PROGRAMFILES%\Google\Chrome\Application\chrome.exe',
                r'%PROGRAMFILES(X86)%\Google\Chrome\Application\chrome.exe',
                f'{playwright_path}\\chromium-*\\chrome-win\\chrome.exe',
                r'C:\Program Files\Chromium\Application\chrome.exe',
                r'C:\Program Files (x86)\Chromium\Application\chrome.exe',
                r'%LOCALAPPDATA%\Chromium\Application\chrome.exe',
                r'C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe',
                r'C:\Program Files (x86)\BraveSoftware\Brave-Browser\Application\brave.exe',
                r'C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe',
                r'C:\Program Files\Microsoft\Edge\Application\msedge.exe',
                r'%LOCALAPPDATA%\Microsoft\Edge\Application\msedge.exe',
                f'{playwright_path}\\chromium_headless_shell-*\\chrome-win\\chrome.exe',
            ]
        for pattern in patterns:
            # Expand user home directory
            expanded_pattern = Path(pattern).expanduser()
            # Handle Windows environment variables
            if system == 'Windows':
                pattern_str = str(expanded_pattern)
                for env_var in ['%LOCALAPPDATA%', '%PROGRAMFILES%', '%PROGRAMFILES(X86)%']:
                    if env_var in pattern_str:
                        env_key = env_var.strip('%').replace('(X86)', ' (x86)')
                        env_value = os.environ.get(env_key, '')
                        if env_value:
                            pattern_str = pattern_str.replace(env_var, env_value)
                expanded_pattern = Path(pattern_str)
            # Convert to string for glob
            pattern_str = str(expanded_pattern)
            # Check if pattern contains wildcards
            if '*' in pattern_str:
                # Use glob to expand the pattern
                matches = glob.glob(pattern_str)
                if matches:
                    # Sort matches and take the last one (alphanumerically highest version)
                    matches.sort()
                    browser_path = matches[-1]
                    if Path(browser_path).exists() and Path(browser_path).is_file():
                        return browser_path
            else:
                # Direct path check
                if expanded_pattern.exists() and expanded_pattern.is_file():
                    return str(expanded_pattern)
        return None

    async def _install_browser_with_playwright(self) -> str:
        """Get browser executable path from playwright in a subprocess to avoid thread issues.

        Raises:
            RuntimeError: if the install times out or no browser is found afterwards.
        """
        import platform

        # Build command - only use --with-deps on Linux (it fails on Windows/macOS)
        cmd = ['uvx', 'playwright', 'install', 'chrome']
        if platform.system() == 'Linux':
            cmd.append('--with-deps')
        # Run in subprocess with timeout
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        try:
            stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=60.0)
            self.logger.debug(f'[LocalBrowserWatchdog] 📦 Playwright install output: {stdout}')
            browser_path = self._find_installed_browser_path()
            if browser_path:
                return browser_path
            self.logger.error(f'[LocalBrowserWatchdog] ❌ Playwright local browser installation error: \n{stdout}\n{stderr}')
            raise RuntimeError('No local browser path found after: uvx playwright install chrome')
        except TimeoutError:
            # Kill the subprocess if it times out
            process.kill()
            await process.wait()
            raise RuntimeError('Timeout getting browser path from playwright')
        except Exception as e:
            # NOTE(review): the RuntimeError raised in the try body above is also
            # caught here and re-wrapped as 'Error getting browser path' — confirm
            # this is intentional.
            # Make sure subprocess is terminated
            if process.returncode is None:
                process.kill()
                await process.wait()
            raise RuntimeError(f'Error getting browser path: {e}')

    @staticmethod
    def _find_free_port() -> int:
        """Find a free port for the debugging interface."""
        import socket

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            # Bind to port 0 so the OS picks an unused port; the socket is
            # closed on `with` exit, leaving the port free for Chrome.
            s.bind(('127.0.0.1', 0))
            s.listen(1)
            port = s.getsockname()[1]
            return port

    @staticmethod
    async def _wait_for_cdp_url(port: int, timeout: float = 30) -> str:
        """Wait for the browser to start and return the CDP URL.

        Polls http://127.0.0.1:<port>/json/version until it answers 200.

        Raises:
            TimeoutError: if the endpoint never becomes ready within `timeout`.
        """
        import aiohttp

        start_time = asyncio.get_event_loop().time()
        while asyncio.get_event_loop().time() - start_time < timeout:
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'http://127.0.0.1:{port}/json/version') as resp:
                        if resp.status == 200:
                            # Chrome is ready
                            return f'http://127.0.0.1:{port}/'
                        else:
                            # Chrome is starting up and returning 502/500 errors
                            await asyncio.sleep(0.1)
            except Exception:
                # Connection error - Chrome might not be ready yet
                await asyncio.sleep(0.1)
        raise TimeoutError(f'Browser did not start within {timeout} seconds')

    @staticmethod
    async def _cleanup_process(process: psutil.Process) -> None:
        """Clean up browser process.

        Tries SIGTERM first, waits up to ~5s, then escalates to SIGKILL.

        Args:
            process: psutil.Process to terminate
        """
        if not process:
            return
        try:
            # Try graceful shutdown first
            process.terminate()
            # Use async wait instead of blocking wait
            for _ in range(50):  # Wait up to 5 seconds (50 * 0.1)
                if not process.is_running():
                    return
                await asyncio.sleep(0.1)
            # If still running after 5 seconds, force kill
            if process.is_running():
                process.kill()
                # Give it a moment to die
                await asyncio.sleep(0.1)
        except psutil.NoSuchProcess:
            # Process already gone
            pass
        except Exception:
            # Ignore any other errors during cleanup
            pass

    def _cleanup_temp_dir(self, temp_dir: Path | str) -> None:
        """Clean up temporary directory.

        Args:
            temp_dir: Path to temporary directory to remove
        """
        if not temp_dir:
            return
        try:
            temp_path = Path(temp_dir)
            # Only remove if it's actually a temp directory we created
            if 'browseruse-tmp-' in str(temp_path):
                shutil.rmtree(temp_path, ignore_errors=True)
        except Exception as e:
            self.logger.debug(f'Failed to cleanup temp dir {temp_dir}: {e}')

    @property
    def browser_pid(self) -> int | None:
        """Get the browser process ID."""
        if self._subprocess:
            return self._subprocess.pid
        return None

    @staticmethod
    async def get_browser_pid_via_cdp(browser) -> int | None:
        """Get the browser process ID via CDP SystemInfo.getProcessInfo.

        Args:
            browser: Playwright Browser instance

        Returns:
            Process ID or None if failed
        """
        try:
            cdp_session = await browser.new_browser_cdp_session()
            result = await cdp_session.send('SystemInfo.getProcessInfo')
            process_info = result.get('processInfo', {})
            pid = process_info.get('id')
            await cdp_session.detach()
            return pid
        except Exception:
            # If we can't get PID via CDP, it's not critical
            return None
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/storage_state_watchdog.py | browser_use/browser/watchdogs/storage_state_watchdog.py | """Storage state watchdog for managing browser cookies and storage persistence."""
import asyncio
import json
import os
from pathlib import Path
from typing import Any, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.network import Cookie
from pydantic import Field, PrivateAttr
from browser_use.browser.events import (
BrowserConnectedEvent,
BrowserStopEvent,
LoadStorageStateEvent,
SaveStorageStateEvent,
StorageStateLoadedEvent,
StorageStateSavedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
class StorageStateWatchdog(BaseWatchdog):
"""Monitors and persists browser storage state including cookies and localStorage."""
# Event contracts
LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
BrowserConnectedEvent,
BrowserStopEvent,
SaveStorageStateEvent,
LoadStorageStateEvent,
]
EMITS: ClassVar[list[type[BaseEvent]]] = [
StorageStateSavedEvent,
StorageStateLoadedEvent,
]
# Configuration
auto_save_interval: float = Field(default=30.0) # Auto-save every 30 seconds
save_on_change: bool = Field(default=True) # Save immediately when cookies change
# Private state
_monitoring_task: asyncio.Task | None = PrivateAttr(default=None)
_last_cookie_state: list[dict] = PrivateAttr(default_factory=list)
_save_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)
    async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
        """Start monitoring when browser starts.

        Also triggers an initial load of any persisted storage state so the
        fresh browser context starts with the saved cookies/localStorage.
        """
        self.logger.debug('[StorageStateWatchdog] 🍪 Initializing auth/cookies sync <-> with storage_state.json file')
        # Start monitoring
        await self._start_monitoring()
        # Automatically load storage state after browser start
        await self.event_bus.dispatch(LoadStorageStateEvent())
    async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
        """Stop the background storage monitoring task when the browser stops."""
        self.logger.debug('[StorageStateWatchdog] Stopping storage_state monitoring')
        await self._stop_monitoring()
async def on_SaveStorageStateEvent(self, event: SaveStorageStateEvent) -> None:
"""Handle storage state save request."""
# Use provided path or fall back to profile default
path = event.path
if path is None:
# Use profile default path if available
if self.browser_session.browser_profile.storage_state:
path = str(self.browser_session.browser_profile.storage_state)
else:
path = None # Skip saving if no path available
await self._save_storage_state(path)
async def on_LoadStorageStateEvent(self, event: LoadStorageStateEvent) -> None:
"""Handle storage state load request."""
# Use provided path or fall back to profile default
path = event.path
if path is None:
# Use profile default path if available
if self.browser_session.browser_profile.storage_state:
path = str(self.browser_session.browser_profile.storage_state)
else:
path = None # Skip loading if no path available
await self._load_storage_state(path)
async def _start_monitoring(self) -> None:
"""Start the monitoring task."""
if self._monitoring_task and not self._monitoring_task.done():
return
assert self.browser_session.cdp_client is not None
self._monitoring_task = create_task_with_error_handling(
self._monitor_storage_changes(), name='monitor_storage_changes', logger_instance=self.logger, suppress_exceptions=True
)
# self.logger'[StorageStateWatchdog] Started storage monitoring task')
async def _stop_monitoring(self) -> None:
    """Cancel the background monitoring task and wait for it to unwind."""
    task = self._monitoring_task
    if task is None or task.done():
        return
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        # Expected: we cancelled it ourselves.
        pass
async def _check_for_cookie_changes_cdp(self, event: dict) -> None:
    """Inspect a CDP network event for Set-Cookie headers and save if found.

    Intended for Network.responseReceivedExtraInfo events once CDP event
    listeners are wired up. Errors are logged, never raised.
    """
    try:
        headers = event.get('headers', {})
        # Header casing varies by browser/protocol version, so check both.
        if not any(name in headers for name in ('set-cookie', 'Set-Cookie')):
            return
        self.logger.debug('[StorageStateWatchdog] Cookie change detected via CDP')
        # Persist immediately when configured to save on every change.
        if self.save_on_change:
            await self._save_storage_state()
    except Exception as e:
        self.logger.warning(f'[StorageStateWatchdog] Error checking for cookie changes: {e}')
async def _monitor_storage_changes(self) -> None:
    """Periodically check for storage changes and auto-save.

    Runs until cancelled: sleeps for auto_save_interval seconds, then saves
    storage state if the cookie set changed since the last save. Cancellation
    exits the loop cleanly; any other exception is logged and the loop
    continues so one bad iteration can't kill monitoring.
    """
    while True:
        try:
            await asyncio.sleep(self.auto_save_interval)
            # Check if cookies have changed since the last persisted snapshot
            if await self._have_cookies_changed():
                self.logger.debug('[StorageStateWatchdog] Detected changes to sync with storage_state.json')
                await self._save_storage_state()
        except asyncio.CancelledError:
            # Task cancelled by _stop_monitoring() - exit quietly.
            break
        except Exception as e:
            self.logger.error(f'[StorageStateWatchdog] Error in monitoring loop: {e}')
async def _have_cookies_changed(self) -> bool:
    """Return True when the live cookie set differs from the last-saved one.

    Cookies are compared by (name, domain, path) identity and value; any
    failure to fetch or compare is treated as "no change".
    """
    if not self.browser_session.cdp_client:
        return False

    def _snapshot(cookies) -> dict:
        # Identity key -> value; .get() guards against missing optional fields.
        return {(c.get('name', ''), c.get('domain', ''), c.get('path', '')): c.get('value', '') for c in cookies}

    try:
        live_cookies = await self.browser_session._cdp_get_cookies()
        return _snapshot(live_cookies) != _snapshot(self._last_cookie_state)
    except Exception as e:
        self.logger.debug(f'[StorageStateWatchdog] Error comparing cookies: {e}')
        return False
async def _save_storage_state(self, path: str | None = None) -> None:
    """Save browser storage state to file.

    Serialized under _save_lock so concurrent triggers can't interleave.
    The write is atomic (temp file then rename over the target) and the
    previous file is kept as a .json.bak backup. New state is merged with
    any existing file contents before writing.

    Args:
        path: Destination file; defaults to the profile's storage_state
            setting. Silently returns when no path can be resolved.
    """
    async with self._save_lock:
        # Check if CDP client is available
        assert await self.browser_session.get_or_create_cdp_session(target_id=None)
        save_path = path or self.browser_session.browser_profile.storage_state
        if not save_path:
            return
        # Skip saving if the storage state is already a dict (indicates it was loaded from memory)
        # We only save to file if it started as a file path
        if isinstance(save_path, dict):
            self.logger.debug('[StorageStateWatchdog] Storage state is already a dict, skipping file save')
            return
        try:
            # Get current storage state using CDP
            storage_state = await self.browser_session._cdp_get_storage_state()
            # Update our last known state (read by _have_cookies_changed for change detection)
            self._last_cookie_state = storage_state.get('cookies', []).copy()
            # Convert path to Path object
            json_path = Path(save_path).expanduser().resolve()
            json_path.parent.mkdir(parents=True, exist_ok=True)
            # Merge with existing state if file exists (merge failure falls back to the fresh state)
            merged_state = storage_state
            if json_path.exists():
                try:
                    existing_state = json.loads(json_path.read_text())
                    merged_state = self._merge_storage_states(existing_state, dict(storage_state))
                except Exception as e:
                    self.logger.error(f'[StorageStateWatchdog] Failed to merge with existing state: {e}')
            # Write atomically: write to a temp file first, then rename over the target
            temp_path = json_path.with_suffix('.json.tmp')
            temp_path.write_text(json.dumps(merged_state, indent=4))
            # Backup existing file before it is replaced
            if json_path.exists():
                backup_path = json_path.with_suffix('.json.bak')
                json_path.replace(backup_path)
            # Move temp to final
            temp_path.replace(json_path)
            # Emit success event
            self.event_bus.dispatch(
                StorageStateSavedEvent(
                    path=str(json_path),
                    cookies_count=len(merged_state.get('cookies', [])),
                    origins_count=len(merged_state.get('origins', [])),
                )
            )
            self.logger.debug(
                f'[StorageStateWatchdog] Saved storage state to {json_path} '
                f'({len(merged_state.get("cookies", []))} cookies, '
                f'{len(merged_state.get("origins", []))} origins)'
            )
        except Exception as e:
            self.logger.error(f'[StorageStateWatchdog] Failed to save storage state: {e}')
async def _load_storage_state(self, path: str | None = None) -> None:
    """Load browser storage state from file.

    Cookies are applied immediately via CDP. localStorage/sessionStorage
    entries are registered as init scripts, so they take effect on each new
    document rather than on the current page.

    Args:
        path: Source file; defaults to the profile's storage_state setting.
            Silently returns when no path is configured or the file is missing.
    """
    if not self.browser_session.cdp_client:
        self.logger.warning('[StorageStateWatchdog] No CDP client available for loading')
        return
    load_path = path or self.browser_session.browser_profile.storage_state
    if not load_path or not os.path.exists(str(load_path)):
        return
    try:
        # Read the storage state file asynchronously
        import anyio

        content = await anyio.Path(str(load_path)).read_text()
        storage = json.loads(content)
        # Apply cookies if present
        if 'cookies' in storage and storage['cookies']:
            await self.browser_session._cdp_set_cookies(storage['cookies'])
            # Remember what we loaded so change detection has a baseline
            self._last_cookie_state = storage['cookies'].copy()
            self.logger.debug(f'[StorageStateWatchdog] Added {len(storage["cookies"])} cookies from storage state')
        # Apply origins (localStorage/sessionStorage) if present
        if 'origins' in storage and storage['origins']:
            for origin in storage['origins']:
                if 'localStorage' in origin:
                    for item in origin['localStorage']:
                        # json.dumps() quotes/escapes the key and value for safe JS embedding
                        script = f"""
                        window.localStorage.setItem({json.dumps(item['name'])}, {json.dumps(item['value'])});
                        """
                        await self.browser_session._cdp_add_init_script(script)
                if 'sessionStorage' in origin:
                    for item in origin['sessionStorage']:
                        script = f"""
                        window.sessionStorage.setItem({json.dumps(item['name'])}, {json.dumps(item['value'])});
                        """
                        await self.browser_session._cdp_add_init_script(script)
            self.logger.debug(
                f'[StorageStateWatchdog] Applied localStorage/sessionStorage from {len(storage["origins"])} origins'
            )
        self.event_bus.dispatch(
            StorageStateLoadedEvent(
                path=str(load_path),
                cookies_count=len(storage.get('cookies', [])),
                origins_count=len(storage.get('origins', [])),
            )
        )
        self.logger.debug(f'[StorageStateWatchdog] Loaded storage state from: {load_path}')
    except Exception as e:
        self.logger.error(f'[StorageStateWatchdog] Failed to load storage state: {e}')
@staticmethod
def _merge_storage_states(existing: dict[str, Any], new: dict[str, Any]) -> dict[str, Any]:
"""Merge two storage states, with new values taking precedence."""
merged = existing.copy()
# Merge cookies
existing_cookies = {(c['name'], c['domain'], c['path']): c for c in existing.get('cookies', [])}
for cookie in new.get('cookies', []):
key = (cookie['name'], cookie['domain'], cookie['path'])
existing_cookies[key] = cookie
merged['cookies'] = list(existing_cookies.values())
# Merge origins
existing_origins = {origin['origin']: origin for origin in existing.get('origins', [])}
for origin in new.get('origins', []):
existing_origins[origin['origin']] = origin
merged['origins'] = list(existing_origins.values())
return merged
async def get_current_cookies(self) -> list[dict[str, Any]]:
    """Return the browser's current cookies as plain dicts.

    Returns an empty list when no CDP client is available or the fetch fails.
    """
    if not self.browser_session.cdp_client:
        return []
    try:
        raw_cookies = await self.browser_session._cdp_get_cookies()
        # Cookie is a TypedDict; normalize to plain dicts for callers.
        return [dict(cookie) for cookie in raw_cookies]
    except Exception as e:
        self.logger.error(f'[StorageStateWatchdog] Failed to get cookies: {e}')
        return []
async def add_cookies(self, cookies: list[dict[str, Any]]) -> None:
    """Set the given cookies in the browser via CDP.

    Plain dicts are wrapped in Cookie objects; anything else is passed
    through untouched. Failures are logged, never raised.
    """
    if not self.browser_session.cdp_client:
        self.logger.warning('[StorageStateWatchdog] No CDP client available for adding cookies')
        return
    try:
        normalized = [Cookie(**entry) if isinstance(entry, dict) else entry for entry in cookies]
        await self.browser_session._cdp_set_cookies(normalized)
        self.logger.debug(f'[StorageStateWatchdog] Added {len(cookies)} cookies')
    except Exception as e:
        self.logger.error(f'[StorageStateWatchdog] Failed to add cookies: {e}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/security_watchdog.py | browser_use/browser/watchdogs/security_watchdog.py | """Security watchdog for enforcing URL access policies."""
from typing import TYPE_CHECKING, ClassVar
from bubus import BaseEvent
from browser_use.browser.events import (
BrowserErrorEvent,
NavigateToUrlEvent,
NavigationCompleteEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
if TYPE_CHECKING:
pass
# Track if we've shown the glob warning.
# Module-level (not per-instance) so the warning is emitted at most once per
# process, even across multiple SecurityWatchdog instances.
_GLOB_WARNING_SHOWN = False
class SecurityWatchdog(BaseWatchdog):
    """Monitors and enforces security policies for URL access.

    Enforcement points:
    - before navigation starts (blocks by raising, stopping event propagation)
    - after navigation completes (catches redirects into blocked domains)
    - when a new tab appears (closes tabs opened on blocked URLs)
    """

    # Event contracts
    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
        NavigateToUrlEvent,
        NavigationCompleteEvent,
        TabCreatedEvent,
    ]
    EMITS: ClassVar[list[type[BaseEvent]]] = [
        BrowserErrorEvent,
    ]

    async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None:
        """Check if navigation URL is allowed before navigation starts.

        Raises:
            ValueError: when the URL fails the security policy; raising stops
                event propagation on the bus so the navigation never happens.
        """
        # Security check BEFORE navigation
        if not self._is_url_allowed(event.url):
            self.logger.warning(f'⛔️ Blocking navigation to disallowed URL: {event.url}')
            self.event_bus.dispatch(
                BrowserErrorEvent(
                    error_type='NavigationBlocked',
                    message=f'Navigation blocked to disallowed URL: {event.url}',
                    details={'url': event.url, 'reason': 'not_in_allowed_domains'},
                )
            )
            # Stop event propagation by raising exception
            raise ValueError(f'Navigation to {event.url} blocked by security policy')

    async def on_NavigationCompleteEvent(self, event: NavigationCompleteEvent) -> None:
        """Check if navigated URL is allowed (catches redirects to blocked domains).

        On a violation the tab is redirected to about:blank (instead of being
        closed) so the session stays alive and the agent can continue.
        """
        if not self._is_url_allowed(event.url):
            self.logger.warning(f'⛔️ Navigation to non-allowed URL detected: {event.url}')
            # Dispatch browser error so the agent sees what happened
            self.event_bus.dispatch(
                BrowserErrorEvent(
                    error_type='NavigationBlocked',
                    message=f'Navigation blocked to non-allowed URL: {event.url} - redirecting to about:blank',
                    details={'url': event.url, 'target_id': event.target_id},
                )
            )
            # Navigate to about:blank to keep session alive
            try:
                session = await self.browser_session.get_or_create_cdp_session(target_id=event.target_id)
                await session.cdp_client.send.Page.navigate(params={'url': 'about:blank'}, session_id=session.session_id)
                self.logger.info(f'⛔️ Navigated to about:blank after blocked URL: {event.url}')
            except Exception as e:
                # Fix: removed a stray dead `pass` that preceded this log line.
                self.logger.error(f'⛔️ Failed to navigate to about:blank: {type(e).__name__} {e}')

    async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
        """Check if a newly created tab's URL is allowed; close the tab if not."""
        if not self._is_url_allowed(event.url):
            self.logger.warning(f'⛔️ New tab created with disallowed URL: {event.url}')
            # Dispatch error and try to close the tab
            self.event_bus.dispatch(
                BrowserErrorEvent(
                    error_type='TabCreationBlocked',
                    message=f'Tab created with non-allowed URL: {event.url}',
                    details={'url': event.url, 'target_id': event.target_id},
                )
            )
            # Try to close the offending tab
            try:
                await self.browser_session._cdp_close_page(event.target_id)
                self.logger.info(f'⛔️ Closed new tab with non-allowed URL: {event.url}')
            except Exception as e:
                self.logger.error(f'⛔️ Failed to close new tab with non-allowed URL: {type(e).__name__} {e}')

    def _is_root_domain(self, domain: str) -> bool:
        """Check if a domain is a root domain (no subdomain present).

        Simple heuristic: only treat domains with exactly 1 dot (domain.tld)
        as root domains. For complex cases like country TLDs or subdomains,
        users should configure explicitly.

        Args:
            domain: The domain to check

        Returns:
            True if it's a simple root domain, False otherwise
        """
        # Skip if it contains wildcards or protocol
        if '*' in domain or '://' in domain:
            return False
        return domain.count('.') == 1

    def _log_glob_warning(self) -> None:
        """Log a one-time, process-wide warning about glob patterns in allowed_domains."""
        global _GLOB_WARNING_SHOWN
        if not _GLOB_WARNING_SHOWN:
            _GLOB_WARNING_SHOWN = True
            self.logger.warning(
                '⚠️ Using glob patterns in allowed_domains. '
                'Note: Patterns like "*.example.com" will match both subdomains AND the main domain.'
            )

    def _get_domain_variants(self, host: str) -> tuple[str, str]:
        """Get both variants of a domain (with and without www prefix).

        Args:
            host: The hostname to process

        Returns:
            Tuple of (original_host, variant_host)
            - If host starts with www., variant is without www.
            - Otherwise, variant is with www. prefix
        """
        if host.startswith('www.'):
            return (host, host[4:])  # ('www.example.com', 'example.com')
        else:
            return (host, f'www.{host}')  # ('example.com', 'www.example.com')

    def _is_ip_address(self, host: str) -> bool:
        """Check if a hostname is an IP address (IPv4 or IPv6).

        Args:
            host: The hostname to check

        Returns:
            True if the host is an IP address, False otherwise
        """
        import ipaddress

        try:
            # Try to parse as IP address (handles both IPv4 and IPv6)
            ipaddress.ip_address(host)
            return True
        except ValueError:
            return False
        except Exception:
            return False

    def _is_url_allowed(self, url: str) -> bool:
        """Check if a URL is allowed based on the allowed_domains configuration.

        Precedence: internal browser pages always pass; data:/blob: URLs pass;
        IP addresses may be blocked outright; when an allowlist is configured
        it takes precedence and the denylist is not consulted.

        Args:
            url: The URL to check

        Returns:
            True if the URL is allowed, False otherwise
        """
        # Always allow internal browser targets (before any other checks)
        if url in ['about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page', 'chrome://newtab/']:
            return True

        # Parse the URL to extract components
        from urllib.parse import urlparse

        try:
            parsed = urlparse(url)
        except Exception:
            # Invalid URL
            return False

        # Allow data: and blob: URLs (they don't have hostnames)
        if parsed.scheme in ['data', 'blob']:
            return True

        # Get the actual host (domain)
        host = parsed.hostname
        if not host:
            return False

        # Check if IP addresses should be blocked (before domain checks)
        if self.browser_session.browser_profile.block_ip_addresses:
            if self._is_ip_address(host):
                return False

        # If no allowed_domains specified, allow all URLs
        if (
            not self.browser_session.browser_profile.allowed_domains
            and not self.browser_session.browser_profile.prohibited_domains
        ):
            return True

        # Check allowed domains (fast path for sets, slow path for lists with patterns)
        if self.browser_session.browser_profile.allowed_domains:
            allowed_domains = self.browser_session.browser_profile.allowed_domains
            if isinstance(allowed_domains, set):
                # Fast path: O(1) exact hostname match - check both www and non-www variants
                host_variant, host_alt = self._get_domain_variants(host)
                return host_variant in allowed_domains or host_alt in allowed_domains
            else:
                # Slow path: O(n) pattern matching for lists
                for pattern in allowed_domains:
                    if self._is_url_match(url, host, parsed.scheme, pattern):
                        return True
                return False

        # Check prohibited domains (only reached when no allowlist is configured)
        if self.browser_session.browser_profile.prohibited_domains:
            prohibited_domains = self.browser_session.browser_profile.prohibited_domains
            if isinstance(prohibited_domains, set):
                # Fast path: O(1) exact hostname match - check both www and non-www variants
                host_variant, host_alt = self._get_domain_variants(host)
                return host_variant not in prohibited_domains and host_alt not in prohibited_domains
            else:
                # Slow path: O(n) pattern matching for lists
                for pattern in prohibited_domains:
                    if self._is_url_match(url, host, parsed.scheme, pattern):
                        return False
                return True

        return True

    def _is_url_match(self, url: str, host: str, scheme: str, pattern: str) -> bool:
        """Check if a URL matches a pattern (glob or exact, full-URL or domain-only)."""
        # Full URL for matching (scheme + host)
        full_url_pattern = f'{scheme}://{host}'

        # Handle glob patterns
        if '*' in pattern:
            self._log_glob_warning()
            import fnmatch

            # Check if pattern matches the host
            if pattern.startswith('*.'):
                # Pattern like *.example.com should match subdomains and main domain
                domain_part = pattern[2:]  # Remove *.
                if host == domain_part or host.endswith('.' + domain_part):
                    # Only match http/https URLs for domain-only patterns
                    if scheme in ['http', 'https']:
                        return True
            elif pattern.endswith('/*'):
                # Pattern like brave://* or http*://example.com/*
                if fnmatch.fnmatch(url, pattern):
                    return True
            else:
                # Use fnmatch for other glob patterns
                if fnmatch.fnmatch(
                    full_url_pattern if '://' in pattern else host,
                    pattern,
                ):
                    return True
        else:
            # Exact match
            if '://' in pattern:
                # Full URL pattern
                if url.startswith(pattern):
                    return True
            else:
                # Domain-only pattern (case-insensitive comparison)
                if host.lower() == pattern.lower():
                    return True
                # If pattern is a root domain, also check www subdomain
                if self._is_root_domain(pattern) and host.lower() == f'www.{pattern.lower()}':
                    return True

        return False
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/default_action_watchdog.py | browser_use/browser/watchdogs/default_action_watchdog.py | """Default browser action handlers using CDP."""
import asyncio
import json
from cdp_use.cdp.input.commands import DispatchKeyEventParameters
from browser_use.actor.utils import get_key_info
from browser_use.browser.events import (
ClickCoordinateEvent,
ClickElementEvent,
GetDropdownOptionsEvent,
GoBackEvent,
GoForwardEvent,
RefreshEvent,
ScrollEvent,
ScrollToTextEvent,
SelectDropdownOptionEvent,
SendKeysEvent,
TypeTextEvent,
UploadFileEvent,
WaitEvent,
)
from browser_use.browser.views import BrowserError, URLNotAllowedError
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.dom.service import EnhancedDOMTreeNode
from browser_use.observability import observe_debug
# Import EnhancedDOMTreeNode and rebuild event models that have forward references to it.
# This must be done after all imports are complete, because pydantic's
# model_rebuild() resolves the 'EnhancedDOMTreeNode' forward reference against
# names that are in scope at call time.
ClickCoordinateEvent.model_rebuild()
ClickElementEvent.model_rebuild()
GetDropdownOptionsEvent.model_rebuild()
SelectDropdownOptionEvent.model_rebuild()
TypeTextEvent.model_rebuild()
ScrollEvent.model_rebuild()
UploadFileEvent.model_rebuild()
class DefaultActionWatchdog(BaseWatchdog):
"""Handles default browser actions like click, type, and scroll using CDP."""
def _is_print_related_element(self, element_node: 'EnhancedDOMTreeNode') -> bool:
    """Check if an element is related to printing (print buttons, print dialogs, etc.).

    Primary signal is the onclick attribute (most reliable for print
    detection); matches handlers like window.print(), PrintElem(), print().
    """
    attrs = element_node.attributes
    if not attrs:
        return False
    handler = attrs.get('onclick', '').lower()
    return bool(handler) and 'print' in handler
async def _handle_print_button_click(self, element_node: EnhancedDOMTreeNode) -> dict | None:
    """Handle print button by directly generating PDF via CDP instead of opening dialog.

    Renders the current page with Page.printToPDF, saves the result into the
    profile's downloads directory (deduplicating filenames with " (n)"
    suffixes), and dispatches a FileDownloadedEvent so downstream listeners
    treat it like a normal download.

    Returns:
        Metadata dict with download path if successful, None otherwise
    """
    try:
        import base64
        import os
        from pathlib import Path

        # Get CDP session
        cdp_session = await self.browser_session.get_or_create_cdp_session(focus=True)
        # Generate PDF using CDP Page.printToPDF
        result = await asyncio.wait_for(
            cdp_session.cdp_client.send.Page.printToPDF(
                params={
                    'printBackground': True,
                    'preferCSSPageSize': True,
                },
                session_id=cdp_session.session_id,
            ),
            timeout=15.0,  # 15 second timeout for PDF generation
        )
        pdf_data = result.get('data')
        if not pdf_data:
            self.logger.warning('⚠️ PDF generation returned no data')
            return None
        # Decode base64 PDF data
        pdf_bytes = base64.b64decode(pdf_data)
        # Get downloads path
        downloads_path = self.browser_session.browser_profile.downloads_path
        if not downloads_path:
            self.logger.warning('⚠️ No downloads path configured, cannot save PDF')
            return None
        # Generate filename from page title or URL (fall back to a generic name)
        try:
            page_title = await asyncio.wait_for(self.browser_session.get_current_page_title(), timeout=2.0)
            # Sanitize title for filename
            import re

            safe_title = re.sub(r'[^\w\s-]', '', page_title)[:50]  # Max 50 chars
            filename = f'{safe_title}.pdf' if safe_title else 'print.pdf'
        except Exception:
            filename = 'print.pdf'
        # Ensure downloads directory exists
        downloads_dir = Path(downloads_path).expanduser().resolve()
        downloads_dir.mkdir(parents=True, exist_ok=True)
        # Generate unique filename if file exists
        final_path = downloads_dir / filename
        if final_path.exists():
            base, ext = os.path.splitext(filename)
            counter = 1
            while (downloads_dir / f'{base} ({counter}){ext}').exists():
                counter += 1
            final_path = downloads_dir / f'{base} ({counter}){ext}'
        # Write PDF to file (async so the event loop isn't blocked)
        import anyio

        async with await anyio.open_file(final_path, 'wb') as f:
            await f.write(pdf_bytes)
        file_size = final_path.stat().st_size
        self.logger.info(f'✅ Generated PDF via CDP: {final_path} ({file_size:,} bytes)')
        # Dispatch FileDownloadedEvent
        from browser_use.browser.events import FileDownloadedEvent

        page_url = await self.browser_session.get_current_page_url()
        self.browser_session.event_bus.dispatch(
            FileDownloadedEvent(
                url=page_url,
                path=str(final_path),
                file_name=final_path.name,
                file_size=file_size,
                file_type='pdf',
                mime_type='application/pdf',
                auto_download=False,  # This was intentional (user clicked print)
            )
        )
        return {'pdf_generated': True, 'path': str(final_path)}
    except TimeoutError:
        self.logger.warning('⏱️ PDF generation timed out')
        return None
    except Exception as e:
        self.logger.warning(f'⚠️ Failed to generate PDF via CDP: {type(e).__name__}: {e}')
        return None
@observe_debug(ignore_input=True, ignore_output=True, name='click_element_event')
async def on_ClickElementEvent(self, event: ClickElementEvent) -> dict | None:
    """Handle click request with CDP.

    Returns:
        Click metadata dict (coordinates, or {'validation_error': ...} when
        the click is rejected without raising), or None.

    Raises:
        BrowserError: if the session has no focused target (crashed session).

    Cleanup vs. original: removed a no-op ``try/except: raise`` wrapper, the
    unused ``starting_target_id`` local, and a dead ``download_path`` branch
    that could never fire (download handling lives in downloads_watchdog.py).
    """
    # Check if session is alive before attempting any operations
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'Cannot execute click: browser session is corrupted (target_id=None). Session may have crashed.'
        self.logger.error(f'{error_msg}')
        raise BrowserError(error_msg)

    # Use the provided node
    element_node = event.node
    index_for_logging = element_node.backend_node_id or 'unknown'

    # File inputs must not be clicked directly - that would open a native dialog
    if self.browser_session.is_file_input(element_node):
        msg = f'Index {index_for_logging} - has an element which opens file upload dialog. To upload files please use a specific function to upload files'
        self.logger.info(f'{msg}')
        # Return validation error instead of raising to avoid ERROR logs
        return {'validation_error': msg}

    # Print buttons are handled by generating a PDF directly instead of
    # letting the browser open a blocking print dialog
    if self._is_print_related_element(element_node):
        self.logger.info(
            f'🖨️ Detected print button (index {index_for_logging}), generating PDF directly instead of opening dialog...'
        )
        click_metadata = await self._handle_print_button_click(element_node)
        if click_metadata and click_metadata.get('pdf_generated'):
            msg = f'Generated PDF: {click_metadata.get("path")}'
            self.logger.info(f'💾 {msg}')
            return click_metadata
        # Fallback to regular click if PDF generation failed
        self.logger.warning('⚠️ PDF generation failed, falling back to regular click')

    # Perform the actual click using internal implementation
    click_metadata = await self._click_element_node_impl(element_node)

    # Check for validation errors - return them without raising to avoid ERROR logs
    if isinstance(click_metadata, dict) and 'validation_error' in click_metadata:
        self.logger.info(f'{click_metadata["validation_error"]}')
        return click_metadata

    msg = f'Clicked button {element_node.node_name}: {element_node.get_all_children_text(max_depth=2)}'
    self.logger.debug(f'🖱️ {msg}')
    self.logger.debug(f'Element xpath: {element_node.xpath}')
    return click_metadata if isinstance(click_metadata, dict) else None
async def on_ClickCoordinateEvent(self, event: ClickCoordinateEvent) -> dict | None:
    """Handle click at coordinates with CDP.

    Runs safety checks on whatever element sits at the coordinates (file
    inputs, <select> elements, print buttons) unless event.force is set.

    Returns:
        Click metadata dict, {'validation_error': ...} for rejected clicks,
        or None.

    Raises:
        BrowserError: if the session has no focused target (crashed session).

    Cleanup vs. original: removed a no-op ``try/except: raise`` wrapper.
    """
    # Check if session is alive before attempting any operations
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'Cannot execute click: browser session is corrupted (target_id=None). Session may have crashed.'
        self.logger.error(f'{error_msg}')
        raise BrowserError(error_msg)

    # If force=True, skip safety checks and click directly
    if event.force:
        self.logger.debug(f'Force clicking at coordinates ({event.coordinate_x}, {event.coordinate_y})')
        return await self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=True)

    # Get element at coordinates for safety checks
    element_node = await self.browser_session.get_dom_element_at_coordinates(event.coordinate_x, event.coordinate_y)
    if element_node is None:
        # No element found, click directly
        self.logger.debug(
            f'No element found at coordinates ({event.coordinate_x}, {event.coordinate_y}), proceeding with click anyway'
        )
        return await self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=False)

    # Safety check: file input
    if self.browser_session.is_file_input(element_node):
        msg = f'Cannot click at ({event.coordinate_x}, {event.coordinate_y}) - element is a file input. To upload files please use upload_file action'
        self.logger.info(f'{msg}')
        return {'validation_error': msg}

    # Safety check: select element
    tag_name = element_node.tag_name.lower() if element_node.tag_name else ''
    if tag_name == 'select':
        msg = f'Cannot click at ({event.coordinate_x}, {event.coordinate_y}) - element is a <select>. Use dropdown_options action instead.'
        self.logger.info(f'{msg}')
        return {'validation_error': msg}

    # Safety check: print-related elements
    if self._is_print_related_element(element_node):
        self.logger.info(
            f'🖨️ Detected print button at ({event.coordinate_x}, {event.coordinate_y}), generating PDF directly instead of opening dialog...'
        )
        click_metadata = await self._handle_print_button_click(element_node)
        if click_metadata and click_metadata.get('pdf_generated'):
            msg = f'Generated PDF: {click_metadata.get("path")}'
            self.logger.info(f'💾 {msg}')
            return click_metadata
        self.logger.warning('⚠️ PDF generation failed, falling back to regular click')

    # All safety checks passed, click at coordinates
    return await self._click_on_coordinate(event.coordinate_x, event.coordinate_y, force=False)
async def on_TypeTextEvent(self, event: TypeTextEvent) -> dict | None:
    """Handle text input request with CDP.

    Three paths:
    1. No usable backend_node_id -> type to the page (whatever has focus).
    2. Element typing succeeds -> return its input metadata (coordinates).
    3. Element typing fails -> best-effort click on the element, then fall
       back to typing to the page.

    Sensitive values are never echoed into logs; the key name (if known) is
    logged instead.

    Cleanup vs. original: removed a no-op outer ``try/except: raise`` wrapper
    and an unused exception binding in the fallback click handler.
    """
    element_node = event.node
    index_for_logging = element_node.backend_node_id or 'unknown'

    # Index 0 / falsy index means: type to the page (whatever has focus)
    if not element_node.backend_node_id or element_node.backend_node_id == 0:
        await self._type_to_page(event.text)
        # Log with sensitive data protection
        if event.is_sensitive:
            if event.sensitive_key_name:
                self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> to the page (current focus)')
            else:
                self.logger.info('⌨️ Typed <sensitive> to the page (current focus)')
        else:
            self.logger.info(f'⌨️ Typed "{event.text}" to the page (current focus)')
        return None  # No coordinates available for page typing

    try:
        # Try to type to the specific element
        input_metadata = await self._input_text_element_node_impl(
            element_node,
            event.text,
            clear=event.clear or (not event.text),
            is_sensitive=event.is_sensitive,
        )
    except Exception as e:
        # Element not found or error - fall back to typing to the page
        self.logger.warning(f'Failed to type to element {index_for_logging}: {e}. Falling back to page typing.')
        try:
            # Best effort: click the element first so it gets focus
            await asyncio.wait_for(self._click_element_node_impl(element_node), timeout=10.0)
        except Exception:
            pass
        await self._type_to_page(event.text)
        # Log with sensitive data protection
        if event.is_sensitive:
            if event.sensitive_key_name:
                self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> to the page as fallback')
            else:
                self.logger.info('⌨️ Typed <sensitive> to the page as fallback')
        else:
            self.logger.info(f'⌨️ Typed "{event.text}" to the page as fallback')
        return None  # No coordinates available for fallback typing

    # Log with sensitive data protection
    if event.is_sensitive:
        if event.sensitive_key_name:
            self.logger.info(f'⌨️ Typed <{event.sensitive_key_name}> into element with index {index_for_logging}')
        else:
            self.logger.info(f'⌨️ Typed <sensitive> into element with index {index_for_logging}')
    else:
        self.logger.info(f'⌨️ Typed "{event.text}" into element with index {index_for_logging}')
    self.logger.debug(f'Element xpath: {element_node.xpath}')
    # Note: We don't clear cached state here - let multi_act handle DOM change detection
    # by explicitly rebuilding and comparing when needed
    return input_metadata  # Return coordinates if available
async def on_ScrollEvent(self, event: ScrollEvent) -> None:
    """Handle scroll request with CDP.

    Scrolls the container of event.node when one is provided (falling back
    to a page-level scroll if container scrolling fails); otherwise performs
    a page-level scroll gesture.

    Raises:
        BrowserError: if there is no active target to scroll.

    Cleanup vs. original: removed a no-op ``try/except: raise`` wrapper.
    """
    # Check if we have a current target for scrolling
    if not self.browser_session.agent_focus_target_id:
        error_msg = 'No active target for scrolling'
        raise BrowserError(error_msg)

    # Convert direction and amount to pixels
    # Positive pixels = scroll down, negative = scroll up
    pixels = event.amount if event.direction == 'down' else -event.amount

    # Element-specific scrolling if node is provided
    if event.node is not None:
        element_node = event.node
        index_for_logging = element_node.backend_node_id or 'unknown'
        # Check if the element is an iframe
        is_iframe = element_node.tag_name and element_node.tag_name.upper() == 'IFRAME'
        # Try to scroll the element's container
        success = await self._scroll_element_container(element_node, pixels)
        if success:
            self.logger.debug(
                f'📜 Scrolled element {index_for_logging} container {event.direction} by {event.amount} pixels'
            )
            # For iframe scrolling, we need to force a full DOM refresh
            # because the iframe's content has changed position
            if is_iframe:
                self.logger.debug('🔄 Forcing DOM refresh after iframe scroll')
            # Note: We don't clear cached state here - let multi_act handle DOM change detection
            # by explicitly rebuilding and comparing when needed
            # Wait a bit for the scroll to settle and DOM to update
            await asyncio.sleep(0.2)
            return None

    # Perform target-level scroll
    await self._scroll_with_cdp_gesture(pixels)
    self.logger.debug(f'📜 Scrolled {event.direction} by {event.amount} pixels')
    return None
# ========== Implementation Methods ==========
async def _check_element_occlusion(self, backend_node_id: int, x: float, y: float, cdp_session) -> bool:
    """Check if an element is occluded by other elements at the given coordinates.

    Resolves the target node to a JS object, then runs document.elementFromPoint
    in the page: the element counts as clickable when it IS the element at the
    point, or contains it, or is contained by it. Any failure along the way is
    treated conservatively (resolution failures -> occluded; evaluation errors
    -> not occluded, per the final handler).

    Args:
        backend_node_id: The backend node ID of the target element
        x: X coordinate to check
        y: Y coordinate to check
        cdp_session: CDP session to use

    Returns:
        True if element is occluded, False if clickable
    """
    try:
        session_id = cdp_session.session_id
        # Get target element info for comparison
        target_result = await cdp_session.cdp_client.send.DOM.resolveNode(
            params={'backendNodeId': backend_node_id}, session_id=session_id
        )
        if 'object' not in target_result:
            self.logger.debug('Could not resolve target element, assuming occluded')
            return True
        object_id = target_result['object']['objectId']
        # Run the hit test in the page with `this` bound to the target element
        target_info_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
            params={
                'objectId': object_id,
                'functionDeclaration': """
                function() {
                    const getElementInfo = (el) => {
                        return {
                            tagName: el.tagName,
                            id: el.id || '',
                            className: el.className || '',
                            textContent: (el.textContent || '').substring(0, 100)
                        };
                    };
                    const elementAtPoint = document.elementFromPoint(arguments[0], arguments[1]);
                    if (!elementAtPoint) {
                        return { targetInfo: getElementInfo(this), isClickable: false };
                    }
                    // Simple containment-based clickability logic
                    const isClickable = this === elementAtPoint ||
                                        this.contains(elementAtPoint) ||
                                        elementAtPoint.contains(this);
                    return {
                        targetInfo: getElementInfo(this),
                        elementAtPointInfo: getElementInfo(elementAtPoint),
                        isClickable: isClickable
                    };
                }
                """,
                'arguments': [{'value': x}, {'value': y}],
                'returnByValue': True,
            },
            session_id=session_id,
        )
        if 'result' not in target_info_result or 'value' not in target_info_result['result']:
            self.logger.debug('Could not get target element info, assuming occluded')
            return True
        target_data = target_info_result['result']['value']
        is_clickable = target_data.get('isClickable', False)
        if is_clickable:
            self.logger.debug('Element is clickable (target, contained, or semantically related)')
            return False
        else:
            # Log both sides of the mismatch to make occlusion debuggable
            target_info = target_data.get('targetInfo', {})
            element_at_point_info = target_data.get('elementAtPointInfo', {})
            self.logger.debug(
                f'Element is occluded. Target: {target_info.get("tagName", "unknown")} '
                f'(id={target_info.get("id", "none")}), '
                f'ElementAtPoint: {element_at_point_info.get("tagName", "unknown")} '
                f'(id={element_at_point_info.get("id", "none")})'
            )
            return True
    except Exception as e:
        self.logger.debug(f'Occlusion check failed: {e}, assuming not occluded')
        return False
async def _click_element_node_impl(self, element_node) -> dict | None:
	"""
	Click an element using pure CDP with multiple fallback methods for getting element geometry.

	Strategy, in order:
	1. Refuse <select> and <input type=file> elements (return a validation_error dict).
	2. Scroll the element into view, read its rect, and convert it to a quad.
	3. If no geometry is available, fall back to a JavaScript `this.click()` via CDP.
	4. Pick the largest viewport-visible quad, clamp its center into the viewport,
	   and check for occlusion; occluded elements are clicked via JavaScript.
	5. Otherwise dispatch real mouse move/press/release events at the center point.

	Args:
		element_node: The DOM element to click

	Returns:
		{'click_x': x, 'click_y': y} when a coordinate click was performed,
		{'validation_error': msg} for unclickable element types, or None when a
		JavaScript-click fallback was used (no coordinates available).

	Raises:
		BrowserError: when every click strategy fails (URLNotAllowedError and
		BrowserError from inner calls are re-raised unchanged).
	"""
	try:
		# Check if element is a file input or select dropdown - these should not be clicked
		tag_name = element_node.tag_name.lower() if element_node.tag_name else ''
		element_type = element_node.attributes.get('type', '').lower() if element_node.attributes else ''
		if tag_name == 'select':
			msg = f'Cannot click on <select> elements. Use dropdown_options(index={element_node.backend_node_id}) action instead.'
			# Return error dict instead of raising to avoid ERROR logs
			return {'validation_error': msg}
		if tag_name == 'input' and element_type == 'file':
			msg = f'Cannot click on file input element (index={element_node.backend_node_id}). File uploads must be handled using upload_file_to_element action.'
			# Return error dict instead of raising to avoid ERROR logs
			return {'validation_error': msg}
		# Get CDP client
		cdp_session = await self.browser_session.cdp_client_for_node(element_node)
		# Get the correct session ID for the element's frame
		session_id = cdp_session.session_id
		# Get element bounds
		backend_node_id = element_node.backend_node_id
		# Get viewport dimensions for visibility checks
		layout_metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=session_id)
		viewport_width = layout_metrics['layoutViewport']['clientWidth']
		viewport_height = layout_metrics['layoutViewport']['clientHeight']
		# Scroll element into view FIRST before getting coordinates
		try:
			await cdp_session.cdp_client.send.DOM.scrollIntoViewIfNeeded(
				params={'backendNodeId': backend_node_id}, session_id=session_id
			)
			await asyncio.sleep(0.05)  # Wait for scroll to complete
			self.logger.debug('Scrolled element into view before getting coordinates')
		except Exception as e:
			# Non-fatal: the element may already be visible, or scrolling may be
			# unsupported for this node type.
			self.logger.debug(f'Failed to scroll element into view: {e}')
		# Get element coordinates using the unified method AFTER scrolling
		element_rect = await self.browser_session.get_element_coordinates(backend_node_id, cdp_session)
		# Convert rect to quads format if we got coordinates
		quads = []
		if element_rect:
			# Convert DOMRect to quad format (4 corner points, 8 floats)
			x, y, w, h = element_rect.x, element_rect.y, element_rect.width, element_rect.height
			quads = [
				[
					x,
					y,  # top-left
					x + w,
					y,  # top-right
					x + w,
					y + h,  # bottom-right
					x,
					y + h,  # bottom-left
				]
			]
			self.logger.debug(
				f'Got coordinates from unified method: {element_rect.x}, {element_rect.y}, {element_rect.width}x{element_rect.height}'
			)
		# If we still don't have quads, fall back to JS click
		if not quads:
			self.logger.warning('Could not get element geometry from any method, falling back to JavaScript click')
			try:
				result = await cdp_session.cdp_client.send.DOM.resolveNode(
					params={'backendNodeId': backend_node_id},
					session_id=session_id,
				)
				assert 'object' in result and 'objectId' in result['object'], (
					'Failed to find DOM element based on backendNodeId, maybe page content changed?'
				)
				object_id = result['object']['objectId']
				await cdp_session.cdp_client.send.Runtime.callFunctionOn(
					params={
						'functionDeclaration': 'function() { this.click(); }',
						'objectId': object_id,
					},
					session_id=session_id,
				)
				await asyncio.sleep(0.05)
				# Navigation is handled by BrowserSession via events
				return None
			except Exception as js_e:
				self.logger.warning(f'CDP JavaScript click also failed: {js_e}')
				if 'No node with given id found' in str(js_e):
					raise Exception('Element with given id not found')
				else:
					raise Exception(f'Failed to click element: {js_e}')
		# Find the largest visible quad within the viewport
		best_quad = None
		best_area = 0
		for quad in quads:
			if len(quad) < 8:
				continue  # malformed quad, needs all 4 corners
			# Calculate quad bounds (even indices are x, odd are y)
			xs = [quad[i] for i in range(0, 8, 2)]
			ys = [quad[i] for i in range(1, 8, 2)]
			min_x, max_x = min(xs), max(xs)
			min_y, max_y = min(ys), max(ys)
			# Check if quad intersects with viewport
			if max_x < 0 or max_y < 0 or min_x > viewport_width or min_y > viewport_height:
				continue  # Quad is completely outside viewport
			# Calculate visible area (intersection with viewport)
			visible_min_x = max(0, min_x)
			visible_max_x = min(viewport_width, max_x)
			visible_min_y = max(0, min_y)
			visible_max_y = min(viewport_height, max_y)
			visible_width = visible_max_x - visible_min_x
			visible_height = visible_max_y - visible_min_y
			visible_area = visible_width * visible_height
			if visible_area > best_area:
				best_area = visible_area
				best_quad = quad
		if not best_quad:
			# No visible quad found, use the first quad anyway
			best_quad = quads[0]
			self.logger.warning('No visible quad found, using first quad')
		# Calculate center point of the best quad (mean of the 4 corners)
		center_x = sum(best_quad[i] for i in range(0, 8, 2)) / 4
		center_y = sum(best_quad[i] for i in range(1, 8, 2)) / 4
		# Ensure click point is within viewport bounds
		center_x = max(0, min(viewport_width - 1, center_x))
		center_y = max(0, min(viewport_height - 1, center_y))
		# Check for occlusion before attempting CDP click
		is_occluded = await self._check_element_occlusion(backend_node_id, center_x, center_y, cdp_session)
		if is_occluded:
			# A real mouse click would hit the covering element, so click the
			# target programmatically instead.
			self.logger.debug('🚫 Element is occluded, falling back to JavaScript click')
			try:
				result = await cdp_session.cdp_client.send.DOM.resolveNode(
					params={'backendNodeId': backend_node_id},
					session_id=session_id,
				)
				assert 'object' in result and 'objectId' in result['object'], (
					'Failed to find DOM element based on backendNodeId'
				)
				object_id = result['object']['objectId']
				await cdp_session.cdp_client.send.Runtime.callFunctionOn(
					params={
						'functionDeclaration': 'function() { this.click(); }',
						'objectId': object_id,
					},
					session_id=session_id,
				)
				await asyncio.sleep(0.05)
				return None
			except Exception as js_e:
				self.logger.error(f'JavaScript click fallback failed: {js_e}')
				raise Exception(f'Failed to click occluded element: {js_e}')
		# Perform the click using CDP (element is not occluded)
		try:
			self.logger.debug(f'👆 Dragging mouse over element before clicking x: {center_x}px y: {center_y}px ...')
			# Move mouse to element
			await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
				params={
					'type': 'mouseMoved',
					'x': center_x,
					'y': center_y,
				},
				session_id=session_id,
			)
			await asyncio.sleep(0.05)
			# Mouse down. Wrapped in wait_for because a JS dialog opened by the
			# mousedown handler can block the CDP response indefinitely.
			self.logger.debug(f'👆🏾 Clicking x: {center_x}px y: {center_y}px ...')
			try:
				await asyncio.wait_for(
					cdp_session.cdp_client.send.Input.dispatchMouseEvent(
						params={
							'type': 'mousePressed',
							'x': center_x,
							'y': center_y,
							'button': 'left',
							'clickCount': 1,
						},
						session_id=session_id,
					),
					timeout=3.0,  # 3 second timeout for mousePressed
				)
				await asyncio.sleep(0.08)
			except TimeoutError:
				self.logger.debug('⏱️ Mouse down timed out (likely due to dialog), continuing...')
				# Don't sleep if we timed out
			# Mouse up
			try:
				await asyncio.wait_for(
					cdp_session.cdp_client.send.Input.dispatchMouseEvent(
						params={
							'type': 'mouseReleased',
							'x': center_x,
							'y': center_y,
							'button': 'left',
							'clickCount': 1,
						},
						session_id=session_id,
					),
					timeout=5.0,  # 5 second timeout for mouseReleased
				)
			except TimeoutError:
				self.logger.debug('⏱️ Mouse up timed out (possibly due to lag or dialog popup), continuing...')
			self.logger.debug('🖱️ Clicked successfully using x,y coordinates')
			# Return coordinates as dict for metadata
			return {'click_x': center_x, 'click_y': center_y}
		except Exception as e:
			self.logger.warning(f'CDP click failed: {type(e).__name__}: {e}')
			# Fall back to JavaScript click via CDP
			try:
				result = await cdp_session.cdp_client.send.DOM.resolveNode(
					params={'backendNodeId': backend_node_id},
					session_id=session_id,
				)
				assert 'object' in result and 'objectId' in result['object'], (
					'Failed to find DOM element based on backendNodeId, maybe page content changed?'
				)
				object_id = result['object']['objectId']
				await cdp_session.cdp_client.send.Runtime.callFunctionOn(
					params={
						'functionDeclaration': 'function() { this.click(); }',
						'objectId': object_id,
					},
					session_id=session_id,
				)
				# Small delay for dialog dismissal
				await asyncio.sleep(0.1)
				return None
			except Exception as js_e:
				self.logger.warning(f'CDP JavaScript click also failed: {js_e}')
				# Deliberately re-raises the original CDP error `e`, not `js_e`.
				raise Exception(f'Failed to click element: {e}')
		finally:
			# Always re-focus back to original top-level page session context in case click opened a new tab/popup/window/dialog/etc.
			# Use timeout to prevent hanging if dialog is blocking
			try:
				cdp_session = await asyncio.wait_for(self.browser_session.get_or_create_cdp_session(focus=True), timeout=3.0)
				await asyncio.wait_for(
					cdp_session.cdp_client.send.Runtime.runIfWaitingForDebugger(session_id=cdp_session.session_id),
					timeout=2.0,
				)
			except TimeoutError:
				self.logger.debug('⏱️ Refocus after click timed out (page may be blocked by dialog). Continuing...')
			except Exception as e:
				self.logger.debug(f'⚠️ Refocus error (non-critical): {type(e).__name__}: {e}')
	except URLNotAllowedError as e:
		# Propagate policy violations unchanged so the caller can handle them.
		raise e
	except BrowserError as e:
		raise e
	except Exception as e:
		# Extract key element info for error message
		element_info = f'<{element_node.tag_name or "unknown"}'
		if element_node.backend_node_id:
			element_info += f' index={element_node.backend_node_id}'
		element_info += '>'
		# Create helpful error message based on context
		error_detail = f'Failed to click element {element_info}. The element may not be interactable or visible.'
		# Add hint if element has index (common in code-use mode)
		if element_node.backend_node_id:
			error_detail += f' If the page changed after navigation/interaction, the index [{element_node.backend_node_id}] may be stale. Get fresh browser state before retrying.'
		raise BrowserError(
			message=f'Failed to click element: {str(e)}',
			long_term_memory=error_detail,
		)
async def _click_on_coordinate(self, coordinate_x: int, coordinate_y: int, force: bool = False) -> dict | None:
	"""
	Click directly at coordinates using CDP Input.dispatchMouseEvent.

	Dispatches a mouse move, press, and release at the given viewport point.
	Press/release are wrapped in timeouts because a JavaScript dialog opened by
	the click can block the CDP response.

	Args:
		coordinate_x: X coordinate in viewport
		coordinate_y: Y coordinate in viewport
		force: If True, skip all safety checks (used when force=True in event)
			NOTE(review): currently unused in this implementation - no safety
			checks are performed on this path; kept for interface compatibility.

	Returns:
		Dict with click coordinates or None

	Raises:
		BrowserError: if dispatching the mouse events fails.
	"""
	try:
		# Get CDP session
		cdp_session = await self.browser_session.get_or_create_cdp_session()
		session_id = cdp_session.session_id
		self.logger.debug(f'👆 Moving mouse to ({coordinate_x}, {coordinate_y})...')
		# Move mouse to coordinates
		await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
			params={
				'type': 'mouseMoved',
				'x': coordinate_x,
				'y': coordinate_y,
			},
			session_id=session_id,
		)
		await asyncio.sleep(0.05)
		# Mouse down
		self.logger.debug(f'👆🏾 Clicking at ({coordinate_x}, {coordinate_y})...')
		try:
			await asyncio.wait_for(
				cdp_session.cdp_client.send.Input.dispatchMouseEvent(
					params={
						'type': 'mousePressed',
						'x': coordinate_x,
						'y': coordinate_y,
						'button': 'left',
						'clickCount': 1,
					},
					session_id=session_id,
				),
				timeout=3.0,
			)
			await asyncio.sleep(0.05)
		except TimeoutError:
			# Continue anyway so a dialog handler can dismiss whatever blocked us.
			self.logger.debug('⏱️ Mouse down timed out (likely due to dialog), continuing...')
		# Mouse up
		try:
			await asyncio.wait_for(
				cdp_session.cdp_client.send.Input.dispatchMouseEvent(
					params={
						'type': 'mouseReleased',
						'x': coordinate_x,
						'y': coordinate_y,
						'button': 'left',
						'clickCount': 1,
					},
					session_id=session_id,
				),
				timeout=5.0,
			)
		except TimeoutError:
			self.logger.debug('⏱️ Mouse up timed out (possibly due to lag or dialog popup), continuing...')
		self.logger.debug(f'🖱️ Clicked successfully at ({coordinate_x}, {coordinate_y})')
		# Return coordinates as metadata
		return {'click_x': coordinate_x, 'click_y': coordinate_y}
	except Exception as e:
		self.logger.error(f'Failed to click at coordinates ({coordinate_x}, {coordinate_y}): {type(e).__name__}: {e}')
		raise BrowserError(
			message=f'Failed to click at coordinates: {e}',
			long_term_memory=f'Failed to click at coordinates ({coordinate_x}, {coordinate_y}). The coordinates may be outside viewport or the page may have changed.',
		)
async def _type_to_page(self, text: str):
"""
Type text to the page (whatever element currently has focus).
This is used when index is 0 or when an element can't be found.
"""
try:
# Get CDP client and session
cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=None, focus=True)
# Type the text character by character to the focused element
for char in text:
# Handle newline characters as Enter key
if char == '\n':
# Send proper Enter key sequence
await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
params={
'type': 'keyDown',
'key': 'Enter',
'code': 'Enter',
'windowsVirtualKeyCode': 13,
},
session_id=cdp_session.session_id,
)
# Send char event with carriage return
await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
params={
'type': 'char',
'text': '\r',
},
session_id=cdp_session.session_id,
)
# Send keyup
await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
params={
'type': 'keyUp',
'key': 'Enter',
'code': 'Enter',
'windowsVirtualKeyCode': 13,
},
session_id=cdp_session.session_id,
)
else:
# Handle regular characters
# Send keydown
await cdp_session.cdp_client.send.Input.dispatchKeyEvent(
params={
'type': 'keyDown',
'key': char,
},
session_id=cdp_session.session_id,
)
# Send char for actual text input
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/__init__.py | browser_use/browser/watchdogs/__init__.py | python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false | |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/crash_watchdog.py | browser_use/browser/watchdogs/crash_watchdog.py | """Browser watchdog for monitoring crashes and network timeouts using CDP."""
import asyncio
import time
from typing import TYPE_CHECKING, ClassVar
import psutil
from bubus import BaseEvent
from cdp_use.cdp.target import SessionID, TargetID
from cdp_use.cdp.target.events import TargetCrashedEvent
from pydantic import Field, PrivateAttr
from browser_use.browser.events import (
BrowserConnectedEvent,
BrowserErrorEvent,
BrowserStoppedEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
pass
class NetworkRequestTracker:
"""Tracks ongoing network requests."""
def __init__(self, request_id: str, start_time: float, url: str, method: str, resource_type: str | None = None):
self.request_id = request_id
self.start_time = start_time
self.url = url
self.method = method
self.resource_type = resource_type
class CrashWatchdog(BaseWatchdog):
	"""Monitors browser health for crashes and network timeouts using CDP.

	Starts a periodic loop on BrowserConnectedEvent that (a) times out network
	requests tracked in `_active_requests` and (b) pings the focused target with
	a trivial Runtime.evaluate to detect unresponsive/crashed sessions. Crash
	and timeout conditions are reported via BrowserErrorEvent.
	"""

	# Event contracts
	LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
		BrowserConnectedEvent,
		BrowserStoppedEvent,
		TabCreatedEvent,
		TabClosedEvent,
	]
	EMITS: ClassVar[list[type[BaseEvent]]] = [BrowserErrorEvent]

	# Configuration
	network_timeout_seconds: float = Field(default=10.0)  # request age before a NetworkTimeout error is emitted
	check_interval_seconds: float = Field(default=5.0)  # Reduced frequency to reduce noise

	# Private state
	_active_requests: dict[str, NetworkRequestTracker] = PrivateAttr(default_factory=dict)
	_monitoring_task: asyncio.Task | None = PrivateAttr(default=None)
	_last_responsive_checks: dict[str, float] = PrivateAttr(default_factory=dict)  # target_url -> timestamp
	_cdp_event_tasks: set[asyncio.Task] = PrivateAttr(default_factory=set)  # Track CDP event handler tasks
	_targets_with_listeners: set[str] = PrivateAttr(default_factory=set)  # Track targets that already have event listeners

	async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
		"""Start monitoring when browser is connected."""
		# logger.debug('[CrashWatchdog] Browser connected event received, beginning monitoring')
		# Fire-and-forget: the loop runs until BrowserStoppedEvent stops it.
		create_task_with_error_handling(
			self._start_monitoring(), name='start_crash_monitoring', logger_instance=self.logger, suppress_exceptions=True
		)
		# logger.debug(f'[CrashWatchdog] Monitoring task started: {self._monitoring_task and not self._monitoring_task.done()}')

	async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
		"""Stop monitoring when browser stops."""
		# logger.debug('[CrashWatchdog] Browser stopped, ending monitoring')
		await self._stop_monitoring()

	async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
		"""Attach to new tab."""
		# NOTE(review): attaches to the current agent-focus target rather than
		# `event.target_id` (which on_TabClosedEvent uses) - confirm this is
		# intended for tabs created in the background.
		assert self.browser_session.agent_focus_target_id is not None, 'No current target ID'
		await self.attach_to_target(self.browser_session.agent_focus_target_id)

	async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
		"""Clean up tracking when tab closes."""
		# Remove target from listener tracking to prevent memory leak
		if event.target_id in self._targets_with_listeners:
			self._targets_with_listeners.discard(event.target_id)
			self.logger.debug(f'[CrashWatchdog] Removed target {event.target_id[:8]}... from monitoring')

	async def attach_to_target(self, target_id: TargetID) -> None:
		"""Set up crash monitoring for a specific target using CDP.

		Idempotent: targets already present in `_targets_with_listeners` are
		skipped so each target gets at most one crash handler.
		"""
		try:
			# Check if we already have listeners for this target
			if target_id in self._targets_with_listeners:
				self.logger.debug(f'[CrashWatchdog] Event listeners already exist for target: {target_id[:8]}...')
				return
			# Create temporary session for monitoring without switching focus
			cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)

			# Register crash event handler
			def on_target_crashed(event: TargetCrashedEvent, session_id: SessionID | None = None):
				# Create and track the task (the CDP callback is synchronous, so
				# the async crash handling is scheduled on the loop).
				task = create_task_with_error_handling(
					self._on_target_crash_cdp(target_id),
					name='handle_target_crash',
					logger_instance=self.logger,
					suppress_exceptions=True,
				)
				self._cdp_event_tasks.add(task)
				# Remove from set when done
				task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))

			cdp_session.cdp_client.register.Target.targetCrashed(on_target_crashed)
			# Track that we've added listeners to this target
			self._targets_with_listeners.add(target_id)
			target = self.browser_session.session_manager.get_target(target_id)
			if target:
				self.logger.debug(f'[CrashWatchdog] Added target to monitoring: {target.url}')
		except Exception as e:
			self.logger.warning(f'[CrashWatchdog] Failed to attach to target {target_id}: {e}')

	async def _on_request_cdp(self, event: dict) -> None:
		"""Track new network request from CDP event."""
		request_id = event.get('requestId', '')
		request = event.get('request', {})
		self._active_requests[request_id] = NetworkRequestTracker(
			request_id=request_id,
			start_time=time.time(),
			url=request.get('url', ''),
			method=request.get('method', ''),
			resource_type=event.get('type'),
		)
		# logger.debug(f'[CrashWatchdog] Tracking request: {request.get("method", "")} {request.get("url", "")[:50]}...')

	def _on_response_cdp(self, event: dict) -> None:
		"""Remove request from tracking on response."""
		request_id = event.get('requestId', '')
		if request_id in self._active_requests:
			elapsed = time.time() - self._active_requests[request_id].start_time
			response = event.get('response', {})
			self.logger.debug(f'[CrashWatchdog] Request completed in {elapsed:.2f}s: {response.get("url", "")[:50]}...')
			# Don't remove yet - wait for loadingFinished

	def _on_request_failed_cdp(self, event: dict) -> None:
		"""Remove request from tracking on failure."""
		request_id = event.get('requestId', '')
		if request_id in self._active_requests:
			elapsed = time.time() - self._active_requests[request_id].start_time
			self.logger.debug(
				f'[CrashWatchdog] Request failed after {elapsed:.2f}s: {self._active_requests[request_id].url[:50]}...'
			)
			del self._active_requests[request_id]

	def _on_request_finished_cdp(self, event: dict) -> None:
		"""Remove request from tracking when loading is finished."""
		request_id = event.get('requestId', '')
		# pop() tolerates requests we never saw the start of
		self._active_requests.pop(request_id, None)

	async def _on_target_crash_cdp(self, target_id: TargetID) -> None:
		"""Handle target crash detected via CDP.

		Only reports the crash via BrowserErrorEvent; recovery is delegated to
		the SessionManager which reacts to the subsequent detach event.
		"""
		self.logger.debug(f'[CrashWatchdog] Target crashed: {target_id[:8]}..., waiting for detach event')
		target = self.browser_session.session_manager.get_target(target_id)
		is_agent_focus = (
			target
			and self.browser_session.agent_focus_target_id
			and target.target_id == self.browser_session.agent_focus_target_id
		)
		if is_agent_focus:
			self.logger.error(f'[CrashWatchdog] 💥 Agent focus tab crashed: {target.url} (SessionManager will auto-recover)')
		# Emit browser error event
		self.event_bus.dispatch(
			BrowserErrorEvent(
				error_type='TargetCrash',
				message=f'Target crashed: {target_id}',
				details={
					'url': target.url if target else None,
					'target_id': target_id,
					'was_agent_focus': is_agent_focus,
				},
			)
		)

	async def _start_monitoring(self) -> None:
		"""Start the monitoring loop (no-op if one is already running)."""
		assert self.browser_session.cdp_client is not None, 'Root CDP client not initialized - browser may not be connected yet'
		if self._monitoring_task and not self._monitoring_task.done():
			# logger.info('[CrashWatchdog] Monitoring already running')
			return
		self._monitoring_task = create_task_with_error_handling(
			self._monitoring_loop(), name='crash_monitoring_loop', logger_instance=self.logger, suppress_exceptions=True
		)
		# logger.debug('[CrashWatchdog] Monitoring loop created and started')

	async def _stop_monitoring(self) -> None:
		"""Stop the monitoring loop and clean up all tracking."""
		if self._monitoring_task and not self._monitoring_task.done():
			self._monitoring_task.cancel()
			try:
				await self._monitoring_task
			except asyncio.CancelledError:
				pass  # expected: we cancelled it ourselves
			self.logger.debug('[CrashWatchdog] Monitoring loop stopped')
		# Cancel all CDP event handler tasks
		for task in list(self._cdp_event_tasks):
			if not task.done():
				task.cancel()
		# Wait for all tasks to complete cancellation
		if self._cdp_event_tasks:
			await asyncio.gather(*self._cdp_event_tasks, return_exceptions=True)
		self._cdp_event_tasks.clear()
		# Clear all tracking
		self._active_requests.clear()
		self._targets_with_listeners.clear()
		self._last_responsive_checks.clear()

	async def _monitoring_loop(self) -> None:
		"""Main monitoring loop: periodic timeout + health checks until cancelled."""
		await asyncio.sleep(10)  # give browser time to start up and load the first page after first LLM call
		while True:
			try:
				await self._check_network_timeouts()
				await self._check_browser_health()
				await asyncio.sleep(self.check_interval_seconds)
			except asyncio.CancelledError:
				break
			except Exception as e:
				# Keep the loop alive on unexpected errors; a single failed check
				# should not end monitoring.
				self.logger.error(f'[CrashWatchdog] Error in monitoring loop: {e}')

	async def _check_network_timeouts(self) -> None:
		"""Check for network requests exceeding timeout."""
		current_time = time.time()
		timed_out_requests = []
		# Debug logging
		if self._active_requests:
			self.logger.debug(
				f'[CrashWatchdog] Checking {len(self._active_requests)} active requests for timeouts (threshold: {self.network_timeout_seconds}s)'
			)
		# Collect first, then mutate: we delete from _active_requests below and
		# must not do so while iterating it.
		for request_id, tracker in self._active_requests.items():
			elapsed = current_time - tracker.start_time
			self.logger.debug(
				f'[CrashWatchdog] Request {tracker.url[:30]}... elapsed: {elapsed:.1f}s, timeout: {self.network_timeout_seconds}s'
			)
			if elapsed >= self.network_timeout_seconds:
				timed_out_requests.append((request_id, tracker))
		# Emit events for timed out requests
		for request_id, tracker in timed_out_requests:
			self.logger.warning(
				f'[CrashWatchdog] Network request timeout after {self.network_timeout_seconds}s: '
				f'{tracker.method} {tracker.url[:100]}...'
			)
			self.event_bus.dispatch(
				BrowserErrorEvent(
					error_type='NetworkTimeout',
					message=f'Network request timed out after {self.network_timeout_seconds}s',
					details={
						'url': tracker.url,
						'method': tracker.method,
						'resource_type': tracker.resource_type,
						'elapsed_seconds': current_time - tracker.start_time,
					},
				)
			)
			# Remove from tracking
			del self._active_requests[request_id]

	async def _check_browser_health(self) -> None:
		"""Check if browser and targets are still responsive.

		Pings the focused session with a trivial JS expression under a short
		timeout; also redirects stray chrome new-tab pages to about:blank.
		"""
		try:
			self.logger.debug(f'[CrashWatchdog] Checking browser health for target {self.browser_session.agent_focus_target_id}')
			cdp_session = await self.browser_session.get_or_create_cdp_session()
			for target in self.browser_session.session_manager.get_all_page_targets():
				if self._is_new_tab_page(target.url) and target.url != 'about:blank':
					self.logger.debug(f'[CrashWatchdog] Redirecting chrome://new-tab-page/ to about:blank {target.url}')
					cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=target.target_id)
					await cdp_session.cdp_client.send.Page.navigate(
						params={'url': 'about:blank'}, session_id=cdp_session.session_id
					)
			# Quick ping to check if session is alive
			self.logger.debug(f'[CrashWatchdog] Attempting to run simple JS test expression in session {cdp_session} 1+1')
			await asyncio.wait_for(
				cdp_session.cdp_client.send.Runtime.evaluate(params={'expression': '1+1'}, session_id=cdp_session.session_id),
				timeout=1.0,
			)
			self.logger.debug(
				f'[CrashWatchdog] Browser health check passed for target {self.browser_session.agent_focus_target_id}'
			)
		except Exception as e:
			self.logger.error(
				f'[CrashWatchdog] ❌ Crashed/unresponsive session detected for target {self.browser_session.agent_focus_target_id} '
				f'error: {type(e).__name__}: {e} (Chrome will send detach event, SessionManager will auto-recover)'
			)
		# Check browser process if we have PID
		if self.browser_session._local_browser_watchdog and (proc := self.browser_session._local_browser_watchdog._subprocess):
			try:
				if proc.status() in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD):
					self.logger.error(f'[CrashWatchdog] Browser process {proc.pid} has crashed')
					# Browser process crashed - SessionManager will clean up via detach events
					# Just dispatch error event and stop monitoring
					self.event_bus.dispatch(
						BrowserErrorEvent(
							error_type='BrowserProcessCrashed',
							message=f'Browser process {proc.pid} has crashed',
							details={'pid': proc.pid, 'status': proc.status()},
						)
					)
					self.logger.warning('[CrashWatchdog] Browser process dead - stopping health monitoring')
					await self._stop_monitoring()
					return
			except Exception:
				pass  # psutil not available or process doesn't exist

	@staticmethod
	def _is_new_tab_page(url: str) -> bool:
		"""Check if URL is a new tab page."""
		return url in ['about:blank', 'chrome://new-tab-page/', 'chrome://newtab/']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/watchdogs/popups_watchdog.py | browser_use/browser/watchdogs/popups_watchdog.py | """Watchdog for handling JavaScript dialogs (alert, confirm, prompt) automatically."""
import asyncio
from typing import ClassVar
from bubus import BaseEvent
from pydantic import PrivateAttr
from browser_use.browser.events import TabCreatedEvent
from browser_use.browser.watchdog_base import BaseWatchdog
class PopupsWatchdog(BaseWatchdog):
	"""Handles JavaScript dialogs (alert, confirm, prompt) by automatically accepting them immediately.

	On each TabCreatedEvent it enables the Page domain (dialog events are only
	delivered when Page is enabled) and registers a `javascriptDialogOpening`
	handler on both the tab's session and the root CDP client so dialogs from
	any frame are caught.
	"""

	# Events this watchdog listens to and emits
	LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [TabCreatedEvent]
	EMITS: ClassVar[list[type[BaseEvent]]] = []

	# Track which targets have dialog handlers registered
	_dialog_listeners_registered: set[str] = PrivateAttr(default_factory=set)

	def __init__(self, **kwargs):
		super().__init__(**kwargs)
		self.logger.debug(f'🚀 PopupsWatchdog initialized with browser_session={self.browser_session}, ID={id(self)}')

	async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
		"""Set up JavaScript dialog handling when a new tab is created."""
		target_id = event.target_id
		self.logger.debug(f'🎯 PopupsWatchdog received TabCreatedEvent for target {target_id}')
		# Skip if we've already registered for this target
		if target_id in self._dialog_listeners_registered:
			self.logger.debug(f'Already registered dialog handlers for target {target_id}')
			return
		self.logger.debug(f'📌 Starting dialog handler setup for target {target_id}')
		try:
			# Get all CDP sessions for this target and any child frames
			cdp_session = await self.browser_session.get_or_create_cdp_session(
				target_id, focus=False
			)  # don't auto-focus new tabs! sometimes we need to open tabs in background
			# CRITICAL: Enable Page domain to receive dialog events
			try:
				await cdp_session.cdp_client.send.Page.enable(session_id=cdp_session.session_id)
				self.logger.debug(f'✅ Enabled Page domain for session {cdp_session.session_id[-8:]}')
			except Exception as e:
				self.logger.debug(f'Failed to enable Page domain: {e}')
			# Also register for the root CDP client to catch dialogs from any frame
			if self.browser_session._cdp_client_root:
				self.logger.debug('📌 Also registering handler on root CDP client')
				try:
					# Enable Page domain on root client too
					await self.browser_session._cdp_client_root.send.Page.enable()
					self.logger.debug('✅ Enabled Page domain on root CDP client')
				except Exception as e:
					self.logger.debug(f'Failed to enable Page domain on root: {e}')

			# Set up async handler for JavaScript dialogs - accept immediately without event dispatch
			async def handle_dialog(event_data, session_id: str | None = None):
				"""Handle JavaScript dialog events - accept immediately.

				Records the dialog message for inclusion in browser state, then
				tries two sessions in turn to dismiss the dialog, each under a
				short timeout (a blocking dialog can stall CDP responses).
				"""
				try:
					dialog_type = event_data.get('type', 'alert')
					message = event_data.get('message', '')
					# Store the popup message in browser session for inclusion in browser state
					if message:
						formatted_message = f'[{dialog_type}] {message}'
						self.browser_session._closed_popup_messages.append(formatted_message)
						self.logger.debug(f'📝 Stored popup message: {formatted_message[:100]}')
					# Choose action based on dialog type:
					# - alert: accept=true (click OK to dismiss)
					# - confirm: accept=true (click OK to proceed - safer for automation)
					# - prompt: accept=false (click Cancel since we can't provide input)
					# - beforeunload: accept=true (allow navigation)
					should_accept = dialog_type in ('alert', 'confirm', 'beforeunload')
					action_str = 'accepting (OK)' if should_accept else 'dismissing (Cancel)'
					self.logger.info(f"🔔 JavaScript {dialog_type} dialog: '{message[:100]}' - {action_str}...")
					dismissed = False
					# Approach 1: Use the session that detected the dialog (most reliable)
					if self.browser_session._cdp_client_root and session_id:
						try:
							self.logger.debug(f'🔄 Approach 1: Using detecting session {session_id[-8:]}')
							await asyncio.wait_for(
								self.browser_session._cdp_client_root.send.Page.handleJavaScriptDialog(
									params={'accept': should_accept},
									session_id=session_id,
								),
								timeout=0.5,
							)
							dismissed = True
							self.logger.info('✅ Dialog handled successfully via detecting session')
						# NOTE(review): (TimeoutError, Exception) is redundant -
						# Exception already covers TimeoutError.
						except (TimeoutError, Exception) as e:
							self.logger.debug(f'Approach 1 failed: {type(e).__name__}')
					# Approach 2: Try with current agent focus session
					if not dismissed and self.browser_session._cdp_client_root and self.browser_session.agent_focus_target_id:
						try:
							# Use public API with focus=False to avoid changing focus during popup dismissal
							cdp_session = await self.browser_session.get_or_create_cdp_session(
								self.browser_session.agent_focus_target_id, focus=False
							)
							self.logger.debug(f'🔄 Approach 2: Using agent focus session {cdp_session.session_id[-8:]}')
							await asyncio.wait_for(
								self.browser_session._cdp_client_root.send.Page.handleJavaScriptDialog(
									params={'accept': should_accept},
									session_id=cdp_session.session_id,
								),
								timeout=0.5,
							)
							dismissed = True
							self.logger.info('✅ Dialog handled successfully via agent focus session')
						except (TimeoutError, Exception) as e:
							self.logger.debug(f'Approach 2 failed: {type(e).__name__}')
				except Exception as e:
					self.logger.error(f'❌ Critical error in dialog handler: {type(e).__name__}: {e}')

			# Register handler on the specific session
			cdp_session.cdp_client.register.Page.javascriptDialogOpening(handle_dialog)  # type: ignore[arg-type]
			self.logger.debug(
				f'Successfully registered Page.javascriptDialogOpening handler for session {cdp_session.session_id}'
			)
			# Also register on root CDP client to catch dialogs from any frame
			if hasattr(self.browser_session._cdp_client_root, 'register'):
				try:
					self.browser_session._cdp_client_root.register.Page.javascriptDialogOpening(handle_dialog)  # type: ignore[arg-type]
					self.logger.debug('Successfully registered dialog handler on root CDP client for all frames')
				except Exception as root_error:
					self.logger.warning(f'Failed to register on root CDP client: {root_error}')
			# Mark this target as having dialog handling set up
			self._dialog_listeners_registered.add(target_id)
			self.logger.debug(f'Set up JavaScript dialog handling for tab {target_id}')
		except Exception as e:
			# Best-effort: a tab without dialog handling is degraded, not fatal.
			self.logger.warning(f'Failed to set up popup handling for tab {target_id}: {e}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/cloud/views.py | browser_use/browser/cloud/views.py | from typing import Literal
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
# Union with plain `str` keeps the alias permissive: the Literal members drive
# IDE autocomplete, while unknown/new region codes are still accepted and
# validated server-side.
ProxyCountryCode = (
    Literal[
        'us',  # United States
        'uk',  # United Kingdom
        'fr',  # France
        'it',  # Italy
        'jp',  # Japan
        'au',  # Australia
        'de',  # Germany
        'fi',  # Finland
        'ca',  # Canada
        'in',  # India
    ]
    | str
)

# Browser session timeout limits (in minutes)
MAX_FREE_USER_SESSION_TIMEOUT = 15  # Free users limited to 15 minutes
MAX_PAID_USER_SESSION_TIMEOUT = 240  # Paid users can go up to 4 hours
# Requests
class CreateBrowserRequest(BaseModel):
    """Request to create a cloud browser instance.

    Args:
        cloud_profile_id: The ID of the profile to use for the session
        cloud_proxy_country_code: Country code for proxy location
        cloud_timeout: The timeout for the session in minutes
    """

    # extra='forbid' rejects unknown fields; populate_by_name accepts both the
    # pythonic field names and their `cloud_*` wire aliases.
    model_config = ConfigDict(extra='forbid', populate_by_name=True)

    profile_id: UUID | str | None = Field(
        default=None,
        alias='cloud_profile_id',
        description='The ID of the profile to use for the session. Can be a UUID or a string of UUID.',
        title='Cloud Profile ID',
    )
    proxy_country_code: ProxyCountryCode | None = Field(
        default=None,
        alias='cloud_proxy_country_code',
        description='Country code for proxy location.',
        title='Cloud Proxy Country Code',
    )
    # ge=1 enforces a minimum of one minute; the free-tier 15-minute cap is
    # enforced server-side, not by this schema.
    timeout: int | None = Field(
        ge=1,
        le=MAX_PAID_USER_SESSION_TIMEOUT,
        default=None,
        alias='cloud_timeout',
        description=f'The timeout for the session in minutes. Free users are limited to {MAX_FREE_USER_SESSION_TIMEOUT} minutes, paid users can use up to {MAX_PAID_USER_SESSION_TIMEOUT} minutes ({MAX_PAID_USER_SESSION_TIMEOUT // 60} hours).',
        title='Cloud Timeout',
    )


CloudBrowserParams = CreateBrowserRequest  # alias for easier readability
# Responses
class CloudBrowserResponse(BaseModel):
    """Response from cloud browser API."""

    # NOTE(review): field names deliberately mirror the API's camelCase JSON
    # keys; the explicit aliases are redundant (alias == field name) but harmless.
    id: str
    status: str
    liveUrl: str = Field(alias='liveUrl')  # URL for watching the session in a browser
    cdpUrl: str = Field(alias='cdpUrl')  # Chrome DevTools Protocol endpoint to connect to
    timeoutAt: str = Field(alias='timeoutAt')  # when the session will be force-stopped
    startedAt: str = Field(alias='startedAt')
    finishedAt: str | None = Field(alias='finishedAt', default=None)  # None while still running
# Errors
class CloudBrowserError(Exception):
    """Base error for cloud browser operations (create/stop/transport failures)."""


class CloudBrowserAuthError(CloudBrowserError):
    """Raised when authentication with the cloud browser service fails."""
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/browser/cloud/cloud.py | browser_use/browser/cloud/cloud.py | """Cloud browser service integration for browser-use.
This module provides integration with the browser-use cloud browser service.
When cloud_browser=True, it automatically creates a cloud browser instance
and returns the CDP URL for connection.
"""
import logging
import os
import httpx
from browser_use.browser.cloud.views import CloudBrowserAuthError, CloudBrowserError, CloudBrowserResponse, CreateBrowserRequest
from browser_use.sync.auth import CloudAuthConfig
logger = logging.getLogger(__name__)
class CloudBrowserClient:
"""Client for browser-use cloud browser service."""
def __init__(self, api_base_url: str = 'https://api.browser-use.com'):
self.api_base_url = api_base_url
self.client = httpx.AsyncClient(timeout=30.0)
self.current_session_id: str | None = None
async def create_browser(
self, request: CreateBrowserRequest, extra_headers: dict[str, str] | None = None
) -> CloudBrowserResponse:
"""Create a new cloud browser instance. For full docs refer to https://docs.cloud.browser-use.com/api-reference/v-2-api-current/browsers/create-browser-session-browsers-post
Args:
request: CreateBrowserRequest object containing browser creation parameters
Returns:
CloudBrowserResponse: Contains CDP URL and other browser info
"""
url = f'{self.api_base_url}/api/v2/browsers'
# Try to get API key from environment variable first, then auth config
api_token = os.getenv('BROWSER_USE_API_KEY')
if not api_token:
# Fallback to auth config file
try:
auth_config = CloudAuthConfig.load_from_file()
api_token = auth_config.api_token
except Exception:
pass
if not api_token:
raise CloudBrowserAuthError(
'No authentication token found. Please set BROWSER_USE_API_KEY environment variable to authenticate with the cloud service. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
headers = {'X-Browser-Use-API-Key': api_token, 'Content-Type': 'application/json', **(extra_headers or {})}
# Convert request to dictionary and exclude unset fields
request_body = request.model_dump(exclude_unset=True)
try:
logger.info('🌤️ Creating cloud browser instance...')
response = await self.client.post(url, headers=headers, json=request_body)
if response.status_code == 401:
raise CloudBrowserAuthError(
'Authentication failed. Please make sure you have set BROWSER_USE_API_KEY environment variable to authenticate with the cloud service. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
elif response.status_code == 403:
raise CloudBrowserAuthError('Access forbidden. Please check your browser-use cloud subscription status.')
elif not response.is_success:
error_msg = f'Failed to create cloud browser: HTTP {response.status_code}'
try:
error_data = response.json()
if 'detail' in error_data:
error_msg += f' - {error_data["detail"]}'
except Exception:
pass
raise CloudBrowserError(error_msg)
browser_data = response.json()
browser_response = CloudBrowserResponse(**browser_data)
# Store session ID for cleanup
self.current_session_id = browser_response.id
logger.info(f'🌤️ Cloud browser created successfully: {browser_response.id}')
logger.debug(f'🌤️ CDP URL: {browser_response.cdpUrl}')
# Cyan color for live URL
logger.info(f'\033[36m🔗 Live URL: {browser_response.liveUrl}\033[0m')
return browser_response
except httpx.TimeoutException:
raise CloudBrowserError('Timeout while creating cloud browser. Please try again.')
except httpx.ConnectError:
raise CloudBrowserError('Failed to connect to cloud browser service. Please check your internet connection.')
except Exception as e:
if isinstance(e, (CloudBrowserError, CloudBrowserAuthError)):
raise
raise CloudBrowserError(f'Unexpected error creating cloud browser: {e}')
async def stop_browser(
self, session_id: str | None = None, extra_headers: dict[str, str] | None = None
) -> CloudBrowserResponse:
"""Stop a cloud browser session.
Args:
session_id: Session ID to stop. If None, uses current session.
Returns:
CloudBrowserResponse: Updated browser info with stopped status
Raises:
CloudBrowserAuthError: If authentication fails
CloudBrowserError: If stopping fails
"""
if session_id is None:
session_id = self.current_session_id
if not session_id:
raise CloudBrowserError('No session ID provided and no current session available')
url = f'{self.api_base_url}/api/v2/browsers/{session_id}'
# Try to get API key from environment variable first, then auth config
api_token = os.getenv('BROWSER_USE_API_KEY')
if not api_token:
# Fallback to auth config file
try:
auth_config = CloudAuthConfig.load_from_file()
api_token = auth_config.api_token
except Exception:
pass
if not api_token:
raise CloudBrowserAuthError(
'No authentication token found. Please set BROWSER_USE_API_KEY environment variable to authenticate with the cloud service. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
headers = {'X-Browser-Use-API-Key': api_token, 'Content-Type': 'application/json', **(extra_headers or {})}
request_body = {'action': 'stop'}
try:
logger.info(f'🌤️ Stopping cloud browser session: {session_id}')
response = await self.client.patch(url, headers=headers, json=request_body)
if response.status_code == 401:
raise CloudBrowserAuthError(
'Authentication failed. Please make sure you have set the BROWSER_USE_API_KEY environment variable to authenticate with the cloud service.'
)
elif response.status_code == 404:
# Session already stopped or doesn't exist - treating as error and clearing session
logger.debug(f'🌤️ Cloud browser session {session_id} not found (already stopped)')
# Clear current session if it was this one
if session_id == self.current_session_id:
self.current_session_id = None
raise CloudBrowserError(f'Cloud browser session {session_id} not found')
elif not response.is_success:
error_msg = f'Failed to stop cloud browser: HTTP {response.status_code}'
try:
error_data = response.json()
if 'detail' in error_data:
error_msg += f' - {error_data["detail"]}'
except Exception:
pass
raise CloudBrowserError(error_msg)
browser_data = response.json()
browser_response = CloudBrowserResponse(**browser_data)
# Clear current session if it was this one
if session_id == self.current_session_id:
self.current_session_id = None
logger.info(f'🌤️ Cloud browser session stopped: {browser_response.id}')
logger.debug(f'🌤️ Status: {browser_response.status}')
return browser_response
except httpx.TimeoutException:
raise CloudBrowserError('Timeout while stopping cloud browser. Please try again.')
except httpx.ConnectError:
raise CloudBrowserError('Failed to connect to cloud browser service. Please check your internet connection.')
except Exception as e:
if isinstance(e, (CloudBrowserError, CloudBrowserAuthError)):
raise
raise CloudBrowserError(f'Unexpected error stopping cloud browser: {e}')
async def close(self):
"""Close the HTTP client and cleanup any active sessions."""
# Try to stop current session if active
if self.current_session_id:
try:
await self.stop_browser()
except Exception as e:
logger.debug(f'Failed to stop cloud browser session during cleanup: {e}')
await self.client.aclose()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/prompts.py | browser_use/agent/prompts.py | import importlib.resources
from datetime import datetime
from typing import TYPE_CHECKING, Literal, Optional
from browser_use.dom.views import NodeType, SimplifiedNode
from browser_use.llm.messages import ContentPartImageParam, ContentPartTextParam, ImageURL, SystemMessage, UserMessage
from browser_use.observability import observe_debug
from browser_use.utils import is_new_tab_page, sanitize_surrogates
if TYPE_CHECKING:
from browser_use.agent.views import AgentStepInfo
from browser_use.browser.views import BrowserStateSummary
from browser_use.filesystem.file_system import FileSystem
class SystemPrompt:
    """Builds the agent's system prompt from a markdown template, an override, or both."""

    def __init__(
        self,
        max_actions_per_step: int = 3,
        override_system_message: str | None = None,
        extend_system_message: str | None = None,
        use_thinking: bool = True,
        flash_mode: bool = False,
        is_anthropic: bool = False,
        is_browser_use_model: bool = False,
    ):
        self.max_actions_per_step = max_actions_per_step
        self.use_thinking = use_thinking
        self.flash_mode = flash_mode
        self.is_anthropic = is_anthropic
        self.is_browser_use_model = is_browser_use_model

        if override_system_message is not None:
            # Caller supplied the full prompt; templates are not consulted at all.
            prompt = override_system_message
        else:
            self._load_prompt_template()
            prompt = self.prompt_template.format(max_actions=self.max_actions_per_step)

        if extend_system_message:
            prompt += f'\n{extend_system_message}'

        self.system_message = SystemMessage(content=prompt, cache=True)

    def _select_template_filename(self) -> str:
        """Pick the template file matching the model type and reasoning mode."""
        if self.is_browser_use_model:
            # Browser-use models use simplified prompts optimized for fine-tuned models
            if self.flash_mode:
                return 'system_prompt_browser_use_flash.md'
            if self.use_thinking:
                return 'system_prompt_browser_use.md'
            return 'system_prompt_browser_use_no_thinking.md'
        if self.flash_mode:
            return 'system_prompt_flash_anthropic.md' if self.is_anthropic else 'system_prompt_flash.md'
        return 'system_prompt.md' if self.use_thinking else 'system_prompt_no_thinking.md'

    def _load_prompt_template(self) -> None:
        """Load the prompt template from the markdown file."""
        try:
            template_filename = self._select_template_filename()
            # importlib.resources works both in development and when installed as a package
            resource = importlib.resources.files('browser_use.agent.system_prompts').joinpath(template_filename)
            with resource.open('r', encoding='utf-8') as fh:
                self.prompt_template = fh.read()
        except Exception as e:
            raise RuntimeError(f'Failed to load system prompt template: {e}')

    def get_system_message(self) -> SystemMessage:
        """
        Get the system prompt for the agent.

        Returns:
            SystemMessage: Formatted system prompt
        """
        return self.system_message
class AgentMessagePrompt:
    """Assembles the per-step user message (agent history, agent state, browser
    state, screenshots and file-derived images) that is sent to the LLM."""

    # Detail level forwarded to the vision API for every screenshot/image part.
    vision_detail_level: Literal['auto', 'low', 'high']

    def __init__(
        self,
        browser_state_summary: 'BrowserStateSummary',
        file_system: 'FileSystem',
        agent_history_description: str | None = None,
        read_state_description: str | None = None,
        task: str | None = None,
        include_attributes: list[str] | None = None,
        step_info: Optional['AgentStepInfo'] = None,
        page_filtered_actions: str | None = None,
        max_clickable_elements_length: int = 40000,
        sensitive_data: str | None = None,
        available_file_paths: list[str] | None = None,
        screenshots: list[str] | None = None,  # base64-encoded PNGs, oldest first
        vision_detail_level: Literal['auto', 'low', 'high'] = 'auto',
        include_recent_events: bool = False,
        sample_images: list[ContentPartTextParam | ContentPartImageParam] | None = None,
        read_state_images: list[dict] | None = None,  # [{"name": ..., "data": base64}] from read_file
        llm_screenshot_size: tuple[int, int] | None = None,  # (width, height) to downscale screenshots to
        unavailable_skills_info: str | None = None,
    ):
        self.browser_state: 'BrowserStateSummary' = browser_state_summary
        self.file_system: 'FileSystem | None' = file_system
        self.agent_history_description: str | None = agent_history_description
        self.read_state_description: str | None = read_state_description
        self.task: str | None = task
        self.include_attributes = include_attributes
        self.step_info = step_info
        self.page_filtered_actions: str | None = page_filtered_actions
        self.max_clickable_elements_length: int = max_clickable_elements_length
        self.sensitive_data: str | None = sensitive_data
        self.available_file_paths: list[str] | None = available_file_paths
        self.screenshots = screenshots or []
        self.vision_detail_level = vision_detail_level
        self.include_recent_events = include_recent_events
        self.sample_images = sample_images or []
        self.read_state_images = read_state_images or []
        self.unavailable_skills_info: str | None = unavailable_skills_info
        self.llm_screenshot_size = llm_screenshot_size
        # A browser state is mandatory; everything else is optional context.
        assert self.browser_state

    def _extract_page_statistics(self) -> dict[str, int]:
        """Extract high-level page statistics from DOM tree for LLM context"""
        stats = {
            'links': 0,
            'iframes': 0,
            'shadow_open': 0,
            'shadow_closed': 0,
            'scroll_containers': 0,
            'images': 0,
            'interactive_elements': 0,
            'total_elements': 0,
        }

        if not self.browser_state.dom_state or not self.browser_state.dom_state._root:
            return stats

        def traverse_node(node: SimplifiedNode) -> None:
            """Recursively traverse simplified DOM tree to count elements"""
            if not node or not node.original_node:
                return

            original = node.original_node
            stats['total_elements'] += 1

            # Count by node type and tag
            if original.node_type == NodeType.ELEMENT_NODE:
                tag = original.tag_name.lower() if original.tag_name else ''

                if tag == 'a':
                    stats['links'] += 1
                elif tag in ('iframe', 'frame'):
                    stats['iframes'] += 1
                elif tag == 'img':
                    stats['images'] += 1

                # Check if scrollable
                if original.is_actually_scrollable:
                    stats['scroll_containers'] += 1

                # Check if interactive
                if node.is_interactive:
                    stats['interactive_elements'] += 1

                # Check if this element hosts shadow DOM
                if node.is_shadow_host:
                    # Check if any shadow children are closed
                    has_closed_shadow = any(
                        child.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE
                        and child.original_node.shadow_root_type
                        and child.original_node.shadow_root_type.lower() == 'closed'
                        for child in node.children
                    )
                    if has_closed_shadow:
                        stats['shadow_closed'] += 1
                    else:
                        stats['shadow_open'] += 1

            elif original.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
                # Shadow DOM fragment - these are the actual shadow roots
                # But don't double-count since we count them at the host level above
                pass

            # Traverse children
            for child in node.children:
                traverse_node(child)

        traverse_node(self.browser_state.dom_state._root)
        return stats

    @observe_debug(ignore_input=True, ignore_output=True, name='_get_browser_state_description')
    def _get_browser_state_description(self) -> str:
        """Render the browser-state section: page stats, tabs, scroll position,
        dialogs and the (possibly truncated) interactive-element listing."""
        # Extract page statistics first
        page_stats = self._extract_page_statistics()

        # Format statistics for LLM
        stats_text = '<page_stats>'
        if page_stats['total_elements'] < 10:
            stats_text += 'Page appears empty (SPA not loaded?) - '
        stats_text += f'{page_stats["links"]} links, {page_stats["interactive_elements"]} interactive, '
        stats_text += f'{page_stats["iframes"]} iframes, {page_stats["scroll_containers"]} scroll containers'
        if page_stats['shadow_open'] > 0 or page_stats['shadow_closed'] > 0:
            stats_text += f', {page_stats["shadow_open"]} shadow(open), {page_stats["shadow_closed"]} shadow(closed)'
        if page_stats['images'] > 0:
            stats_text += f', {page_stats["images"]} images'
        stats_text += f', {page_stats["total_elements"]} total elements'
        stats_text += '</page_stats>\n'

        elements_text = self.browser_state.dom_state.llm_representation(include_attributes=self.include_attributes)

        # Hard cap on the element listing so the prompt stays bounded.
        if len(elements_text) > self.max_clickable_elements_length:
            elements_text = elements_text[: self.max_clickable_elements_length]
            truncated_text = f' (truncated to {self.max_clickable_elements_length} characters)'
        else:
            truncated_text = ''

        has_content_above = False
        has_content_below = False

        # Enhanced page information for the model
        page_info_text = ''
        if self.browser_state.page_info:
            pi = self.browser_state.page_info
            # Compute page statistics dynamically
            pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
            pages_below = pi.pixels_below / pi.viewport_height if pi.viewport_height > 0 else 0
            has_content_above = pages_above > 0
            has_content_below = pages_below > 0
            total_pages = pi.page_height / pi.viewport_height if pi.viewport_height > 0 else 0
            # NOTE(review): computed but currently unused — kept for the
            # commented-out "% of page" suffix below.
            current_page_position = pi.scroll_y / max(pi.page_height - pi.viewport_height, 1)
            page_info_text = '<page_info>'
            page_info_text += f'{pages_above:.1f} pages above, '
            page_info_text += f'{pages_below:.1f} pages below, '
            page_info_text += f'{total_pages:.1f} total pages'
            page_info_text += '</page_info>\n'
            # , at {current_page_position:.0%} of page

        if elements_text != '':
            if has_content_above:
                if self.browser_state.page_info:
                    pi = self.browser_state.page_info
                    pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
                    elements_text = f'... {pages_above:.1f} pages above ...\n{elements_text}'
            else:
                elements_text = f'[Start of page]\n{elements_text}'
            if not has_content_below:
                elements_text = f'{elements_text}\n[End of page]'
        else:
            elements_text = 'empty page'

        tabs_text = ''
        current_tab_candidates = []

        # Find tabs that match both URL and title to identify current tab more reliably
        for tab in self.browser_state.tabs:
            if tab.url == self.browser_state.url and tab.title == self.browser_state.title:
                current_tab_candidates.append(tab.target_id)

        # If we have exactly one match, mark it as current
        # Otherwise, don't mark any tab as current to avoid confusion
        current_target_id = current_tab_candidates[0] if len(current_tab_candidates) == 1 else None

        for tab in self.browser_state.tabs:
            tabs_text += f'Tab {tab.target_id[-4:]}: {tab.url} - {tab.title[:30]}\n'

        current_tab_text = f'Current tab: {current_target_id[-4:]}' if current_target_id is not None else ''

        # Check if current page is a PDF viewer and add appropriate message
        pdf_message = ''
        if self.browser_state.is_pdf_viewer:
            pdf_message = (
                'PDF viewer cannot be rendered. In this page, DO NOT use the extract action as PDF content cannot be rendered. '
            )
            pdf_message += (
                'Use the read_file action on the downloaded PDF in available_file_paths to read the full text content.\n\n'
            )

        # Add recent events if available and requested
        recent_events_text = ''
        if self.include_recent_events and self.browser_state.recent_events:
            recent_events_text = f'Recent browser events: {self.browser_state.recent_events}\n'

        # Add closed popup messages if any
        closed_popups_text = ''
        if self.browser_state.closed_popup_messages:
            closed_popups_text = 'Auto-closed JavaScript dialogs:\n'
            for popup_msg in self.browser_state.closed_popup_messages:
                closed_popups_text += f' - {popup_msg}\n'
            closed_popups_text += '\n'

        browser_state = f"""{stats_text}{current_tab_text}
Available tabs:
{tabs_text}
{page_info_text}
{recent_events_text}{closed_popups_text}{pdf_message}Interactive elements{truncated_text}:
{elements_text}
"""
        return browser_state

    def _get_agent_state_description(self) -> str:
        """Render the agent-state section: user task, file system summary,
        todo contents, optional sensitive data, step info and file paths."""
        if self.step_info:
            step_info_description = f'Step{self.step_info.step_number + 1} maximum:{self.step_info.max_steps}\n'
        else:
            step_info_description = ''

        time_str = datetime.now().strftime('%Y-%m-%d')
        step_info_description += f'Today:{time_str}'

        _todo_contents = self.file_system.get_todo_contents() if self.file_system else ''
        if not len(_todo_contents):
            _todo_contents = '[empty todo.md, fill it when applicable]'

        agent_state = f"""
<user_request>
{self.task}
</user_request>
<file_system>
{self.file_system.describe() if self.file_system else 'No file system available'}
</file_system>
<todo_contents>
{_todo_contents}
</todo_contents>
"""
        if self.sensitive_data:
            agent_state += f'<sensitive_data>{self.sensitive_data}</sensitive_data>\n'

        agent_state += f'<step_info>{step_info_description}</step_info>\n'

        if self.available_file_paths:
            available_file_paths_text = '\n'.join(self.available_file_paths)
            agent_state += f'<available_file_paths>{available_file_paths_text}\nUse with absolute paths</available_file_paths>\n'

        return agent_state

    def _resize_screenshot(self, screenshot_b64: str) -> str:
        """Resize screenshot to llm_screenshot_size if configured."""
        if not self.llm_screenshot_size:
            return screenshot_b64

        try:
            # Imported lazily so PIL is only required when resizing is enabled.
            import base64
            import logging
            from io import BytesIO

            from PIL import Image

            img = Image.open(BytesIO(base64.b64decode(screenshot_b64)))

            # Already at the target size — skip the re-encode round trip.
            if img.size == self.llm_screenshot_size:
                return screenshot_b64

            logging.getLogger(__name__).info(
                f'🔄 Resizing screenshot from {img.size[0]}x{img.size[1]} to {self.llm_screenshot_size[0]}x{self.llm_screenshot_size[1]} for LLM'
            )

            img_resized = img.resize(self.llm_screenshot_size, Image.Resampling.LANCZOS)
            buffer = BytesIO()
            img_resized.save(buffer, format='PNG')
            return base64.b64encode(buffer.getvalue()).decode('utf-8')
        except Exception as e:
            # Best-effort: a resize failure must never block the step.
            logging.getLogger(__name__).warning(f'Failed to resize screenshot: {e}, using original')
            return screenshot_b64

    @observe_debug(ignore_input=True, ignore_output=True, name='get_user_message')
    def get_user_message(self, use_vision: bool = True) -> UserMessage:
        """Get complete state as a single cached message"""
        # Don't pass screenshot to model if page is a new tab page, step is 0, and there's only one tab
        if (
            is_new_tab_page(self.browser_state.url)
            and self.step_info is not None
            and self.step_info.step_number == 0
            and len(self.browser_state.tabs) == 1
        ):
            use_vision = False

        # Build complete state description
        state_description = (
            '<agent_history>\n'
            + (self.agent_history_description.strip('\n') if self.agent_history_description else '')
            + '\n</agent_history>\n\n'
        )
        state_description += '<agent_state>\n' + self._get_agent_state_description().strip('\n') + '\n</agent_state>\n'
        state_description += '<browser_state>\n' + self._get_browser_state_description().strip('\n') + '\n</browser_state>\n'

        # Only add read_state if it has content
        read_state_description = self.read_state_description.strip('\n').strip() if self.read_state_description else ''
        if read_state_description:
            state_description += '<read_state>\n' + read_state_description + '\n</read_state>\n'

        if self.page_filtered_actions:
            state_description += '<page_specific_actions>\n'
            state_description += self.page_filtered_actions + '\n'
            state_description += '</page_specific_actions>\n'

        # Add unavailable skills information if any
        if self.unavailable_skills_info:
            state_description += '\n' + self.unavailable_skills_info + '\n'

        # Sanitize surrogates from all text content
        state_description = sanitize_surrogates(state_description)

        # Check if we have images to include (from read_file action)
        has_images = bool(self.read_state_images)

        # Multi-part (text + images) message only when vision is on and there is
        # something visual to attach; otherwise a plain text message.
        if (use_vision is True and self.screenshots) or has_images:
            # Start with text description
            content_parts: list[ContentPartTextParam | ContentPartImageParam] = [ContentPartTextParam(text=state_description)]

            # Add sample images
            content_parts.extend(self.sample_images)

            # Add screenshots with labels
            for i, screenshot in enumerate(self.screenshots):
                if i == len(self.screenshots) - 1:
                    label = 'Current screenshot:'
                else:
                    # Use simple, accurate labeling since we don't have actual step timing info
                    label = 'Previous screenshot:'

                # Add label as text content
                content_parts.append(ContentPartTextParam(text=label))

                # Resize screenshot if llm_screenshot_size is configured
                processed_screenshot = self._resize_screenshot(screenshot)

                # Add the screenshot
                content_parts.append(
                    ContentPartImageParam(
                        image_url=ImageURL(
                            url=f'data:image/png;base64,{processed_screenshot}',
                            media_type='image/png',
                            detail=self.vision_detail_level,
                        ),
                    )
                )

            # Add read_state images (from read_file action) before screenshots
            for img_data in self.read_state_images:
                img_name = img_data.get('name', 'unknown')
                img_base64 = img_data.get('data', '')
                if not img_base64:
                    continue

                # Detect image format from name
                if img_name.lower().endswith('.png'):
                    media_type = 'image/png'
                else:
                    media_type = 'image/jpeg'

                # Add label
                content_parts.append(ContentPartTextParam(text=f'Image from file: {img_name}'))

                # Add the image
                content_parts.append(
                    ContentPartImageParam(
                        image_url=ImageURL(
                            url=f'data:{media_type};base64,{img_base64}',
                            media_type=media_type,
                            detail=self.vision_detail_level,
                        ),
                    )
                )

            return UserMessage(content=content_parts, cache=True)

        return UserMessage(content=state_description, cache=True)
def get_rerun_summary_prompt(original_task: str, total_steps: int, success_count: int, error_count: int) -> str:
    """Build the LLM prompt used to summarize the outcome of a rerun task."""
    summary_prompt = f'''You are analyzing the completion of a rerun task. Based on the screenshot and execution info, provide a summary.

Original task: {original_task}

Execution statistics:
- Total steps: {total_steps}
- Successful steps: {success_count}
- Failed steps: {error_count}

Analyze the screenshot to determine:
1. Whether the task completed successfully
2. What the final state shows
3. Overall completion status (complete/partial/failed)

Respond with:
- summary: A clear, concise summary of what happened during the rerun
- success: Whether the task completed successfully (true/false)
- completion_status: One of "complete", "partial", or "failed"'''
    return summary_prompt
def get_rerun_summary_message(prompt: str, screenshot_b64: str | None = None) -> UserMessage:
    """
    Build a UserMessage for rerun summary generation.

    Args:
        prompt: The prompt text
        screenshot_b64: Optional base64-encoded screenshot

    Returns:
        UserMessage with prompt and optional screenshot
    """
    # No (or empty) screenshot: a simple string-content message suffices.
    if not screenshot_b64:
        return UserMessage(content=prompt)

    # With a screenshot: multi-part content — prompt text followed by the image.
    text_part = ContentPartTextParam(type='text', text=prompt)
    image_part = ContentPartImageParam(
        type='image_url',
        image_url=ImageURL(url=f'data:image/png;base64,{screenshot_b64}'),
    )
    parts: list[ContentPartTextParam | ContentPartImageParam] = [text_part, image_part]
    return UserMessage(content=parts)
def get_ai_step_system_prompt() -> str:
    """
    Get system prompt for AI step action used during rerun.

    Returns:
        System prompt string for AI step
    """
    prompt = """
You are an expert at extracting data from webpages.

<input>
You will be given:
1. A query describing what to extract
2. The markdown of the webpage (filtered to remove noise)
3. Optionally, a screenshot of the current page state
</input>

<instructions>
- Extract information from the webpage that is relevant to the query
- ONLY use the information available in the webpage - do not make up information
- If the information is not available, mention that clearly
- If the query asks for all items, list all of them
</instructions>

<output>
- Present ALL relevant information in a concise way
- Do not use conversational format - directly output the relevant information
- If information is unavailable, state that clearly
</output>
"""
    return prompt.strip()
def get_ai_step_user_prompt(query: str, stats_summary: str, content: str) -> str:
    """
    Build user prompt for AI step action.

    Args:
        query: What to extract or analyze
        stats_summary: Content statistics summary
        content: Page markdown content

    Returns:
        Formatted prompt string
    """
    # Each section is wrapped in a matching XML-style tag pair.
    sections = (
        ('query', query),
        ('content_stats', stats_summary),
        ('webpage_content', content),
    )
    return '\n\n'.join(f'<{tag}>\n{body}\n</{tag}>' for tag, body in sections)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/views.py | browser_use/agent/views.py | from __future__ import annotations
import json
import logging
import traceback
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Generic, Literal
from pydantic import BaseModel, ConfigDict, Field, ValidationError, create_model, model_validator
from typing_extensions import TypeVar
from uuid_extensions import uuid7str
from browser_use.agent.message_manager.views import MessageManagerState
from browser_use.browser.views import BrowserStateHistory
from browser_use.dom.views import DEFAULT_INCLUDE_ATTRIBUTES, DOMInteractedElement, DOMSelectorMap
# from browser_use.dom.history_tree_processor.service import (
# DOMElementNode,
# DOMHistoryElement,
# HistoryTreeProcessor,
# )
# from browser_use.dom.views import SelectorMap
from browser_use.filesystem.file_system import FileSystemState
from browser_use.llm.base import BaseChatModel
from browser_use.tokens.views import UsageSummary
from browser_use.tools.registry.views import ActionModel
logger = logging.getLogger(__name__)
class AgentSettings(BaseModel):
    """Configuration options for the Agent"""

    # 'auto' defers the per-step screenshot decision to the agent at runtime.
    use_vision: bool | Literal['auto'] = True
    vision_detail_level: Literal['auto', 'low', 'high'] = 'auto'
    save_conversation_path: str | Path | None = None
    save_conversation_path_encoding: str | None = 'utf-8'
    max_failures: int = 3  # consecutive failures tolerated before giving up
    generate_gif: bool | str = False  # a str value is presumably the output path — confirm with caller
    override_system_message: str | None = None  # replaces the built-in system prompt entirely
    extend_system_message: str | None = None  # appended after the built-in system prompt
    include_attributes: list[str] | None = DEFAULT_INCLUDE_ATTRIBUTES
    max_actions_per_step: int = 3
    use_thinking: bool = True
    flash_mode: bool = False  # If enabled, disables evaluation_previous_goal and next_goal, and sets use_thinking = False
    use_judge: bool = True
    ground_truth: str | None = None  # Ground truth answer or criteria for judge validation
    max_history_items: int | None = None  # None means unlimited history items
    page_extraction_llm: BaseChatModel | None = None
    calculate_cost: bool = False
    include_tool_call_examples: bool = False
    llm_timeout: int = 60  # Timeout in seconds for LLM calls (auto-detected: 30s for gemini, 90s for o3, 60s default)
    step_timeout: int = 180  # Timeout in seconds for each step
    final_response_after_failure: bool = True  # If True, attempt one final recovery call after max_failures
class AgentState(BaseModel):
    """Holds all state information for an Agent"""

    # arbitrary_types_allowed is needed for non-pydantic members (e.g. ActionModel subtypes)
    model_config = ConfigDict(arbitrary_types_allowed=True)

    agent_id: str = Field(default_factory=uuid7str)  # uuid7 is time-sortable
    n_steps: int = 1
    consecutive_failures: int = 0
    last_result: list[ActionResult] | None = None
    last_plan: str | None = None
    last_model_output: AgentOutput | None = None

    # Pause/resume state (kept serialisable for checkpointing)
    paused: bool = False
    stopped: bool = False

    session_initialized: bool = False  # Track if session events have been dispatched
    follow_up_task: bool = False  # Track if the agent is a follow-up task

    message_manager_state: MessageManagerState = Field(default_factory=MessageManagerState)
    file_system_state: FileSystemState | None = None
@dataclass
class AgentStepInfo:
    """Progress marker for one step of a bounded agent run."""

    step_number: int
    max_steps: int

    def is_last_step(self) -> bool:
        """Check if this is the last step"""
        # Equivalent to `step_number >= max_steps - 1` for integer steps.
        return self.step_number + 1 >= self.max_steps
class JudgementResult(BaseModel):
	"""LLM judgement of agent trace"""

	reasoning: str | None = Field(default=None, description='Explanation of the judgement')
	verdict: bool = Field(description='Whether the trace was successful or not')
	failure_reason: str | None = Field(
		default=None,
		description='Max 5 sentences explanation of why the task was not completed successfully in case of failure. If verdict is true, use an empty string.',
	)
	impossible_task: bool = Field(
		default=False,
		description='True if the task was impossible to complete due to vague instructions, broken website, inaccessible links, missing login credentials, or other insurmountable obstacles',
	)
	reached_captcha: bool = Field(
		default=False,
		description='True if the agent encountered captcha challenges during task execution',
	)
class ActionResult(BaseModel):
	"""Result of executing an action"""

	# For done action
	is_done: bool | None = False
	success: bool | None = None  # only meaningful together with is_done=True (see validator below)

	# For trace judgement
	judgement: JudgementResult | None = None

	# Error handling - always include in long term memory
	error: str | None = None

	# Files
	attachments: list[str] | None = None  # Files to display in the done message

	# Images (base64 encoded) - separate from text content for efficient handling
	images: list[dict[str, Any]] | None = None  # [{"name": "file.jpg", "data": "base64_string"}]

	# Always include in long term memory
	long_term_memory: str | None = None  # Memory of this action

	# if update_only_read_state is True we add the extracted_content to the agent context only once for the next step
	# if update_only_read_state is False we add the extracted_content to the agent long term memory if no long_term_memory is provided
	extracted_content: str | None = None
	include_extracted_content_only_once: bool = False  # Whether the extracted content should be used to update the read_state

	# Metadata for observability (e.g., click coordinates)
	metadata: dict | None = None

	# Deprecated
	include_in_memory: bool = False  # whether to include in extracted_content inside long_term_memory

	@model_validator(mode='after')
	def validate_success_requires_done(self):
		"""Ensure success=True can only be set when is_done=True"""
		if self.success is True and self.is_done is not True:
			raise ValueError(
				'success=True can only be set when is_done=True. '
				'For regular actions that succeed, leave success as None. '
				'Use success=False only for actions that fail.'
			)
		return self
class RerunSummaryAction(BaseModel):
	"""AI-generated summary for rerun completion"""

	summary: str = Field(description='Summary of what happened during the rerun')
	success: bool = Field(description='Whether the rerun completed successfully based on visual inspection')
	completion_status: Literal['complete', 'partial', 'failed'] = Field(
		description='Status of rerun completion: complete (all steps succeeded), partial (some steps succeeded), failed (task did not complete)'
	)
class StepMetadata(BaseModel):
	"""Metadata for a single step including timing and token information"""

	step_start_time: float  # epoch seconds
	step_end_time: float  # epoch seconds
	step_number: int
	step_interval: float | None = None  # NOTE(review): presumably the gap between steps — not read anywhere in this file

	@property
	def duration_seconds(self) -> float:
		"""Calculate step duration in seconds"""
		return self.step_end_time - self.step_start_time
class AgentBrain(BaseModel):
	"""Flattened view of the model's reasoning fields, kept for backward
	compatibility (returned by ``AgentOutput.current_state``)."""

	thinking: str | None = None
	evaluation_previous_goal: str
	memory: str
	next_goal: str
class AgentOutput(BaseModel):
	"""Structured output the LLM must produce each step: reasoning fields plus a list of actions."""

	model_config = ConfigDict(arbitrary_types_allowed=True, extra='forbid')

	thinking: str | None = None
	evaluation_previous_goal: str | None = None
	memory: str | None = None
	next_goal: str | None = None
	action: list[ActionModel] = Field(
		...,
		json_schema_extra={'min_items': 1},  # Ensure at least one action is provided
	)

	@classmethod
	def model_json_schema(cls, **kwargs):
		# Force the reasoning fields + action to be required in the emitted schema,
		# even though they are Optional on the model itself.
		schema = super().model_json_schema(**kwargs)
		schema['required'] = ['evaluation_previous_goal', 'memory', 'next_goal', 'action']
		return schema

	@property
	def current_state(self) -> AgentBrain:
		"""For backward compatibility - returns an AgentBrain with the flattened properties"""
		return AgentBrain(
			thinking=self.thinking,
			evaluation_previous_goal=self.evaluation_previous_goal if self.evaluation_previous_goal else '',
			memory=self.memory if self.memory else '',
			next_goal=self.next_goal if self.next_goal else '',
		)

	@staticmethod
	def type_with_custom_actions(custom_actions: type[ActionModel]) -> type[AgentOutput]:
		"""Extend actions with custom actions"""
		model_ = create_model(
			'AgentOutput',
			__base__=AgentOutput,
			action=(
				list[custom_actions],  # type: ignore
				Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
			),
			__module__=AgentOutput.__module__,
		)
		return model_

	@staticmethod
	def type_with_custom_actions_no_thinking(custom_actions: type[ActionModel]) -> type[AgentOutput]:
		"""Extend actions with custom actions and exclude thinking field"""

		class AgentOutputNoThinking(AgentOutput):
			@classmethod
			def model_json_schema(cls, **kwargs):
				# Drop 'thinking' from the schema entirely; keep the other fields required.
				schema = super().model_json_schema(**kwargs)
				del schema['properties']['thinking']
				schema['required'] = ['evaluation_previous_goal', 'memory', 'next_goal', 'action']
				return schema

		model = create_model(
			'AgentOutput',
			__base__=AgentOutputNoThinking,
			action=(
				list[custom_actions],  # type: ignore
				Field(..., json_schema_extra={'min_items': 1}),
			),
			__module__=AgentOutputNoThinking.__module__,
		)

		return model

	@staticmethod
	def type_with_custom_actions_flash_mode(custom_actions: type[ActionModel]) -> type[AgentOutput]:
		"""Extend actions with custom actions for flash mode - memory and action fields only"""

		class AgentOutputFlashMode(AgentOutput):
			@classmethod
			def model_json_schema(cls, **kwargs):
				schema = super().model_json_schema(**kwargs)
				# Remove thinking, evaluation_previous_goal, and next_goal fields
				del schema['properties']['thinking']
				del schema['properties']['evaluation_previous_goal']
				del schema['properties']['next_goal']
				# Update required fields to only include remaining properties
				schema['required'] = ['memory', 'action']
				return schema

		model = create_model(
			'AgentOutput',
			__base__=AgentOutputFlashMode,
			action=(
				list[custom_actions],  # type: ignore
				Field(..., json_schema_extra={'min_items': 1}),
			),
			__module__=AgentOutputFlashMode.__module__,
		)

		return model
class AgentHistory(BaseModel):
	"""History item for agent actions"""

	model_output: AgentOutput | None
	result: list[ActionResult]
	state: BrowserStateHistory
	metadata: StepMetadata | None = None
	state_message: str | None = None

	model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())

	@staticmethod
	def get_interacted_element(model_output: AgentOutput, selector_map: DOMSelectorMap) -> list[DOMInteractedElement | None]:
		"""Resolve each action's element index against the selector map.

		Returns one entry per action: the snapshot of the interacted DOM element,
		or None when the action has no index / the index is not in the map.
		"""
		elements = []
		for action in model_output.action:
			index = action.get_index()
			if index is not None and index in selector_map:
				el = selector_map[index]
				elements.append(DOMInteractedElement.load_from_enhanced_dom_tree(el))
			else:
				elements.append(None)
		return elements

	def _filter_sensitive_data_from_string(self, value: str, sensitive_data: dict[str, str | dict[str, str]] | None) -> str:
		"""Filter out sensitive data from a string value"""
		if not sensitive_data:
			return value

		# Collect all sensitive values, immediately converting old format to new format
		sensitive_values: dict[str, str] = {}

		# Process all sensitive data entries
		for key_or_domain, content in sensitive_data.items():
			if isinstance(content, dict):
				# Already in new format: {domain: {key: value}}
				for key, val in content.items():
					if val:  # Skip empty values
						sensitive_values[key] = val
			elif content:  # Old format: {key: value} - convert to new format internally
				# We treat this as if it was {'http*://*': {key_or_domain: content}}
				sensitive_values[key_or_domain] = content

		# If there are no valid sensitive data entries, just return the original value
		if not sensitive_values:
			return value

		# Replace all valid sensitive data values with their placeholder tags
		for key, val in sensitive_values.items():
			value = value.replace(val, f'<secret>{key}</secret>')

		return value

	def _filter_sensitive_data_from_dict(
		self, data: dict[str, Any], sensitive_data: dict[str, str | dict[str, str]] | None
	) -> dict[str, Any]:
		"""Recursively filter sensitive data from a dictionary"""
		if not sensitive_data:
			return data

		filtered_data = {}
		for key, value in data.items():
			if isinstance(value, str):
				filtered_data[key] = self._filter_sensitive_data_from_string(value, sensitive_data)
			elif isinstance(value, dict):
				filtered_data[key] = self._filter_sensitive_data_from_dict(value, sensitive_data)
			elif isinstance(value, list):
				# Filter strings and dicts inside lists; leave other item types untouched
				filtered_data[key] = [
					self._filter_sensitive_data_from_string(item, sensitive_data)
					if isinstance(item, str)
					else self._filter_sensitive_data_from_dict(item, sensitive_data)
					if isinstance(item, dict)
					else item
					for item in value
				]
			else:
				filtered_data[key] = value
		return filtered_data

	def model_dump(self, sensitive_data: dict[str, str | dict[str, str]] | None = None, **kwargs) -> dict[str, Any]:
		"""Custom serialization handling circular references and filtering sensitive data"""
		# Handle action serialization
		model_output_dump = None
		if self.model_output:
			action_dump = [action.model_dump(exclude_none=True, mode='json') for action in self.model_output.action]

			# Filter sensitive data only from input action parameters if sensitive_data is provided
			if sensitive_data:
				action_dump = [
					self._filter_sensitive_data_from_dict(action, sensitive_data) if 'input' in action else action
					for action in action_dump
				]

			model_output_dump = {
				'evaluation_previous_goal': self.model_output.evaluation_previous_goal,
				'memory': self.model_output.memory,
				'next_goal': self.model_output.next_goal,
				'action': action_dump,  # This preserves the actual action data
			}
			# Only include thinking if it's present
			if self.model_output.thinking is not None:
				model_output_dump['thinking'] = self.model_output.thinking

		# Handle result serialization - don't filter ActionResult data
		# as it should contain meaningful information for the agent
		result_dump = [r.model_dump(exclude_none=True, mode='json') for r in self.result]

		return {
			'model_output': model_output_dump,
			'result': result_dump,
			'state': self.state.to_dict(),
			'metadata': self.metadata.model_dump() if self.metadata else None,
			'state_message': self.state_message,
		}
# Type variable for the optional structured-output schema of an agent run
AgentStructuredOutput = TypeVar('AgentStructuredOutput', bound=BaseModel)
class AgentHistoryList(BaseModel, Generic[AgentStructuredOutput]):
	"""List of AgentHistory messages, i.e. the history of the agent's actions and thoughts."""

	history: list[AgentHistory]
	usage: UsageSummary | None = None

	# Schema used by the `structured_output` property; private attrs are not serialized
	_output_model_schema: type[AgentStructuredOutput] | None = None

	def total_duration_seconds(self) -> float:
		"""Get total duration of all steps in seconds"""
		total = 0.0
		for h in self.history:
			if h.metadata:
				total += h.metadata.duration_seconds
		return total

	def __len__(self) -> int:
		"""Return the number of history items"""
		return len(self.history)

	def __str__(self) -> str:
		"""Representation of the AgentHistoryList object"""
		return f'AgentHistoryList(all_results={self.action_results()}, all_model_outputs={self.model_actions()})'

	def add_item(self, history_item: AgentHistory) -> None:
		"""Add a history item to the list"""
		self.history.append(history_item)

	def __repr__(self) -> str:
		"""Representation of the AgentHistoryList object"""
		return self.__str__()

	def save_to_file(self, filepath: str | Path, sensitive_data: dict[str, str | dict[str, str]] | None = None) -> None:
		"""Save history to JSON file with proper serialization and optional sensitive data filtering"""
		try:
			Path(filepath).parent.mkdir(parents=True, exist_ok=True)
			data = self.model_dump(sensitive_data=sensitive_data)
			with open(filepath, 'w', encoding='utf-8') as f:
				json.dump(data, f, indent=2)
		except Exception as e:
			raise e

	# def save_as_playwright_script(
	# 	self,
	# 	output_path: str | Path,
	# 	sensitive_data_keys: list[str] | None = None,
	# 	browser_config: BrowserConfig | None = None,
	# 	context_config: BrowserContextConfig | None = None,
	# ) -> None:
	# 	"""
	# 	Generates a Playwright script based on the agent's history and saves it to a file.
	# 	Args:
	# 		output_path: The path where the generated Python script will be saved.
	# 		sensitive_data_keys: A list of keys used as placeholders for sensitive data
	# 							 (e.g., ['username_placeholder', 'password_placeholder']).
	# 							 These will be loaded from environment variables in the
	# 							 generated script.
	# 		browser_config: Configuration of the original Browser instance.
	# 		context_config: Configuration of the original BrowserContext instance.
	# 	"""
	# 	from browser_use.agent.playwright_script_generator import PlaywrightScriptGenerator
	# 	try:
	# 		serialized_history = self.model_dump()['history']
	# 		generator = PlaywrightScriptGenerator(serialized_history, sensitive_data_keys, browser_config, context_config)
	# 		script_content = generator.generate_script_content()
	# 		path_obj = Path(output_path)
	# 		path_obj.parent.mkdir(parents=True, exist_ok=True)
	# 		with open(path_obj, 'w', encoding='utf-8') as f:
	# 			f.write(script_content)
	# 	except Exception as e:
	# 		raise e

	def model_dump(self, **kwargs) -> dict[str, Any]:
		"""Custom serialization that properly uses AgentHistory's model_dump.

		Note: only 'history' is serialized; 'usage' is intentionally not included here.
		"""
		return {
			'history': [h.model_dump(**kwargs) for h in self.history],
		}

	@classmethod
	def load_from_dict(cls, data: dict[str, Any], output_model: type[AgentOutput]) -> AgentHistoryList:
		"""Build an AgentHistoryList from serialized data, validating model outputs with the given schema."""
		# loop through history and validate output_model actions to enrich with custom actions
		for h in data['history']:
			if h['model_output']:
				if isinstance(h['model_output'], dict):
					h['model_output'] = output_model.model_validate(h['model_output'])
				else:
					h['model_output'] = None
			# Backfill field missing from older serialized histories
			if 'interacted_element' not in h['state']:
				h['state']['interacted_element'] = None
		history = cls.model_validate(data)
		return history

	@classmethod
	def load_from_file(cls, filepath: str | Path, output_model: type[AgentOutput]) -> AgentHistoryList:
		"""Load history from JSON file"""
		with open(filepath, encoding='utf-8') as f:
			data = json.load(f)
		return cls.load_from_dict(data, output_model)

	def last_action(self) -> None | dict:
		"""Last action in history"""
		if self.history and self.history[-1].model_output:
			return self.history[-1].model_output.action[-1].model_dump(exclude_none=True, mode='json')
		return None

	def errors(self) -> list[str | None]:
		"""Get all errors from history, with None for steps without errors"""
		errors = []
		for h in self.history:
			step_errors = [r.error for r in h.result if r.error]

			# each step can have only one error
			errors.append(step_errors[0] if step_errors else None)
		return errors

	def final_result(self) -> None | str:
		"""Final result from history"""
		if self.history and self.history[-1].result[-1].extracted_content:
			return self.history[-1].result[-1].extracted_content
		return None

	def is_done(self) -> bool:
		"""Check if the agent is done"""
		if self.history and len(self.history[-1].result) > 0:
			last_result = self.history[-1].result[-1]
			return last_result.is_done is True
		return False

	def is_successful(self) -> bool | None:
		"""Check if the agent completed successfully - the agent decides in the last step if it was successful or not. None if not done yet."""
		if self.history and len(self.history[-1].result) > 0:
			last_result = self.history[-1].result[-1]
			if last_result.is_done is True:
				return last_result.success
		return None

	def has_errors(self) -> bool:
		"""Check if the agent has any non-None errors"""
		return any(error is not None for error in self.errors())

	def judgement(self) -> dict | None:
		"""Get the judgement result as a dictionary if it exists"""
		if self.history and len(self.history[-1].result) > 0:
			last_result = self.history[-1].result[-1]
			if last_result.judgement:
				return last_result.judgement.model_dump()
		return None

	def is_judged(self) -> bool:
		"""Check if the agent trace has been judged"""
		if self.history and len(self.history[-1].result) > 0:
			last_result = self.history[-1].result[-1]
			return last_result.judgement is not None
		return False

	def is_validated(self) -> bool | None:
		"""Check if the judge validated the agent execution (verdict is True). Returns None if not judged yet."""
		if self.history and len(self.history[-1].result) > 0:
			last_result = self.history[-1].result[-1]
			if last_result.judgement:
				return last_result.judgement.verdict
		return None

	def urls(self) -> list[str | None]:
		"""Get the URL recorded at each step (one entry per step; may contain duplicates or None)"""
		return [h.state.url if h.state.url is not None else None for h in self.history]

	def screenshot_paths(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
		"""Get all screenshot paths from history"""
		if n_last == 0:
			return []
		if n_last is None:
			if return_none_if_not_screenshot:
				return [h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self.history]
			else:
				return [h.state.screenshot_path for h in self.history if h.state.screenshot_path is not None]
		else:
			if return_none_if_not_screenshot:
				return [h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self.history[-n_last:]]
			else:
				return [h.state.screenshot_path for h in self.history[-n_last:] if h.state.screenshot_path is not None]

	def screenshots(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
		"""Get all screenshots from history as base64 strings"""
		if n_last == 0:
			return []

		history_items = self.history if n_last is None else self.history[-n_last:]

		screenshots = []
		for item in history_items:
			screenshot_b64 = item.state.get_screenshot()
			if screenshot_b64:
				screenshots.append(screenshot_b64)
			else:
				if return_none_if_not_screenshot:
					screenshots.append(None)
				# If return_none_if_not_screenshot is False, we skip None values
		return screenshots

	def action_names(self) -> list[str]:
		"""Get all action names from history"""
		action_names = []
		for action in self.model_actions():
			# Serialized actions are single-key dicts: {action_name: params}
			actions = list(action.keys())
			if actions:
				action_names.append(actions[0])
		return action_names

	def model_thoughts(self) -> list[AgentBrain]:
		"""Get all thoughts from history"""
		return [h.model_output.current_state for h in self.history if h.model_output]

	def model_outputs(self) -> list[AgentOutput]:
		"""Get all model outputs from history"""
		return [h.model_output for h in self.history if h.model_output]

	# get all actions with params
	def model_actions(self) -> list[dict]:
		"""Get all actions from history"""
		outputs = []

		for h in self.history:
			if h.model_output:
				# Guard against None interacted_element before zipping
				interacted_elements = h.state.interacted_element or [None] * len(h.model_output.action)
				for action, interacted_element in zip(h.model_output.action, interacted_elements):
					output = action.model_dump(exclude_none=True, mode='json')
					output['interacted_element'] = interacted_element
					outputs.append(output)
		return outputs

	def action_history(self) -> list[list[dict]]:
		"""Get truncated action history with only essential fields"""
		step_outputs = []

		for h in self.history:
			step_actions = []
			if h.model_output:
				# Guard against None interacted_element before zipping
				interacted_elements = h.state.interacted_element or [None] * len(h.model_output.action)
				# Zip actions with interacted elements and results
				for action, interacted_element, result in zip(h.model_output.action, interacted_elements, h.result):
					action_output = action.model_dump(exclude_none=True, mode='json')
					action_output['interacted_element'] = interacted_element
					# Only keep long_term_memory from result
					action_output['result'] = result.long_term_memory if result and result.long_term_memory else None
					step_actions.append(action_output)
			step_outputs.append(step_actions)

		return step_outputs

	def action_results(self) -> list[ActionResult]:
		"""Get all results from history"""
		results = []
		for h in self.history:
			results.extend([r for r in h.result if r])
		return results

	def extracted_content(self) -> list[str]:
		"""Get all extracted content from history"""
		content = []
		for h in self.history:
			content.extend([r.extracted_content for r in h.result if r.extracted_content])
		return content

	def model_actions_filtered(self, include: list[str] | None = None) -> list[dict]:
		"""Get all model actions from history as JSON"""
		if include is None:
			include = []
		outputs = self.model_actions()
		result = []
		for o in outputs:
			for i in include:
				if i == list(o.keys())[0]:
					result.append(o)
		return result

	def number_of_steps(self) -> int:
		"""Get the number of steps in the history"""
		return len(self.history)

	def agent_steps(self) -> list[str]:
		"""Format agent history as readable step descriptions for judge evaluation."""
		steps = []

		# Iterate through history items (each is an AgentHistory)
		for i, h in enumerate(self.history):
			step_text = f'Step {i + 1}:\n'

			# Get actions from model_output
			if h.model_output and h.model_output.action:
				# Use model_dump with mode='json' to serialize enums properly
				actions_list = [action.model_dump(exclude_none=True, mode='json') for action in h.model_output.action]
				action_json = json.dumps(actions_list, indent=1)
				step_text += f'Actions: {action_json}\n'

			# Get results (already a list[ActionResult] in h.result)
			if h.result:
				for j, result in enumerate(h.result):
					if result.extracted_content:
						content = str(result.extracted_content)
						step_text += f'Result {j + 1}: {content}\n'
					if result.error:
						error = str(result.error)
						step_text += f'Error {j + 1}: {error}\n'

			steps.append(step_text)

		return steps

	@property
	def structured_output(self) -> AgentStructuredOutput | None:
		"""Get the structured output from the history

		Returns:
			The structured output if both final_result and _output_model_schema are available,
			otherwise None
		"""
		final_result = self.final_result()
		if final_result is not None and self._output_model_schema is not None:
			return self._output_model_schema.model_validate_json(final_result)

		return None

	def get_structured_output(self, output_model: type[AgentStructuredOutput]) -> AgentStructuredOutput | None:
		"""Get the structured output from history, parsing with the provided schema.

		Use this method when accessing structured output from sandbox execution,
		since the _output_model_schema private attribute is not preserved during serialization.

		Args:
			output_model: The Pydantic model class to parse the output with

		Returns:
			The parsed structured output, or None if no final result exists
		"""
		final_result = self.final_result()
		if final_result is not None:
			return output_model.model_validate_json(final_result)
		return None
class AgentError:
	"""Container for agent error handling"""

	VALIDATION_ERROR = 'Invalid model output format. Please follow the correct schema.'
	RATE_LIMIT_ERROR = 'Rate limit reached. Waiting before retry.'
	NO_VALID_ACTION = 'No valid action found'

	@staticmethod
	def format_error(error: Exception, include_trace: bool = False) -> str:
		"""Format error message based on error type and optionally include trace.

		Args:
			error: The exception to format.
			include_trace: If True, append the current traceback to the message.

		Returns:
			A human-readable error string suitable for feeding back to the LLM.
		"""
		# (fixed: removed dead local `message = ''` that was assigned and never used)
		if isinstance(error, ValidationError):
			return f'{AgentError.VALIDATION_ERROR}\nDetails: {str(error)}'

		# Lazy import to avoid loading openai SDK (~800ms) at module level
		from openai import RateLimitError

		if isinstance(error, RateLimitError):
			return AgentError.RATE_LIMIT_ERROR

		# Handle LLM response validation errors from llm_use
		error_str = str(error)
		if 'LLM response missing required fields' in error_str or 'Expected format: AgentOutput' in error_str:
			# Extract the main error message without the huge stacktrace
			lines = error_str.split('\n')
			main_error = lines[0] if lines else error_str

			# Provide a clearer error message
			helpful_msg = f'{main_error}\n\nThe previous response had an invalid output structure. Please stick to the required output format. \n\n'
			if include_trace:
				helpful_msg += f'\n\nFull stacktrace:\n{traceback.format_exc()}'
			return helpful_msg

		if include_trace:
			return f'{str(error)}\nStacktrace:\n{traceback.format_exc()}'
		return f'{str(error)}'
class DetectedVariable(BaseModel):
	"""A detected variable in agent history"""

	name: str
	original_value: str  # the literal value that was detected in the history
	type: str = 'string'
	format: str | None = None  # NOTE(review): format semantics not shown in this file — verify against callers
class VariableMetadata(BaseModel):
	"""Metadata about detected variables in history"""

	# Keyed by variable name (see DetectedVariable.name)
	detected_variables: dict[str, DetectedVariable] = Field(default_factory=dict)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/gif.py | browser_use/agent/gif.py | from __future__ import annotations
import base64
import io
import logging
import os
import platform
from typing import TYPE_CHECKING
from browser_use.agent.views import AgentHistoryList
from browser_use.browser.views import PLACEHOLDER_4PX_SCREENSHOT
from browser_use.config import CONFIG
if TYPE_CHECKING:
from PIL import Image, ImageFont
logger = logging.getLogger(__name__)
def decode_unicode_escapes_to_utf8(text: str) -> str:
	"""Handle decoding any unicode escape sequences embedded in a string (needed to render non-ASCII languages like chinese or arabic in the GIF overlay text)"""
	# Fast path: nothing that looks like a \uXXXX escape, return unchanged.
	if '\\u' not in text:
		return text

	# The latin1 round-trip maps each character to one byte so that
	# 'unicode_escape' can interpret the literal backslash sequences.
	try:
		decoded = text.encode('latin1').decode('unicode_escape')
	except (UnicodeEncodeError, UnicodeDecodeError):
		# Text already contains non-latin1 characters (or malformed escapes);
		# leave it as-is rather than corrupt it.
		return text
	return decoded
def create_history_gif(
	task: str,
	history: AgentHistoryList,
	#
	output_path: str = 'agent_history.gif',
	duration: int = 3000,
	show_goals: bool = True,
	show_task: bool = True,
	show_logo: bool = False,
	font_size: int = 40,
	title_font_size: int = 56,
	goal_font_size: int = 44,
	margin: int = 40,
	line_spacing: float = 1.5,
) -> None:
	"""Create a GIF from the agent's history with overlaid task and goal text.

	Args:
		task: The task description shown on the optional title frame.
		history: Agent history whose per-step screenshots become GIF frames.
		output_path: Where to write the resulting GIF.
		duration: Per-frame duration in milliseconds.
		show_goals: Overlay each step's next_goal text on its frame.
		show_task: Prepend a title frame showing the task text.
		show_logo: Paste the browser-use logo onto frames (best-effort).
		font_size / title_font_size / goal_font_size: Font sizes for overlays.
		margin: Pixel margin used when positioning overlays.
		line_spacing: Line-height multiplier for wrapped text.
	"""
	# (fixed: removed a duplicated `if not history.history` guard and a second,
	# identical scan for the first real screenshot further down)
	if not history.history:
		logger.warning('No history to create GIF from')
		return

	from PIL import Image, ImageFont

	# Hoisted out of the frame loop — previously re-imported per iteration.
	from browser_use.utils import is_new_tab_page

	images = []

	# Get all screenshots from history (including None placeholders)
	screenshots = history.screenshots(return_none_if_not_screenshot=True)
	if not screenshots:
		logger.warning('No screenshots found in history')
		return

	# Find the first non-placeholder screenshot.
	# A screenshot is considered a placeholder if it's the exact 4px placeholder
	# used for about:blank / new tab pages.
	first_real_screenshot = None
	for screenshot in screenshots:
		if screenshot and screenshot != PLACEHOLDER_4PX_SCREENSHOT:
			first_real_screenshot = screenshot
			break

	if not first_real_screenshot:
		logger.warning('No valid screenshots found (all are placeholders or from new tab pages)')
		return

	# Try to load nicer fonts
	try:
		# Try different font options in order of preference
		# ArialUni is a font that comes with Office and can render most non-alphabet characters
		font_options = [
			'PingFang',
			'STHeiti Medium',
			'Microsoft YaHei',  # 微软雅黑 (Microsoft YaHei)
			'SimHei',  # 黑体 (SimHei)
			'SimSun',  # 宋体 (SimSun)
			'Noto Sans CJK SC',  # 思源黑体 (Source Han Sans)
			'WenQuanYi Micro Hei',  # 文泉驿微米黑 (WenQuanYi Micro Hei)
			'Helvetica',
			'Arial',
			'DejaVuSans',
			'Verdana',
		]
		font_loaded = False
		for font_name in font_options:
			try:
				if platform.system() == 'Windows':
					# Need to specify the abs font path on Windows
					font_name = os.path.join(CONFIG.WIN_FONT_DIR, font_name + '.ttf')
				regular_font = ImageFont.truetype(font_name, font_size)
				title_font = ImageFont.truetype(font_name, title_font_size)
				goal_font = ImageFont.truetype(font_name, goal_font_size)
				font_loaded = True
				break
			except OSError:
				continue

		if not font_loaded:
			raise OSError('No preferred fonts found')
	except OSError:
		# Fall back to PIL's built-in bitmap font.
		regular_font = ImageFont.load_default()
		title_font = ImageFont.load_default()
		goal_font = regular_font

	# Load logo if requested
	logo = None
	if show_logo:
		try:
			logo = Image.open('./static/browser-use.png')
			# Resize logo to be small (e.g., 40px height)
			logo_height = 150
			aspect_ratio = logo.width / logo.height
			logo_width = int(logo_height * aspect_ratio)
			logo = logo.resize((logo_width, logo_height), Image.Resampling.LANCZOS)
		except Exception as e:
			logger.warning(f'Could not load logo: {e}')

	# Create task frame if requested, reusing the first real screenshot found above
	# (the early return guarantees it exists at this point).
	if show_task and task:
		task_frame = _create_task_frame(
			task,
			first_real_screenshot,
			title_font,  # type: ignore
			regular_font,  # type: ignore
			logo,
			line_spacing,
		)
		images.append(task_frame)

	# Process each history item with its corresponding screenshot
	for i, (item, screenshot) in enumerate(zip(history.history, screenshots), 1):
		if not screenshot:
			continue

		# Skip placeholder screenshots from about:blank pages
		# These are 4x4 white PNGs encoded as a specific base64 string
		if screenshot == PLACEHOLDER_4PX_SCREENSHOT:
			logger.debug(f'Skipping placeholder screenshot from about:blank page at step {i}')
			continue

		# Skip screenshots from new tab pages
		if is_new_tab_page(item.state.url):
			logger.debug(f'Skipping screenshot from new tab page ({item.state.url}) at step {i}')
			continue

		# Convert base64 screenshot to PIL Image
		img_data = base64.b64decode(screenshot)
		image = Image.open(io.BytesIO(img_data))

		if show_goals and item.model_output:
			image = _add_overlay_to_image(
				image=image,
				step_number=i,
				goal_text=item.model_output.current_state.next_goal,
				regular_font=regular_font,  # type: ignore
				title_font=title_font,  # type: ignore
				margin=margin,
				logo=logo,
			)

		images.append(image)

	if images:
		# Save the GIF
		images[0].save(
			output_path,
			save_all=True,
			append_images=images[1:],
			duration=duration,
			loop=0,
			optimize=False,
		)
		logger.info(f'Created GIF at {output_path}')
	else:
		logger.warning('No images found in history to create GIF')
def _create_task_frame(
	task: str,
	first_screenshot: str,
	title_font: ImageFont.FreeTypeFont,
	regular_font: ImageFont.FreeTypeFont,
	logo: Image.Image | None = None,
	line_spacing: float = 1.5,
) -> Image.Image:
	"""Create initial frame showing the task.

	Renders the task text centered on a black frame sized like the first
	screenshot, with a font size scaled down for long task descriptions, and
	optionally pastes the logo in the top-right corner.
	"""
	from PIL import Image, ImageDraw, ImageFont

	# The screenshot is only used as a size template; the frame itself is black.
	img_data = base64.b64decode(first_screenshot)
	template = Image.open(io.BytesIO(img_data))
	image = Image.new('RGB', template.size, (0, 0, 0))
	draw = ImageDraw.Draw(image)

	# Calculate vertical center of image
	center_y = image.height // 2

	# Draw task text with dynamic font size based on task length
	margin = 140  # Increased margin
	max_width = image.width - (2 * margin)

	# Dynamic font size calculation based on task length
	# Start with base font size (regular + 16)
	base_font_size = regular_font.size + 16
	min_font_size = max(regular_font.size - 10, 16)  # Don't go below 16pt
	max_font_size = base_font_size  # Cap at the base font size

	# Calculate dynamic font size based on text length and complexity
	# Longer texts get progressively smaller fonts
	text_length = len(task)
	if text_length > 200:
		# For very long text, reduce font size logarithmically
		font_size = max(base_font_size - int(10 * (text_length / 200)), min_font_size)
	else:
		font_size = base_font_size

	# Try to create a larger font, but fall back to regular font if it fails
	try:
		larger_font = ImageFont.truetype(regular_font.path, font_size)  # type: ignore
	except (OSError, AttributeError):
		# Fall back to regular font if .path is not available or font loading fails
		larger_font = regular_font

	# Generate wrapped text with the calculated font size
	wrapped_text = _wrap_text(task, larger_font, max_width)

	# Calculate line height with spacing
	line_height = larger_font.size * line_spacing

	# Split text into lines and draw with custom spacing
	lines = wrapped_text.split('\n')
	total_height = line_height * len(lines)

	# Start position for first line
	text_y = center_y - (total_height / 2) + 50  # Shifted down slightly

	for line in lines:
		# Get line width for centering
		line_bbox = draw.textbbox((0, 0), line, font=larger_font)
		text_x = (image.width - (line_bbox[2] - line_bbox[0])) // 2

		draw.text(
			(text_x, text_y),
			line,
			font=larger_font,
			fill=(255, 255, 255),
		)
		text_y += line_height

	# Add logo if provided (top right corner)
	if logo:
		logo_margin = 20
		logo_x = image.width - logo.width - logo_margin
		# Use the logo itself as a mask when it has an alpha channel
		image.paste(logo, (logo_x, logo_margin), logo if logo.mode == 'RGBA' else None)

	return image
def _add_overlay_to_image(
    image: Image.Image,
    step_number: int,
    goal_text: str,
    regular_font: ImageFont.FreeTypeFont,
    title_font: ImageFont.FreeTypeFont,
    margin: int,
    logo: Image.Image | None = None,
    display_step: bool = True,
    text_color: tuple[int, int, int, int] = (255, 255, 255, 255),
    text_box_color: tuple[int, int, int, int] = (0, 0, 0, 255),
) -> Image.Image:
    """Add step number and goal overlay to an image.

    Args:
        image: Frame to annotate; converted to RGBA internally.
        step_number: Step index rendered in a badge at the bottom left.
        goal_text: Goal text rendered centered near the bottom of the frame.
        regular_font: Unused here; kept for signature compatibility with callers.
        title_font: Font used for both the step badge and the goal text.
        margin: Outer margin in pixels used to position the overlays.
        logo: Optional logo pasted into the top-right corner.
        display_step: When False, the step badge is skipped and the goal text
            is anchored relative to the bottom edge instead.
        text_color: RGBA fill for the rendered text.
        text_box_color: RGBA fill for the rounded background boxes.

    Returns:
        The annotated image, converted back to RGB.
    """
    from PIL import Image, ImageDraw

    goal_text = decode_unicode_escapes_to_utf8(goal_text)
    image = image.convert('RGBA')
    txt_layer = Image.new('RGBA', image.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(txt_layer)

    # BUGFIX: `padding` and `y_step` were previously assigned only inside the
    # `display_step` branch but read unconditionally below when placing the
    # goal text, so display_step=False raised NameError. Initialize sensible
    # defaults here; the display_step=True path overwrites y_step as before.
    padding = 20  # Increased padding
    y_step = image.height - margin - 10  # Fallback anchor when badge hidden

    if display_step:
        # Add step number (bottom left)
        step_text = str(step_number)
        step_bbox = draw.textbbox((0, 0), step_text, font=title_font)
        step_width = step_bbox[2] - step_bbox[0]
        step_height = step_bbox[3] - step_bbox[1]
        # Position step number in bottom left
        x_step = margin + 10  # Slight additional offset from edge
        y_step = image.height - margin - step_height - 10  # Slight offset from bottom
        # Draw rounded rectangle background for step number
        step_bg_bbox = (
            x_step - padding,
            y_step - padding,
            x_step + step_width + padding,
            y_step + step_height + padding,
        )
        draw.rounded_rectangle(
            step_bg_bbox,
            radius=15,  # Add rounded corners
            fill=text_box_color,
        )
        # Draw step number
        draw.text(
            (x_step, y_step),
            step_text,
            font=title_font,
            fill=text_color,
        )

    # Draw goal text (centered, bottom)
    max_width = image.width - (4 * margin)
    wrapped_goal = _wrap_text(goal_text, title_font, max_width)
    goal_bbox = draw.multiline_textbbox((0, 0), wrapped_goal, font=title_font)
    goal_width = goal_bbox[2] - goal_bbox[0]
    goal_height = goal_bbox[3] - goal_bbox[1]
    # Center goal text horizontally, place above step number
    x_goal = (image.width - goal_width) // 2
    y_goal = y_step - goal_height - padding * 4  # More space between step and goal
    # Draw rounded rectangle background for goal
    padding_goal = 25  # Increased padding for goal
    goal_bg_bbox = (
        x_goal - padding_goal,  # Remove extra space for logo
        y_goal - padding_goal,
        x_goal + goal_width + padding_goal,
        y_goal + goal_height + padding_goal,
    )
    draw.rounded_rectangle(
        goal_bg_bbox,
        radius=15,  # Add rounded corners
        fill=text_box_color,
    )
    # Draw goal text
    draw.multiline_text(
        (x_goal, y_goal),
        wrapped_goal,
        font=title_font,
        fill=text_color,
        align='center',
    )

    # Add logo if provided (top right corner); compositing the logo layer
    # underneath the text layer keeps text legible if they ever overlap.
    if logo:
        logo_layer = Image.new('RGBA', image.size, (0, 0, 0, 0))
        logo_margin = 20
        logo_x = image.width - logo.width - logo_margin
        logo_layer.paste(logo, (logo_x, logo_margin), logo if logo.mode == 'RGBA' else None)
        txt_layer = Image.alpha_composite(logo_layer, txt_layer)

    # Composite and convert
    result = Image.alpha_composite(image, txt_layer)
    return result.convert('RGB')
def _wrap_text(text: str, font: ImageFont.FreeTypeFont, max_width: int) -> str:
    """
    Wrap text to fit within a given width.

    Args:
        text: Text to wrap
        font: Font to use for text
        max_width: Maximum width in pixels

    Returns:
        Wrapped text with newlines
    """
    text = decode_unicode_escapes_to_utf8(text)
    wrapped: list[str] = []
    pending: list[str] = []

    for token in text.split():
        pending.append(token)
        candidate = ' '.join(pending)
        # getbbox()[2] is the right edge of the rendered text in pixels.
        if font.getbbox(candidate)[2] > max_width:
            if len(pending) == 1:
                # A single word already exceeds the width: emit it on its
                # own line rather than splitting mid-word.
                wrapped.append(pending.pop())
            else:
                # Flush everything before the overflowing word, then start
                # the next line with that word.
                pending.pop()
                wrapped.append(' '.join(pending))
                pending = [token]

    if pending:
        wrapped.append(' '.join(pending))

    return '\n'.join(wrapped)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/judge.py | browser_use/agent/judge.py | """Judge system for evaluating browser-use agent execution traces."""
import base64
import logging
from pathlib import Path
from browser_use.llm.messages import (
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
SystemMessage,
UserMessage,
)
logger = logging.getLogger(__name__)
def _encode_image(image_path: str) -> str | None:
"""Encode image to base64 string."""
try:
path = Path(image_path)
if not path.exists():
return None
with open(path, 'rb') as f:
return base64.b64encode(f.read()).decode('utf-8')
except Exception as e:
logger.warning(f'Failed to encode image {image_path}: {e}')
return None
def _truncate_text(text: str, max_length: int, from_beginning: bool = False) -> str:
"""Truncate text to maximum length with eval system indicator."""
if len(text) <= max_length:
return text
if from_beginning:
return '...[text truncated]' + text[-max_length + 23 :]
else:
return text[: max_length - 23] + '...[text truncated]...'
def construct_judge_messages(
    task: str,
    final_result: str,
    agent_steps: list[str],
    screenshot_paths: list[str],
    max_images: int = 10,
    ground_truth: str | None = None,
) -> list[BaseMessage]:
    """
    Construct messages for judge evaluation of agent trace.

    Args:
        task: The original task description
        final_result: The final result returned to the user
        agent_steps: List of formatted agent step descriptions
        screenshot_paths: List of screenshot file paths
        max_images: Maximum number of screenshots to include
        ground_truth: Optional ground truth answer or criteria that must be satisfied for success

    Returns:
        List of messages for LLM judge evaluation
    """
    # Cap each text section at 40000 chars — presumably sized to the judge
    # model's context window; TODO confirm against the judge LLM's limit.
    task_truncated = _truncate_text(task, 40000)
    final_result_truncated = _truncate_text(final_result, 40000)
    steps_text = '\n'.join(agent_steps)
    steps_text_truncated = _truncate_text(steps_text, 40000)
    # Select last N screenshots (the most recent ones carry the final state)
    selected_screenshots = screenshot_paths[-max_images:] if len(screenshot_paths) > max_images else screenshot_paths
    # Encode screenshots; unreadable files yield None and are skipped silently
    encoded_images: list[ContentPartImageParam] = []
    for img_path in selected_screenshots:
        encoded = _encode_image(img_path)
        if encoded:
            encoded_images.append(
                ContentPartImageParam(
                    image_url=ImageURL(
                        url=f'data:image/png;base64,{encoded}',
                        media_type='image/png',
                    )
                )
            )
    # System prompt for judge - conditionally add ground truth section
    ground_truth_section = ''
    if ground_truth:
        ground_truth_section = """
**GROUND TRUTH VALIDATION (HIGHEST PRIORITY):**
The <ground_truth> section contains verified correct information for this task. This can be:
- **Evaluation criteria**: Specific conditions that must be met (e.g., "The success popup should show up", "Must extract exactly 5 items")
- **Factual answers**: The correct answer to a question or information retrieval task (e.g. "10/11/24", "Paris")
- **Expected outcomes**: What should happen after task completion (e.g., "Google Doc must be created", "File should be downloaded")
The ground truth takes ABSOLUTE precedence over all other evaluation criteria. If the ground truth is not satisfied by the agent's execution and final response, the verdict MUST be false.
"""
    system_prompt = f"""You are an expert judge evaluating browser automation agent performance.
<evaluation_framework>
{ground_truth_section}
**PRIMARY EVALUATION CRITERIA (in order of importance):**
1. **Task Satisfaction (Most Important)**: Did the agent accomplish what the user asked for? Break down the task into the key criteria and evaluate if the agent all of them. Focus on user intent and final outcome.
2. **Output Quality**: Is the final result in the correct format and complete? Does it match exactly what was requested?
3. **Tool Effectiveness**: Did the browser interactions work as expected? Were tools used appropriately? How many % of the tools failed?
4. **Agent Reasoning**: Quality of decision-making, planning, and problem-solving throughout the trajectory.
5. **Browser Handling**: Navigation stability, error recovery, and technical execution. If the browser crashes, does not load or a captcha blocks the task, the score must be very low.
**VERDICT GUIDELINES:**
- true: Task completed as requested, human-like execution, all of the users criteria were met and the agent did not make up any information.
- false: Task not completed, or only partially completed.
**Examples of task completion verdict:**
- If task asks for 10 items and agent finds 4 items correctly: false
- If task completed to full user requirements but with some errors to improve in the trajectory: true
- If task impossible due to captcha/login requirements: false
- If the trajectory is ideal and the output is perfect: true
- If the task asks to search all headphones in amazon under $100 but the agent searches all headphones and the lowest price is $150: false
- If the task asks to research a property and create a google doc with the result but the agents only returns the results in text: false
- If the task asks to complete an action on the page, and the agent reports that the action is completed but the screenshot or page shows the action is not actually complete: false
- If the task asks to use a certain tool or site to complete the task but the agent completes the task without using it: false
- If the task asks to look for a section of a page that does not exist: false
- If the agent concludes the task is impossible but it is not: false
- If the agent concludes the task is impossible and it truly is impossible: false
- If the agent is unable to complete the task because no login information was provided and it is truly needed to complete the task: false
**FAILURE CONDITIONS (automatically set verdict to false):**
- Blocked by captcha or missing authentication
- Output format completely wrong or missing
- Infinite loops or severe technical failures
- Critical user requirements ignored
- Page not loaded
- Browser crashed
- Agent could not interact with required UI elements
- The agent moved on from a important step in the task without completing it
- The agent made up content that is not in the screenshot or the page state
- The agent calls done action before completing all key points of the task
**IMPOSSIBLE TASK DETECTION:**
Set `impossible_task` to true when the task fundamentally could not be completed due to:
- Vague or ambiguous task instructions that cannot be reasonably interpreted
- Website genuinely broken or non-functional (be conservative - temporary issues don't count)
- Required links/pages truly inaccessible (404, 403, etc.)
- Task requires authentication/login but no credentials were provided
- Task asks for functionality that doesn't exist on the target site
- Other insurmountable external obstacles beyond the agent's control
Do NOT mark as impossible if:
- Agent made poor decisions but task was achievable
- Temporary page loading issues that could be retried
- Agent didn't try the right approach
- Website works but agent struggled with it
**CAPTCHA DETECTION:**
Set `reached_captcha` to true if:
- Screenshots show captcha challenges (reCAPTCHA, hCaptcha, etc.)
- Agent reports being blocked by bot detection
- Error messages indicate captcha/verification requirements
- Any evidence the agent encountered anti-bot measures during execution
**IMPORTANT EVALUATION NOTES:**
- **evaluate for action** - For each key step of the trace, double check whether the action that the agent tried to performed actually happened. If the required action did not actually occur, the verdict should be false.
- **screenshot is not entire content** - The agent has the entire DOM content, but the screenshot is only part of the content. If the agent extracts information from the page, but you do not see it in the screenshot, you can assume this information is there.
- **Penalize poor tool usage** - Wrong tools, inefficient approaches, ignoring available information.
- **ignore unexpected dates and times** - These agent traces are from varying dates, you can assume the dates the agent uses for search or filtering are correct.
- **IMPORTANT**: be very picky about the user's request - Have very high standard for the agent completing the task exactly to the user's request.
- **IMPORTANT**: be initially doubtful of the agent's self reported success, be sure to verify that its methods are valid and fulfill the user's desires to a tee.
</evaluation_framework>
<response_format>
Respond with EXACTLY this JSON structure (no additional text before or after):
{{
"reasoning": "Breakdown of user task into key points. Detailed analysis covering: what went well, what didn't work, trajectory quality assessment, tool usage evaluation, output quality review, and overall user satisfaction prediction.",
"verdict": true or false,
"failure_reason": "Max 5 sentences explanation of why the task was not completed successfully in case of failure. If verdict is true, use an empty string.",
"impossible_task": true or false,
"reached_captcha": true or false
}}
</response_format>
"""
    # Build user prompt with conditional ground truth section
    ground_truth_prompt = ''
    if ground_truth:
        ground_truth_prompt = f"""
<ground_truth>
{ground_truth}
</ground_truth>
"""
    user_prompt = f"""
<task>
{task_truncated or 'No task provided'}
</task>
{ground_truth_prompt}
<agent_trajectory>
{steps_text_truncated or 'No agent trajectory provided'}
</agent_trajectory>
<final_result>
{final_result_truncated or 'No final result provided'}
</final_result>
{len(encoded_images)} screenshots from execution are attached.
Evaluate this agent execution given the criteria and respond with the exact JSON structure requested."""
    # Build messages with screenshots: one text part followed by image parts
    content_parts: list[ContentPartTextParam | ContentPartImageParam] = [ContentPartTextParam(text=user_prompt)]
    content_parts.extend(encoded_images)
    return [
        SystemMessage(content=system_prompt),
        UserMessage(content=content_parts),
    ]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/service.py | browser_use/agent/service.py | import asyncio
import gc
import inspect
import json
import logging
import re
import tempfile
import time
from collections.abc import Awaitable, Callable
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast
from urllib.parse import urlparse
if TYPE_CHECKING:
from browser_use.skills.views import Skill
from dotenv import load_dotenv
from browser_use.agent.cloud_events import (
CreateAgentOutputFileEvent,
CreateAgentSessionEvent,
CreateAgentStepEvent,
CreateAgentTaskEvent,
UpdateAgentTaskEvent,
)
from browser_use.agent.message_manager.utils import save_conversation
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage, ContentPartImageParam, ContentPartTextParam, UserMessage
from browser_use.tokens.service import TokenCost
load_dotenv()
from bubus import EventBus
from pydantic import BaseModel, ValidationError
from uuid_extensions import uuid7str
from browser_use import Browser, BrowserProfile, BrowserSession
from browser_use.agent.judge import construct_judge_messages
# Lazy import for gif to avoid heavy agent.views import at startup
# from browser_use.agent.gif import create_history_gif
from browser_use.agent.message_manager.service import (
MessageManager,
)
from browser_use.agent.prompts import SystemPrompt
from browser_use.agent.views import (
ActionResult,
AgentError,
AgentHistory,
AgentHistoryList,
AgentOutput,
AgentSettings,
AgentState,
AgentStepInfo,
AgentStructuredOutput,
BrowserStateHistory,
DetectedVariable,
JudgementResult,
StepMetadata,
)
from browser_use.browser.session import DEFAULT_BROWSER_PROFILE
from browser_use.browser.views import BrowserStateSummary
from browser_use.config import CONFIG
from browser_use.dom.views import DOMInteractedElement, MatchLevel
from browser_use.filesystem.file_system import FileSystem
from browser_use.observability import observe, observe_debug
from browser_use.telemetry.service import ProductTelemetry
from browser_use.telemetry.views import AgentTelemetryEvent
from browser_use.tools.registry.views import ActionModel
from browser_use.tools.service import Tools
from browser_use.utils import (
URL_PATTERN,
_log_pretty_path,
check_latest_browser_use_version,
get_browser_use_version,
time_execution_async,
time_execution_sync,
)
logger = logging.getLogger(__name__)
def log_response(response: AgentOutput, registry=None, logger=None) -> None:
    """Pretty-print the model's parsed response at appropriate log levels.

    Args:
        response: Parsed model output; only its ``current_state`` is inspected.
        registry: Unused; retained for backward compatibility with call sites.
        logger: Destination logger; defaults to this module's logger.
    """
    # Use module logger if no logger provided
    if logger is None:
        logger = logging.getLogger(__name__)

    state = response.current_state

    # Thinking can be verbose, so it only goes to debug
    if state.thinking:
        logger.debug(f'💡 Thinking:\n{state.thinking}')

    # Evaluation of the previous goal, colorized by outcome keyword
    eval_goal = state.evaluation_previous_goal
    if eval_goal:
        lowered = eval_goal.lower()
        if 'success' in lowered:
            emoji = '👍'
            # Green color for success
            logger.info(f'  \033[32m{emoji} Eval: {eval_goal}\033[0m')
        elif 'failure' in lowered:
            emoji = '⚠️'
            # Red color for failure
            logger.info(f'  \033[31m{emoji} Eval: {eval_goal}\033[0m')
        else:
            emoji = '❔'
            # No color for unknown/neutral
            logger.info(f'  {emoji} Eval: {eval_goal}')

    # Memory snapshot, when present
    if state.memory:
        logger.info(f'  🧠 Memory: {state.memory}')

    # Upcoming goal, highlighted in blue
    next_goal = state.next_goal
    if next_goal:
        logger.info(f'  \033[34m🎯 Next goal: {next_goal}\033[0m')
# Type variable for the user-supplied context object threaded through Agent/Tools generics
Context = TypeVar('Context')
# Signature for async lifecycle hooks: receives the running Agent, returns nothing
AgentHookFunc = Callable[['Agent'], Awaitable[None]]
class Agent(Generic[Context, AgentStructuredOutput]):
@time_execution_sync('--init')
def __init__(
self,
task: str,
llm: BaseChatModel | None = None,
# Optional parameters
browser_profile: BrowserProfile | None = None,
browser_session: BrowserSession | None = None,
browser: Browser | None = None, # Alias for browser_session
tools: Tools[Context] | None = None,
controller: Tools[Context] | None = None, # Alias for tools
# Skills integration
skill_ids: list[str | Literal['*']] | None = None,
skills: list[str | Literal['*']] | None = None, # Alias for skill_ids
skill_service: Any | None = None,
# Initial agent run parameters
sensitive_data: dict[str, str | dict[str, str]] | None = None,
initial_actions: list[dict[str, dict[str, Any]]] | None = None,
# Cloud Callbacks
register_new_step_callback: (
Callable[['BrowserStateSummary', 'AgentOutput', int], None] # Sync callback
| Callable[['BrowserStateSummary', 'AgentOutput', int], Awaitable[None]] # Async callback
| None
) = None,
register_done_callback: (
Callable[['AgentHistoryList'], Awaitable[None]] # Async Callback
| Callable[['AgentHistoryList'], None] # Sync Callback
| None
) = None,
register_external_agent_status_raise_error_callback: Callable[[], Awaitable[bool]] | None = None,
register_should_stop_callback: Callable[[], Awaitable[bool]] | None = None,
# Agent settings
output_model_schema: type[AgentStructuredOutput] | None = None,
use_vision: bool | Literal['auto'] = True,
save_conversation_path: str | Path | None = None,
save_conversation_path_encoding: str | None = 'utf-8',
max_failures: int = 3,
override_system_message: str | None = None,
extend_system_message: str | None = None,
generate_gif: bool | str = False,
available_file_paths: list[str] | None = None,
include_attributes: list[str] | None = None,
max_actions_per_step: int = 3,
use_thinking: bool = True,
flash_mode: bool = False,
demo_mode: bool | None = None,
max_history_items: int | None = None,
page_extraction_llm: BaseChatModel | None = None,
fallback_llm: BaseChatModel | None = None,
use_judge: bool = True,
ground_truth: str | None = None,
judge_llm: BaseChatModel | None = None,
injected_agent_state: AgentState | None = None,
source: str | None = None,
file_system_path: str | None = None,
task_id: str | None = None,
calculate_cost: bool = False,
display_files_in_done_text: bool = True,
include_tool_call_examples: bool = False,
vision_detail_level: Literal['auto', 'low', 'high'] = 'auto',
llm_timeout: int | None = None,
step_timeout: int = 120,
directly_open_url: bool = True,
include_recent_events: bool = False,
sample_images: list[ContentPartTextParam | ContentPartImageParam] | None = None,
final_response_after_failure: bool = True,
llm_screenshot_size: tuple[int, int] | None = None,
_url_shortening_limit: int = 25,
**kwargs,
):
# Validate llm_screenshot_size
if llm_screenshot_size is not None:
if not isinstance(llm_screenshot_size, tuple) or len(llm_screenshot_size) != 2:
raise ValueError('llm_screenshot_size must be a tuple of (width, height)')
width, height = llm_screenshot_size
if not isinstance(width, int) or not isinstance(height, int):
raise ValueError('llm_screenshot_size dimensions must be integers')
if width < 100 or height < 100:
raise ValueError('llm_screenshot_size dimensions must be at least 100 pixels')
self.logger.info(f'🖼️ LLM screenshot resizing enabled: {width}x{height}')
if llm is None:
default_llm_name = CONFIG.DEFAULT_LLM
if default_llm_name:
from browser_use.llm.models import get_llm_by_name
llm = get_llm_by_name(default_llm_name)
else:
# No default LLM specified, use the original default
from browser_use import ChatBrowserUse
llm = ChatBrowserUse()
# set flashmode = True if llm is ChatBrowserUse
if llm.provider == 'browser-use':
flash_mode = True
# Auto-configure llm_screenshot_size for Claude Sonnet models
if llm_screenshot_size is None:
model_name = getattr(llm, 'model', '')
if isinstance(model_name, str) and model_name.startswith('claude-sonnet'):
llm_screenshot_size = (1400, 850)
logger.info('🖼️ Auto-configured LLM screenshot size for Claude Sonnet: 1400x850')
if page_extraction_llm is None:
page_extraction_llm = llm
if judge_llm is None:
judge_llm = llm
if available_file_paths is None:
available_file_paths = []
# Set timeout based on model name if not explicitly provided
if llm_timeout is None:
def _get_model_timeout(llm_model: BaseChatModel) -> int:
"""Determine timeout based on model name"""
model_name = getattr(llm_model, 'model', '').lower()
if 'gemini' in model_name:
if '3-pro' in model_name:
return 90
return 45
elif 'groq' in model_name:
return 30
elif 'o3' in model_name or 'claude' in model_name or 'sonnet' in model_name or 'deepseek' in model_name:
return 90
else:
return 60 # Default timeout
llm_timeout = _get_model_timeout(llm)
self.id = task_id or uuid7str()
self.task_id: str = self.id
self.session_id: str = uuid7str()
base_profile = browser_profile or DEFAULT_BROWSER_PROFILE
if base_profile is DEFAULT_BROWSER_PROFILE:
base_profile = base_profile.model_copy()
if demo_mode is not None and base_profile.demo_mode != demo_mode:
base_profile = base_profile.model_copy(update={'demo_mode': demo_mode})
browser_profile = base_profile
# Handle browser vs browser_session parameter (browser takes precedence)
if browser and browser_session:
raise ValueError('Cannot specify both "browser" and "browser_session" parameters. Use "browser" for the cleaner API.')
browser_session = browser or browser_session
if browser_session is not None and demo_mode is not None and browser_session.browser_profile.demo_mode != demo_mode:
browser_session.browser_profile = browser_session.browser_profile.model_copy(update={'demo_mode': demo_mode})
self.browser_session = browser_session or BrowserSession(
browser_profile=browser_profile,
id=uuid7str()[:-4] + self.id[-4:], # re-use the same 4-char suffix so they show up together in logs
)
self._demo_mode_enabled: bool = bool(self.browser_profile.demo_mode) if self.browser_session else False
if self._demo_mode_enabled and getattr(self.browser_profile, 'headless', False):
self.logger.warning(
'Demo mode is enabled but the browser is headless=True; set headless=False to view the in-browser panel.'
)
# Initialize available file paths as direct attribute
self.available_file_paths = available_file_paths
# Set up tools first (needed to detect output_model_schema)
if tools is not None:
self.tools = tools
elif controller is not None:
self.tools = controller
else:
# Exclude screenshot tool when use_vision is not auto
exclude_actions = ['screenshot'] if use_vision != 'auto' else []
self.tools = Tools(exclude_actions=exclude_actions, display_files_in_done_text=display_files_in_done_text)
# Enforce screenshot exclusion when use_vision != 'auto', even if user passed custom tools
if use_vision != 'auto':
self.tools.exclude_action('screenshot')
# Enable coordinate clicking for models that support it
model_name = getattr(llm, 'model', '').lower()
supports_coordinate_clicking = any(
pattern in model_name for pattern in ['claude-sonnet-4', 'claude-opus-4', 'gemini-3-pro', 'browser-use/']
)
if supports_coordinate_clicking:
self.tools.set_coordinate_clicking(True)
# Handle skills vs skill_ids parameter (skills takes precedence)
if skills and skill_ids:
raise ValueError('Cannot specify both "skills" and "skill_ids" parameters. Use "skills" for the cleaner API.')
skill_ids = skills or skill_ids
# Skills integration - use injected service or create from skill_ids
self.skill_service = None
self._skills_registered = False
if skill_service is not None:
self.skill_service = skill_service
elif skill_ids:
from browser_use.skills import SkillService
self.skill_service = SkillService(skill_ids=skill_ids)
# Structured output - use explicit param or detect from tools
tools_output_model = self.tools.get_output_model()
if output_model_schema is not None and tools_output_model is not None:
# Both provided - warn if they differ
if output_model_schema is not tools_output_model:
logger.warning(
f'output_model_schema ({output_model_schema.__name__}) differs from Tools output_model '
f'({tools_output_model.__name__}). Using Agent output_model_schema.'
)
elif output_model_schema is None and tools_output_model is not None:
# Only tools has it - use that (cast is safe: both are BaseModel subclasses)
output_model_schema = cast(type[AgentStructuredOutput], tools_output_model)
self.output_model_schema = output_model_schema
if self.output_model_schema is not None:
self.tools.use_structured_output_action(self.output_model_schema)
# Core components - task enhancement now has access to output_model_schema from tools
self.task = self._enhance_task_with_schema(task, output_model_schema)
self.llm = llm
self.judge_llm = judge_llm
# Fallback LLM configuration
self._fallback_llm: BaseChatModel | None = fallback_llm
self._using_fallback_llm: bool = False
self._original_llm: BaseChatModel = llm # Store original for reference
self.directly_open_url = directly_open_url
self.include_recent_events = include_recent_events
self._url_shortening_limit = _url_shortening_limit
self.sensitive_data = sensitive_data
self.sample_images = sample_images
self.settings = AgentSettings(
use_vision=use_vision,
vision_detail_level=vision_detail_level,
save_conversation_path=save_conversation_path,
save_conversation_path_encoding=save_conversation_path_encoding,
max_failures=max_failures,
override_system_message=override_system_message,
extend_system_message=extend_system_message,
generate_gif=generate_gif,
include_attributes=include_attributes,
max_actions_per_step=max_actions_per_step,
use_thinking=use_thinking,
flash_mode=flash_mode,
max_history_items=max_history_items,
page_extraction_llm=page_extraction_llm,
calculate_cost=calculate_cost,
include_tool_call_examples=include_tool_call_examples,
llm_timeout=llm_timeout,
step_timeout=step_timeout,
final_response_after_failure=final_response_after_failure,
use_judge=use_judge,
ground_truth=ground_truth,
)
# Token cost service
self.token_cost_service = TokenCost(include_cost=calculate_cost)
self.token_cost_service.register_llm(llm)
self.token_cost_service.register_llm(page_extraction_llm)
self.token_cost_service.register_llm(judge_llm)
# Initialize state
self.state = injected_agent_state or AgentState()
# Initialize history
self.history = AgentHistoryList(history=[], usage=None)
# Initialize agent directory
import time
timestamp = int(time.time())
base_tmp = Path(tempfile.gettempdir())
self.agent_directory = base_tmp / f'browser_use_agent_{self.id}_{timestamp}'
# Initialize file system and screenshot service
self._set_file_system(file_system_path)
self._set_screenshot_service()
# Action setup
self._setup_action_models()
self._set_browser_use_version_and_source(source)
initial_url = None
# only load url if no initial actions are provided
if self.directly_open_url and not self.state.follow_up_task and not initial_actions:
initial_url = self._extract_start_url(self.task)
if initial_url:
self.logger.info(f'🔗 Found URL in task: {initial_url}, adding as initial action...')
initial_actions = [{'navigate': {'url': initial_url, 'new_tab': False}}]
self.initial_url = initial_url
self.initial_actions = self._convert_initial_actions(initial_actions) if initial_actions else None
# Verify we can connect to the model
self._verify_and_setup_llm()
# TODO: move this logic to the LLMs
# Handle users trying to use use_vision=True with DeepSeek models
if 'deepseek' in self.llm.model.lower():
self.logger.warning('⚠️ DeepSeek models do not support use_vision=True yet. Setting use_vision=False for now...')
self.settings.use_vision = False
# Handle users trying to use use_vision=True with XAI models that don't support it
# grok-3 variants and grok-code don't support vision; grok-2 and grok-4 do
model_lower = self.llm.model.lower()
if 'grok-3' in model_lower or 'grok-code' in model_lower:
self.logger.warning('⚠️ This XAI model does not support use_vision=True yet. Setting use_vision=False for now...')
self.settings.use_vision = False
logger.debug(
f'{" +vision" if self.settings.use_vision else ""}'
f' extraction_model={self.settings.page_extraction_llm.model if self.settings.page_extraction_llm else "Unknown"}'
f'{" +file_system" if self.file_system else ""}'
)
# Store llm_screenshot_size in browser_session so tools can access it
self.browser_session.llm_screenshot_size = llm_screenshot_size
# Check if LLM is ChatAnthropic instance
from browser_use.llm.anthropic.chat import ChatAnthropic
is_anthropic = isinstance(self.llm, ChatAnthropic)
# Check if model is a browser-use fine-tuned model (uses simplified prompts)
is_browser_use_model = 'browser-use/' in self.llm.model.lower()
# Initialize message manager with state
# Initial system prompt with all actions - will be updated during each step
self._message_manager = MessageManager(
task=self.task,
system_message=SystemPrompt(
max_actions_per_step=self.settings.max_actions_per_step,
override_system_message=override_system_message,
extend_system_message=extend_system_message,
use_thinking=self.settings.use_thinking,
flash_mode=self.settings.flash_mode,
is_anthropic=is_anthropic,
is_browser_use_model=is_browser_use_model,
).get_system_message(),
file_system=self.file_system,
state=self.state.message_manager_state,
use_thinking=self.settings.use_thinking,
# Settings that were previously in MessageManagerSettings
include_attributes=self.settings.include_attributes,
sensitive_data=sensitive_data,
max_history_items=self.settings.max_history_items,
vision_detail_level=self.settings.vision_detail_level,
include_tool_call_examples=self.settings.include_tool_call_examples,
include_recent_events=self.include_recent_events,
sample_images=self.sample_images,
llm_screenshot_size=llm_screenshot_size,
)
if self.sensitive_data:
# Check if sensitive_data has domain-specific credentials
has_domain_specific_credentials = any(isinstance(v, dict) for v in self.sensitive_data.values())
# If no allowed_domains are configured, show a security warning
if not self.browser_profile.allowed_domains:
self.logger.warning(
'⚠️ Agent(sensitive_data=••••••••) was provided but Browser(allowed_domains=[...]) is not locked down! ⚠️\n'
' ☠️ If the agent visits a malicious website and encounters a prompt-injection attack, your sensitive_data may be exposed!\n\n'
' \n'
)
# If we're using domain-specific credentials, validate domain patterns
elif has_domain_specific_credentials:
# For domain-specific format, ensure all domain patterns are included in allowed_domains
domain_patterns = [k for k, v in self.sensitive_data.items() if isinstance(v, dict)]
# Validate each domain pattern against allowed_domains
for domain_pattern in domain_patterns:
is_allowed = False
for allowed_domain in self.browser_profile.allowed_domains:
# Special cases that don't require URL matching
if domain_pattern == allowed_domain or allowed_domain == '*':
is_allowed = True
break
# Need to create example URLs to compare the patterns
# Extract the domain parts, ignoring scheme
pattern_domain = domain_pattern.split('://')[-1] if '://' in domain_pattern else domain_pattern
allowed_domain_part = allowed_domain.split('://')[-1] if '://' in allowed_domain else allowed_domain
# Check if pattern is covered by an allowed domain
# Example: "google.com" is covered by "*.google.com"
if pattern_domain == allowed_domain_part or (
allowed_domain_part.startswith('*.')
and (
pattern_domain == allowed_domain_part[2:]
or pattern_domain.endswith('.' + allowed_domain_part[2:])
)
):
is_allowed = True
break
if not is_allowed:
self.logger.warning(
f'⚠️ Domain pattern "{domain_pattern}" in sensitive_data is not covered by any pattern in allowed_domains={self.browser_profile.allowed_domains}\n'
f' This may be a security risk as credentials could be used on unintended domains.'
)
# Callbacks
self.register_new_step_callback = register_new_step_callback
self.register_done_callback = register_done_callback
self.register_should_stop_callback = register_should_stop_callback
self.register_external_agent_status_raise_error_callback = register_external_agent_status_raise_error_callback
# Telemetry
self.telemetry = ProductTelemetry()
# Event bus with WAL persistence
# Default to ~/.config/browseruse/events/{agent_session_id}.jsonl
# wal_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'events' / f'{self.session_id}.jsonl'
self.eventbus = EventBus(name=f'Agent_{str(self.id)[-4:]}')
if self.settings.save_conversation_path:
self.settings.save_conversation_path = Path(self.settings.save_conversation_path).expanduser().resolve()
self.logger.info(f'💬 Saving conversation to {_log_pretty_path(self.settings.save_conversation_path)}')
# Initialize download tracking
assert self.browser_session is not None, 'BrowserSession is not set up'
self.has_downloads_path = self.browser_session.browser_profile.downloads_path is not None
if self.has_downloads_path:
self._last_known_downloads: list[str] = []
self.logger.debug('📁 Initialized download tracking for agent')
# Event-based pause control (kept out of AgentState for serialization)
self._external_pause_event = asyncio.Event()
self._external_pause_event.set()
def _enhance_task_with_schema(self, task: str, output_model_schema: type[AgentStructuredOutput] | None) -> str:
"""Enhance task description with output schema information if provided."""
if output_model_schema is None:
return task
try:
schema = output_model_schema.model_json_schema()
import json
schema_json = json.dumps(schema, indent=2)
enhancement = f'\nExpected output format: {output_model_schema.__name__}\n{schema_json}'
return task + enhancement
except Exception as e:
self.logger.debug(f'Could not parse output schema: {e}')
return task
@property
def logger(self) -> logging.Logger:
	"""Get instance-specific logger with task ID in the name"""
	# logger may be called in __init__ so we don't assume self.* attributes have been initialized;
	# each lookup uses getattr with a walrus binding and falls back to dash placeholders.
	_task_id = task_id[-4:] if (task_id := getattr(self, 'task_id', None)) else '----'
	_browser_session_id = browser_session.id[-4:] if (browser_session := getattr(self, 'browser_session', None)) else '----'
	_current_target_id = (
		browser_session.agent_focus_target_id[-2:]
		if (browser_session := getattr(self, 'browser_session', None)) and browser_session.agent_focus_target_id
		else '--'
	)
	# Logger name encodes agent task (🅰), browser session (🅑) and focused target (🅣) id suffixes.
	return logging.getLogger(f'browser_use.Agent🅰 {_task_id} ⇢ 🅑 {_browser_session_id} 🅣 {_current_target_id}')
@property
def browser_profile(self) -> BrowserProfile:
	"""Profile of the underlying browser session; requires the session to be set up."""
	session = self.browser_session
	assert session is not None, 'BrowserSession is not set up'
	return session.browser_profile
@property
def is_using_fallback_llm(self) -> bool:
	"""Whether the agent has switched from its primary LLM to the fallback one."""
	using_fallback = self._using_fallback_llm
	return using_fallback
@property
def current_llm_model(self) -> str:
	"""Model name of the currently active LLM, or 'unknown' when it exposes none."""
	return getattr(self.llm, 'model', 'unknown')
async def _check_and_update_downloads(self, context: str = '') -> None:
	"""Check for new downloads and update available file paths.

	Args:
		context: optional label included in debug log lines (typically the call site).
	"""
	# No-op unless the browser profile configured a downloads directory.
	if not self.has_downloads_path:
		return
	assert self.browser_session is not None, 'BrowserSession is not set up'
	try:
		current_downloads = self.browser_session.downloaded_files
		# Only propagate when the download list actually changed since the last check.
		if current_downloads != self._last_known_downloads:
			self._update_available_file_paths(current_downloads)
			self._last_known_downloads = current_downloads
			if context:
				self.logger.debug(f'📁 {context}: Updated available files')
	except Exception as e:
		# Best-effort: download tracking must never break an agent step, so only log.
		error_context = f' {context}' if context else ''
		self.logger.debug(f'📁 Failed to check for downloads{error_context}: {type(e).__name__}: {e}')
def _update_available_file_paths(self, downloads: list[str]) -> None:
"""Update available_file_paths with downloaded files."""
if not self.has_downloads_path:
return
current_files = set(self.available_file_paths or [])
new_files = set(downloads) - current_files
if new_files:
self.available_file_paths = list(current_files | new_files)
self.logger.info(
f'📁 Added {len(new_files)} downloaded files to available_file_paths (total: {len(self.available_file_paths)} files)'
)
for file_path in new_files:
self.logger.info(f'📄 New file available: {file_path}')
else:
self.logger.debug(f'📁 No new downloads detected (tracking {len(current_files)} files)')
def _set_file_system(self, file_system_path: str | None = None) -> None:
	"""Initialize the agent's FileSystem, restored from saved state or freshly created.

	Args:
		file_system_path: explicit directory for a new file system; mutually
			exclusive with restoring from ``self.state.file_system_state``.

	Raises:
		ValueError: if both a saved state and an explicit path are provided.
		Exception: re-raised when restore or creation fails.
	"""
	# Check for conflicting parameters
	if self.state.file_system_state and file_system_path:
		raise ValueError(
			'Cannot provide both file_system_state (from agent state) and file_system_path. '
			'Either restore from existing state or create new file system at specified path, not both.'
		)
	# Check if we should restore from existing state first
	if self.state.file_system_state:
		try:
			# Restore file system from state at the exact same location
			self.file_system = FileSystem.from_state(self.state.file_system_state)
			# The parent directory of base_dir is the original file_system_path
			self.file_system_path = str(self.file_system.base_dir)
			self.logger.debug(f'💾 File system restored from state to: {self.file_system_path}')
			return
		except Exception as e:
			self.logger.error(f'💾 Failed to restore file system from state: {e}')
			raise e
	# Initialize new file system (explicit path wins over the default agent directory)
	try:
		if file_system_path:
			self.file_system = FileSystem(file_system_path)
			self.file_system_path = file_system_path
		else:
			# Use the agent directory for file system
			self.file_system = FileSystem(self.agent_directory)
			self.file_system_path = str(self.agent_directory)
	except Exception as e:
		self.logger.error(f'💾 Failed to initialize file system: {e}.')
		raise e
	# Save file system state to agent state so a later run can restore it
	self.state.file_system_state = self.file_system.get_state()
	self.logger.debug(f'💾 File system path: {self.file_system_path}')
def _set_screenshot_service(self) -> None:
	"""Initialize screenshot service using agent directory"""
	try:
		# Imported lazily inside the method.  # NOTE(review): presumably to avoid an import cycle — confirm
		from browser_use.screenshots.service import ScreenshotService

		self.screenshot_service = ScreenshotService(self.agent_directory)
		self.logger.debug(f'📸 Screenshot service initialized in: {self.agent_directory}/screenshots')
	except Exception as e:
		self.logger.error(f'📸 Failed to initialize screenshot service: {e}.')
		raise e
def save_file_system_state(self) -> None:
	"""Persist the current FileSystem contents into the serializable agent state.

	Raises:
		ValueError: when no file system has been initialized.
	"""
	fs = self.file_system
	if not fs:
		self.logger.error('💾 File system is not set up. Cannot save state.')
		raise ValueError('File system is not set up. Cannot save state.')
	self.state.file_system_state = fs.get_state()
def _set_browser_use_version_and_source(self, source_override: str | None = None) -> None:
	"""Record the installed browser-use version and whether it runs from a git checkout or a pip install."""
	# Use the helper function for version detection
	version = get_browser_use_version()
	try:
		package_root = Path(__file__).parent.parent.parent
		# A git checkout ships repo-only artifacts that a pip install does not.
		repo_markers = ('.git', 'README.md', 'docs', 'examples')
		source = 'git' if all((package_root / marker).exists() for marker in repo_markers) else 'pip'
	except Exception as e:
		self.logger.debug(f'Error determining source: {e}')
		source = 'unknown'
	if source_override is not None:
		source = source_override
	# Intentionally not logged here: _log_agent_run reports it so the info lands in
	# copy-pasted support-ticket logs.
	self.version = version
	self.source = source
def _setup_action_models(self) -> None:
	"""Setup dynamic action models from tools registry"""
	# Initially only include actions with no filters
	self.ActionModel = self.tools.registry.create_action_model()
	# Create output model with the dynamic actions; flash mode / thinking mode change
	# which fields the LLM is required to emit, hence the three variants.
	if self.settings.flash_mode:
		self.AgentOutput = AgentOutput.type_with_custom_actions_flash_mode(self.ActionModel)
	elif self.settings.use_thinking:
		self.AgentOutput = AgentOutput.type_with_custom_actions(self.ActionModel)
	else:
		self.AgentOutput = AgentOutput.type_with_custom_actions_no_thinking(self.ActionModel)
	# used to force the done action when max_steps is reached
	self.DoneActionModel = self.tools.registry.create_action_model(include_actions=['done'])
	if self.settings.flash_mode:
		self.DoneAgentOutput = AgentOutput.type_with_custom_actions_flash_mode(self.DoneActionModel)
	elif self.settings.use_thinking:
		self.DoneAgentOutput = AgentOutput.type_with_custom_actions(self.DoneActionModel)
	else:
		self.DoneAgentOutput = AgentOutput.type_with_custom_actions_no_thinking(self.DoneActionModel)
def _get_skill_slug(self, skill: 'Skill', all_skills: list['Skill']) -> str:
"""Generate a clean slug from skill title for action names
Converts title to lowercase, removes special characters, replaces spaces with underscores.
Adds UUID suffix if there are duplicate slugs.
Args:
skill: The skill to get slug for
all_skills: List of all skills to check for duplicates
Returns:
Slug like "cloned_github_stars_tracker" or "get_weather_data_a1b2" if duplicate
Examples:
"[Cloned] Github Stars Tracker" -> "cloned_github_stars_tracker"
"Get Weather Data" -> "get_weather_data"
"""
import re
# Remove special characters and convert to lowercase
slug = re.sub(r'[^\w\s]', '', skill.title.lower())
# Replace whitespace and hyphens with underscores
slug = re.sub(r'[\s\-]+', '_', slug)
# Remove leading/trailing underscores
slug = slug.strip('_')
# Check for duplicates and add UUID suffix if needed
same_slug_count = sum(
1 for s in all_skills if re.sub(r'[\s\-]+', '_', re.sub(r'[^\w\s]', '', s.title.lower()).strip('_')) == slug
)
if same_slug_count > 1:
return f'{slug}_{skill.id[:4]}'
else:
return slug
async def _register_skills_as_actions(self) -> None:
"""Register each skill as a separate action using slug as action name"""
if not self.skill_service or self._skills_registered:
return
self.logger.info('🔧 Registering skill actions...')
# Fetch all skills (auto-initializes if needed)
skills = await self.skill_service.get_all_skills()
if not skills:
self.logger.warning('No skills loaded from SkillService')
return
# Register each skill as its own action
for skill in skills:
slug = self._get_skill_slug(skill, skills)
param_model = skill.parameters_pydantic(exclude_cookies=True)
# Create description with skill title in quotes
description = f'{skill.description} (Skill: "{skill.title}")'
# Create handler for this specific skill
def make_skill_handler(skill_id: str):
async def skill_handler(params: BaseModel) -> ActionResult:
"""Execute a specific skill"""
assert self.skill_service is not None, 'SkillService not initialized'
# Convert parameters to dict
if isinstance(params, BaseModel):
skill_params = params.model_dump()
elif isinstance(params, dict):
skill_params = params
else:
return ActionResult(extracted_content=None, error=f'Invalid parameters type: {type(params)}')
# Get cookies from browser
_cookies = await self.browser_session.cookies()
try:
result = await self.skill_service.execute_skill(
skill_id=skill_id, parameters=skill_params, cookies=_cookies
)
if result.success:
return ActionResult(
extracted_content=str(result.result) if result.result else None,
error=None,
)
else:
return ActionResult(extracted_content=None, error=result.error or 'Skill execution failed')
except Exception as e:
# Check if it's a MissingCookieException
if type(e).__name__ == 'MissingCookieException':
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/cloud_events.py | browser_use/agent/cloud_events.py | import base64
import os
from datetime import datetime, timezone
from pathlib import Path
import anyio
from bubus import BaseEvent
from pydantic import Field, field_validator
from uuid_extensions import uuid7str
MAX_STRING_LENGTH = 100000 # 100K chars ~ 25k tokens should be enough
MAX_URL_LENGTH = 100000
MAX_TASK_LENGTH = 100000
MAX_COMMENT_LENGTH = 2000
MAX_FILE_CONTENT_SIZE = 50 * 1024 * 1024 # 50MB
class UpdateAgentTaskEvent(BaseEvent):
	"""Cloud-sync event that patches an existing agent task record."""

	# Required fields for identification
	id: str  # The task ID to update
	user_id: str = Field(max_length=255)  # For authorization
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	# Optional fields that can be updated
	stopped: bool | None = None
	paused: bool | None = None
	done_output: str | None = Field(None, max_length=MAX_STRING_LENGTH)
	finished_at: datetime | None = None
	agent_state: dict | None = None
	user_feedback_type: str | None = Field(None, max_length=10)  # UserFeedbackType enum value as string
	user_comment: str | None = Field(None, max_length=MAX_COMMENT_LENGTH)
	gif_url: str | None = Field(None, max_length=MAX_URL_LENGTH)

	@classmethod
	def from_agent(cls, agent) -> 'UpdateAgentTaskEvent':
		"""Create an UpdateAgentTaskEvent from an Agent instance

		Raises:
			ValueError: if the agent was never started (missing ``_task_start_time``).
		"""
		if not hasattr(agent, '_task_start_time'):
			raise ValueError('Agent must have _task_start_time attribute')
		# A final result only exists once the history contains a done action.
		done_output = agent.history.final_result() if agent.history else None
		return cls(
			id=str(agent.task_id),
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			stopped=agent.state.stopped if hasattr(agent.state, 'stopped') else False,
			paused=agent.state.paused if hasattr(agent.state, 'paused') else False,
			done_output=done_output,
			finished_at=datetime.now(timezone.utc) if agent.history and agent.history.is_done() else None,
			agent_state=agent.state.model_dump() if hasattr(agent.state, 'model_dump') else {},
			user_feedback_type=None,
			user_comment=None,
			gif_url=None,
			# user_feedback_type and user_comment would be set by the API/frontend
			# gif_url would be set after GIF generation if needed
		)
class CreateAgentOutputFileEvent(BaseEvent):
	"""Cloud-sync event that uploads an output artifact (e.g. the recording GIF) for a task."""

	# Model fields
	id: str = Field(default_factory=uuid7str)
	user_id: str = Field(max_length=255)
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	task_id: str
	file_name: str = Field(max_length=255)
	file_content: str | None = None  # Base64 encoded file content
	content_type: str | None = Field(None, max_length=100)  # MIME type for file uploads
	created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))

	@field_validator('file_content')
	@classmethod
	def validate_file_size(cls, v: str | None) -> str | None:
		"""Validate base64 file content size."""
		if v is None:
			return v
		# Remove data URL prefix if present; split at most once so any further
		# commas in the payload are preserved instead of truncating it.
		if ',' in v:
			v = v.split(',', 1)[1]
		# Estimate decoded size (base64 is ~33% larger)
		estimated_size = len(v) * 3 / 4
		if estimated_size > MAX_FILE_CONTENT_SIZE:
			raise ValueError(f'File content exceeds maximum size of {MAX_FILE_CONTENT_SIZE / 1024 / 1024}MB')
		return v

	@classmethod
	async def from_agent_and_file(cls, agent, output_path: str) -> 'CreateAgentOutputFileEvent':
		"""Create a CreateAgentOutputFileEvent from a file path

		Raises:
			FileNotFoundError: if *output_path* does not exist.
		"""
		gif_path = Path(output_path)
		if not gif_path.exists():
			raise FileNotFoundError(f'File not found: {output_path}')
		gif_size = os.path.getsize(gif_path)
		# Read content for base64 encoding only when it fits the upload limit;
		# use MAX_FILE_CONTENT_SIZE (was a duplicated 50MB literal) so this stays
		# consistent with validate_file_size above.
		gif_content = None
		if gif_size < MAX_FILE_CONTENT_SIZE:
			async with await anyio.open_file(gif_path, 'rb') as f:
				gif_bytes = await f.read()
				gif_content = base64.b64encode(gif_bytes).decode('utf-8')
		return cls(
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			task_id=str(agent.task_id),
			file_name=gif_path.name,
			file_content=gif_content,  # Base64 encoded
			content_type='image/gif',
		)
class CreateAgentStepEvent(BaseEvent):
	"""Cloud-sync event that records one completed agent step (goal, memory, actions, screenshot)."""

	# Model fields
	id: str = Field(default_factory=uuid7str)
	user_id: str = Field(max_length=255)  # Added for authorization checks
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
	agent_task_id: str
	step: int
	evaluation_previous_goal: str = Field(max_length=MAX_STRING_LENGTH)
	memory: str = Field(max_length=MAX_STRING_LENGTH)
	next_goal: str = Field(max_length=MAX_STRING_LENGTH)
	actions: list[dict]
	screenshot_url: str | None = Field(None, max_length=MAX_FILE_CONTENT_SIZE)  # ~50MB for base64 images
	url: str = Field(default='', max_length=MAX_URL_LENGTH)

	@field_validator('screenshot_url')
	@classmethod
	def validate_screenshot_size(cls, v: str | None) -> str | None:
		"""Validate screenshot URL or base64 content size."""
		if v is None or not v.startswith('data:'):
			return v
		# It's base64 data, check size
		if ',' in v:
			base64_part = v.split(',')[1]
			estimated_size = len(base64_part) * 3 / 4
			if estimated_size > MAX_FILE_CONTENT_SIZE:
				raise ValueError(f'Screenshot content exceeds maximum size of {MAX_FILE_CONTENT_SIZE / 1024 / 1024}MB')
		return v

	@classmethod
	def from_agent_step(
		cls, agent, model_output, result: list, actions_data: list[dict], browser_state_summary
	) -> 'CreateAgentStepEvent':
		"""Create a CreateAgentStepEvent from agent step data"""
		# Hoisted: previously `import logging` + getLogger were duplicated in both
		# branches below.
		import logging

		logger = logging.getLogger(__name__)
		# Extract current state from model output
		current_state = model_output.current_state if hasattr(model_output, 'current_state') else None
		# Capture screenshot as base64 data URL if available
		screenshot_url = None
		if browser_state_summary.screenshot:
			screenshot_url = f'data:image/png;base64,{browser_state_summary.screenshot}'
			logger.debug(f'📸 Including screenshot in CreateAgentStepEvent, length: {len(browser_state_summary.screenshot)}')
		else:
			logger.debug('📸 No screenshot in browser_state_summary for CreateAgentStepEvent')
		return cls(
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			agent_task_id=str(agent.task_id),
			step=agent.state.n_steps,
			evaluation_previous_goal=current_state.evaluation_previous_goal if current_state else '',
			memory=current_state.memory if current_state else '',
			next_goal=current_state.next_goal if current_state else '',
			actions=actions_data,  # List of action dicts
			url=browser_state_summary.url,
			screenshot_url=screenshot_url,
		)
class CreateAgentTaskEvent(BaseEvent):
	"""Cloud-sync event that registers a new agent task when a run starts."""

	# Model fields
	id: str = Field(default_factory=uuid7str)
	user_id: str = Field(max_length=255)  # Added for authorization checks
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	agent_session_id: str
	llm_model: str = Field(max_length=200)  # LLMModel enum value as string
	stopped: bool = False
	paused: bool = False
	task: str = Field(max_length=MAX_TASK_LENGTH)
	done_output: str | None = Field(None, max_length=MAX_STRING_LENGTH)
	scheduled_task_id: str | None = None
	started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
	finished_at: datetime | None = None
	agent_state: dict = Field(default_factory=dict)
	user_feedback_type: str | None = Field(None, max_length=10)  # UserFeedbackType enum value as string
	user_comment: str | None = Field(None, max_length=MAX_COMMENT_LENGTH)
	gif_url: str | None = Field(None, max_length=MAX_URL_LENGTH)

	@classmethod
	def from_agent(cls, agent) -> 'CreateAgentTaskEvent':
		"""Create a CreateAgentTaskEvent from an Agent instance"""
		return cls(
			id=str(agent.task_id),
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			agent_session_id=str(agent.session_id),
			task=agent.task,
			# NOTE(review): reads agent.llm.model_name while other call sites use agent.llm.model — confirm attribute exists on all chat models
			llm_model=agent.llm.model_name,
			agent_state=agent.state.model_dump() if hasattr(agent.state, 'model_dump') else {},
			stopped=False,
			paused=False,
			done_output=None,
			started_at=datetime.fromtimestamp(agent._task_start_time, tz=timezone.utc),
			finished_at=None,
			user_feedback_type=None,
			user_comment=None,
			gif_url=None,
		)
class CreateAgentSessionEvent(BaseEvent):
	"""Cloud-sync event that registers a new browser/agent session."""

	# Model fields
	id: str = Field(default_factory=uuid7str)
	user_id: str = Field(max_length=255)
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	browser_session_id: str = Field(max_length=255)
	browser_session_live_url: str = Field(max_length=MAX_URL_LENGTH)
	browser_session_cdp_url: str = Field(max_length=MAX_URL_LENGTH)
	browser_session_stopped: bool = False
	browser_session_stopped_at: datetime | None = None
	is_source_api: bool | None = None
	browser_state: dict = Field(default_factory=dict)
	browser_session_data: dict | None = None

	@classmethod
	def from_agent(cls, agent) -> 'CreateAgentSessionEvent':
		"""Create a CreateAgentSessionEvent from an Agent instance"""
		return cls(
			id=str(agent.session_id),
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			browser_session_id=agent.browser_session.id,
			browser_session_live_url='',  # To be filled by cloud handler
			browser_session_cdp_url='',  # To be filled by cloud handler
			# Snapshot of the browser configuration; runtime fields start zeroed.
			browser_state={
				'viewport': agent.browser_profile.viewport if agent.browser_profile else {'width': 1280, 'height': 720},
				'user_agent': agent.browser_profile.user_agent if agent.browser_profile else None,
				'headless': agent.browser_profile.headless if agent.browser_profile else True,
				'initial_url': None,  # Will be updated during execution
				'final_url': None,  # Will be updated during execution
				'total_pages_visited': 0,  # Will be updated during execution
				'session_duration_seconds': 0,  # Will be updated during execution
			},
			browser_session_data={
				'cookies': [],
				'secrets': {},
				# TODO: send secrets safely so tasks can be replayed on cloud seamlessly
				# 'secrets': dict(agent.sensitive_data) if agent.sensitive_data else {},
				'allowed_domains': agent.browser_profile.allowed_domains if agent.browser_profile else [],
			},
		)
class UpdateAgentSessionEvent(BaseEvent):
"""Event to update an existing agent session"""
# Model fields
id: str # Session ID to update
user_id: str = Field(max_length=255)
device_id: str | None = Field(None, max_length=255)
browser_session_stopped: bool | None = None
browser_session_stopped_at: datetime | None = None
end_reason: str | None = Field(None, max_length=100) # Why the session ended
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/variable_detector.py | browser_use/agent/variable_detector.py | """Detect variables in agent history for reuse"""
import re
from browser_use.agent.views import AgentHistoryList, DetectedVariable
from browser_use.dom.views import DOMInteractedElement
def detect_variables_in_history(history: AgentHistoryList) -> dict[str, DetectedVariable]:
	"""
	Analyze agent history and detect reusable variables.

	Uses two strategies:
	1. Element attributes (id, name, type, placeholder, aria-label) - most reliable
	2. Value pattern matching (email, phone, date formats) - fallback

	Returns:
		Dictionary mapping variable names to DetectedVariable objects
	"""
	detected: dict[str, DetectedVariable] = {}
	detected_values: set[str] = set()  # Track which values we've already detected
	# Note: the step index was previously bound via enumerate but never used.
	for history_item in history.history:
		if not history_item.model_output:
			continue
		for action_idx, action in enumerate(history_item.model_output.action):
			# Convert action to dict - handle both Pydantic models and dict-like objects
			if hasattr(action, 'model_dump'):
				action_dict = action.model_dump()
			elif isinstance(action, dict):
				action_dict = action
			else:
				# For SimpleNamespace or similar objects
				action_dict = vars(action)
			# Get the interacted element for this action (if available); elements are
			# aligned positionally with the step's actions.
			element = None
			if history_item.state and history_item.state.interacted_element:
				if len(history_item.state.interacted_element) > action_idx:
					element = history_item.state.interacted_element[action_idx]
			# Detect variables in this action (mutates detected/detected_values)
			_detect_in_action(action_dict, element, detected, detected_values)
	return detected
def _detect_in_action(
	action_dict: dict,
	element: DOMInteractedElement | None,
	detected: dict[str, DetectedVariable],
	detected_values: set[str],
) -> None:
	"""Scan one action's parameters and record any values that look like reusable variables.

	Mutates *detected* and *detected_values* in place.
	"""
	candidate_fields = ('text', 'query')  # fields that commonly contain user-supplied values
	for params in action_dict.values():
		if not isinstance(params, dict):
			continue
		for field in candidate_fields:
			value = params.get(field)
			if not isinstance(value, str) or not value.strip():
				continue
			if value in detected_values:
				continue  # exact value already captured earlier
			# Classify the value, preferring element-attribute context when present
			detection = _detect_variable_type(value, element)
			if detection is None:
				continue
			base_name, var_format = detection
			unique_name = _ensure_unique_name(base_name, detected)
			detected[unique_name] = DetectedVariable(
				name=unique_name,
				original_value=value,
				type='string',
				format=var_format,
			)
			detected_values.add(value)
def _detect_variable_type(
	value: str,
	element: DOMInteractedElement | None = None,
) -> tuple[str, str | None] | None:
	"""
	Classify *value* as a reusable variable.

	Element attributes (id, name, type, placeholder, aria-label) take priority as
	they are the most reliable signal; raw value pattern matching is the fallback.

	Returns:
		(variable_name, format) or None if not detected
	"""
	if element and element.attributes:
		from_attrs = _detect_from_attributes(element.attributes)
		if from_attrs:
			return from_attrs
	return _detect_from_value_pattern(value)
def _detect_from_attributes(attributes: dict[str, str]) -> tuple[str, str | None] | None:
"""
Detect variable from element attributes.
Check attributes in priority order:
1. type attribute (HTML5 input types - most specific)
2. id, name, placeholder, aria-label (semantic hints)
"""
# Check 'type' attribute first (HTML5 input types)
input_type = attributes.get('type', '').lower()
if input_type == 'email':
return ('email', 'email')
elif input_type == 'tel':
return ('phone', 'phone')
elif input_type == 'date':
return ('date', 'date')
elif input_type == 'number':
return ('number', 'number')
elif input_type == 'url':
return ('url', 'url')
# Combine semantic attributes for keyword matching
semantic_attrs = [
attributes.get('id', ''),
attributes.get('name', ''),
attributes.get('placeholder', ''),
attributes.get('aria-label', ''),
]
combined_text = ' '.join(semantic_attrs).lower()
# Address detection
if any(keyword in combined_text for keyword in ['address', 'street', 'addr']):
if 'billing' in combined_text:
return ('billing_address', None)
elif 'shipping' in combined_text:
return ('shipping_address', None)
else:
return ('address', None)
# Comment/Note detection
if any(keyword in combined_text for keyword in ['comment', 'note', 'message', 'description']):
return ('comment', None)
# Email detection
if 'email' in combined_text or 'e-mail' in combined_text:
return ('email', 'email')
# Phone detection
if any(keyword in combined_text for keyword in ['phone', 'tel', 'mobile', 'cell']):
return ('phone', 'phone')
# Name detection (order matters - check specific before general)
if 'first' in combined_text and 'name' in combined_text:
return ('first_name', None)
elif 'last' in combined_text and 'name' in combined_text:
return ('last_name', None)
elif 'full' in combined_text and 'name' in combined_text:
return ('full_name', None)
elif 'name' in combined_text:
return ('name', None)
# Date detection
if any(keyword in combined_text for keyword in ['date', 'dob', 'birth']):
return ('date', 'date')
# City detection
if 'city' in combined_text:
return ('city', None)
# State/Province detection
if 'state' in combined_text or 'province' in combined_text:
return ('state', None)
# Country detection
if 'country' in combined_text:
return ('country', None)
# Zip code detection
if any(keyword in combined_text for keyword in ['zip', 'postal', 'postcode']):
return ('zip_code', 'postal_code')
# Company detection
if 'company' in combined_text or 'organization' in combined_text:
return ('company', None)
return None
def _detect_from_value_pattern(value: str) -> tuple[str, str | None] | None:
"""
Detect variable type from value pattern (fallback when no element context).
Patterns:
- Email: contains @ and . with valid format
- Phone: digits with separators, 10+ chars
- Date: YYYY-MM-DD format
- Name: Capitalized word(s), 2-30 chars, letters only
- Number: Pure digits, 1-9 chars
"""
# Email detection - most specific first
if '@' in value and '.' in value:
# Basic email validation
if re.match(r'^[\w\.-]+@[\w\.-]+\.\w+$', value):
return ('email', 'email')
# Phone detection (digits with separators, 10+ chars)
if re.match(r'^[\d\s\-\(\)\+]+$', value):
# Remove separators and check length
digits_only = re.sub(r'[\s\-\(\)\+]', '', value)
if len(digits_only) >= 10:
return ('phone', 'phone')
# Date detection (YYYY-MM-DD or similar)
if re.match(r'^\d{4}-\d{2}-\d{2}$', value):
return ('date', 'date')
# Name detection (capitalized, only letters/spaces, 2-30 chars)
if value and value[0].isupper() and value.replace(' ', '').replace('-', '').isalpha() and 2 <= len(value) <= 30:
words = value.split()
if len(words) == 1:
return ('first_name', None)
elif len(words) == 2:
return ('full_name', None)
else:
return ('name', None)
# Number detection (pure digits, not phone length)
if value.isdigit() and 1 <= len(value) <= 9:
return ('number', 'number')
return None
def _ensure_unique_name(base_name: str, existing: dict[str, DetectedVariable]) -> str:
"""
Ensure variable name is unique by adding suffix if needed.
Examples:
first_name → first_name
first_name (exists) → first_name_2
first_name_2 (exists) → first_name_3
"""
if base_name not in existing:
return base_name
# Add numeric suffix
counter = 2
while f'{base_name}_{counter}' in existing:
counter += 1
return f'{base_name}_{counter}'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/message_manager/views.py | browser_use/agent/message_manager/views.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict, Field
from browser_use.llm.messages import (
BaseMessage,
)
if TYPE_CHECKING:
pass
class HistoryItem(BaseModel):
	"""Represents a single agent history item with its data and string representation"""

	step_number: int | None = None
	evaluation_previous_goal: str | None = None
	memory: str | None = None
	next_goal: str | None = None
	action_results: str | None = None
	# At most one of `error` / `system_message` may be set (enforced in model_post_init).
	error: str | None = None
	system_message: str | None = None

	model_config = ConfigDict(arbitrary_types_allowed=True)

	def model_post_init(self, __context) -> None:
		"""Validate that error and system_message are not both provided"""
		if self.error is not None and self.system_message is not None:
			raise ValueError('Cannot have both error and system_message at the same time')

	def to_string(self) -> str:
		"""Get string representation of the history item.

		Renders `<step>` (or `<step_unknown>` when step_number is None) followed
		by either the error text, the raw system message, or the joined
		non-empty goal/memory/result fields.
		"""
		# NOTE(review): the tag is always the literal 'step' — the step number
		# itself is not interpolated; confirm this is intended.
		step_str = 'step' if self.step_number is not None else 'step_unknown'
		if self.error:
			return f"""<{step_str}>
{self.error}"""
		elif self.system_message:
			# System messages are emitted verbatim, without a <step> tag.
			return self.system_message
		else:
			content_parts = []
			# Only include evaluation_previous_goal if it's not None/empty
			if self.evaluation_previous_goal:
				content_parts.append(f'{self.evaluation_previous_goal}')
			# Always include memory
			if self.memory:
				content_parts.append(f'{self.memory}')
			# Only include next_goal if it's not None/empty
			if self.next_goal:
				content_parts.append(f'{self.next_goal}')
			if self.action_results:
				content_parts.append(self.action_results)
			content = '\n'.join(content_parts)
			return f"""<{step_str}>
{content}"""
class MessageHistory(BaseModel):
	"""History of messages"""

	# Single slots for the system and current-state messages; contextual
	# messages (per-step notes, retry instructions) accumulate in a list.
	system_message: BaseMessage | None = None
	state_message: BaseMessage | None = None
	context_messages: list[BaseMessage] = Field(default_factory=list)
	model_config = ConfigDict(arbitrary_types_allowed=True)

	def get_messages(self) -> list[BaseMessage]:
		"""Get all messages in the correct order: system -> state -> contextual"""
		messages = []
		if self.system_message:
			messages.append(self.system_message)
		if self.state_message:
			messages.append(self.state_message)
		messages.extend(self.context_messages)
		return messages
class MessageManagerState(BaseModel):
	"""Holds the state for MessageManager"""

	history: MessageHistory = Field(default_factory=MessageHistory)
	tool_id: int = 1
	# Seeded with a step-0 'Agent initialized' entry so history is never empty.
	agent_history_items: list[HistoryItem] = Field(
		default_factory=lambda: [HistoryItem(step_number=0, system_message='Agent initialized')]
	)
	# One-shot extracted content carried into the next state message.
	read_state_description: str = ''
	# Images to include in the next state message (cleared after each step)
	read_state_images: list[dict[str, Any]] = Field(default_factory=list)

	model_config = ConfigDict(arbitrary_types_allowed=True)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/message_manager/service.py | browser_use/agent/message_manager/service.py | from __future__ import annotations
import logging
from typing import Literal
from browser_use.agent.message_manager.views import (
HistoryItem,
)
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.agent.views import (
ActionResult,
AgentOutput,
AgentStepInfo,
MessageManagerState,
)
from browser_use.browser.views import BrowserStateSummary
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.messages import (
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
)
from browser_use.observability import observe_debug
from browser_use.utils import match_url_with_domain_pattern, time_execution_sync
logger = logging.getLogger(__name__)
# ========== Logging Helper Functions ==========
# These functions are used ONLY for formatting debug log output.
# They do NOT affect the actual message content sent to the LLM.
# All logging functions start with _log_ for easy identification.
def _log_get_message_emoji(message: BaseMessage) -> str:
"""Get emoji for a message type - used only for logging display"""
emoji_map = {
'UserMessage': '💬',
'SystemMessage': '🧠',
'AssistantMessage': '🔨',
}
return emoji_map.get(message.__class__.__name__, '🎮')
def _log_format_message_line(message: BaseMessage, content: str, is_last_message: bool, terminal_width: int) -> list[str]:
	"""Format a single message for logging display.

	Returns one or two terminal-width-bounded lines: an emoji/token prefix plus
	the (possibly truncated) content; the last message may wrap onto a second
	indented line. Never raises — falls back to a placeholder line on error.
	"""
	try:
		lines = []
		# Get emoji and token info
		emoji = _log_get_message_emoji(message)
		# token_str = str(message.metadata.tokens).rjust(4)
		# TODO: fix the token count
		token_str = '??? (TODO)'
		prefix = f'{emoji}[{token_str}]: '
		# Calculate available width (emoji=2 visual cols + [token]: =8 chars)
		content_width = terminal_width - 10
		# Handle last message wrapping
		if is_last_message and len(content) > content_width:
			# Find a good break point (last space before the width limit)
			break_point = content.rfind(' ', 0, content_width)
			if break_point > content_width * 0.7:  # Keep at least 70% of line
				first_line = content[:break_point]
				rest = content[break_point + 1 :]
			else:
				# No good break point, just truncate
				first_line = content[:content_width]
				rest = content[content_width:]
			lines.append(prefix + first_line)
			# Second line with 10-space indent; overflow beyond it is dropped
			if rest:
				if len(rest) > terminal_width - 10:
					rest = rest[: terminal_width - 10]
				lines.append(' ' * 10 + rest)
		else:
			# Single line - truncate if needed
			if len(content) > content_width:
				content = content[:content_width]
			lines.append(prefix + content)
		return lines
	except Exception as e:
		logger.warning(f'Failed to format message line for logging: {e}')
		# Return a simple fallback line
		return ['❓[ ?]: [Error formatting message]']
# ========== End of Logging Helper Functions ==========
class MessageManager:
	"""Builds and maintains the LLM message list for the agent.

	Owns the system prompt, the single rolling state message, and per-step
	context messages; also maintains the agent history description (with
	optional trimming) and redacts sensitive data before messages are stored.
	"""

	vision_detail_level: Literal['auto', 'low', 'high']

	def __init__(
		self,
		task: str,
		system_message: SystemMessage,
		file_system: FileSystem,
		# NOTE(review): mutable default — this MessageManagerState() is created
		# once at import time and shared by all callers that omit `state`;
		# confirm intended.
		state: MessageManagerState = MessageManagerState(),
		use_thinking: bool = True,
		include_attributes: list[str] | None = None,
		sensitive_data: dict[str, str | dict[str, str]] | None = None,
		max_history_items: int | None = None,
		vision_detail_level: Literal['auto', 'low', 'high'] = 'auto',
		include_tool_call_examples: bool = False,
		include_recent_events: bool = False,
		sample_images: list[ContentPartTextParam | ContentPartImageParam] | None = None,
		llm_screenshot_size: tuple[int, int] | None = None,
	):
		self.task = task
		self.state = state
		self.system_prompt = system_message
		self.file_system = file_system
		self.sensitive_data_description = ''
		self.use_thinking = use_thinking
		self.max_history_items = max_history_items
		self.vision_detail_level = vision_detail_level
		self.include_tool_call_examples = include_tool_call_examples
		self.include_recent_events = include_recent_events
		self.sample_images = sample_images
		self.llm_screenshot_size = llm_screenshot_size
		assert max_history_items is None or max_history_items > 5, 'max_history_items must be None or greater than 5'
		# Store settings as direct attributes instead of in a settings object
		self.include_attributes = include_attributes or []
		self.sensitive_data = sensitive_data
		self.last_input_messages = []
		self.last_state_message_text: str | None = None
		# Only initialize messages if state is empty
		if len(self.state.history.get_messages()) == 0:
			self._set_message_with_type(self.system_prompt, 'system')

	@property
	def agent_history_description(self) -> str:
		"""Build agent history description from list of items, respecting max_history_items limit"""
		if self.max_history_items is None:
			# Include all items
			return '\n'.join(item.to_string() for item in self.state.agent_history_items)
		total_items = len(self.state.agent_history_items)
		# If we have fewer items than the limit, just return all items
		if total_items <= self.max_history_items:
			return '\n'.join(item.to_string() for item in self.state.agent_history_items)
		# We have more items than the limit, so we need to omit some
		omitted_count = total_items - self.max_history_items
		# Show first item + omitted message + most recent (max_history_items - 1) items
		# The omitted message doesn't count against the limit, only real history items do
		recent_items_count = self.max_history_items - 1  # -1 for first item
		items_to_include = [
			self.state.agent_history_items[0].to_string(),  # Keep first item (initialization)
			f'<sys>[... {omitted_count} previous steps omitted...]</sys>',
		]
		# Add most recent items
		items_to_include.extend([item.to_string() for item in self.state.agent_history_items[-recent_items_count:]])
		return '\n'.join(items_to_include)

	def add_new_task(self, new_task: str) -> None:
		"""Append a follow-up user request to the task and record it in history."""
		new_task = '<follow_up_user_request> ' + new_task.strip() + ' </follow_up_user_request>'
		# Wrap the original task once so follow-ups are distinguishable from it.
		if '<initial_user_request>' not in self.task:
			self.task = '<initial_user_request>' + self.task + '</initial_user_request>'
		self.task += '\n' + new_task
		task_update_item = HistoryItem(system_message=new_task)
		self.state.agent_history_items.append(task_update_item)

	def _update_agent_history_description(
		self,
		model_output: AgentOutput | None = None,
		result: list[ActionResult] | None = None,
		step_info: AgentStepInfo | None = None,
	) -> None:
		"""Update the agent history description.

		Folds the step's ActionResults into a new HistoryItem, and refreshes the
		one-shot read_state_description / read_state_images carried into the
		next state message.
		"""
		if result is None:
			result = []
		step_number = step_info.step_number if step_info else None
		self.state.read_state_description = ''
		self.state.read_state_images = []  # Clear images from previous step
		action_results = ''
		result_len = len(result)
		read_state_idx = 0
		for idx, action_result in enumerate(result):
			# One-shot extracted content goes into read_state (shown once, not in history).
			if action_result.include_extracted_content_only_once and action_result.extracted_content:
				self.state.read_state_description += (
					f'<read_state_{read_state_idx}>\n{action_result.extracted_content}\n</read_state_{read_state_idx}>\n'
				)
				read_state_idx += 1
				logger.debug(f'Added extracted_content to read_state_description: {action_result.extracted_content}')
			# Store images for one-time inclusion in the next message
			if action_result.images:
				self.state.read_state_images.extend(action_result.images)
				logger.debug(f'Added {len(action_result.images)} image(s) to read_state_images')
			# Persistent memory takes precedence over repeatable extracted content.
			if action_result.long_term_memory:
				action_results += f'{action_result.long_term_memory}\n'
				logger.debug(f'Added long_term_memory to action_results: {action_result.long_term_memory}')
			elif action_result.extracted_content and not action_result.include_extracted_content_only_once:
				action_results += f'{action_result.extracted_content}\n'
				logger.debug(f'Added extracted_content to action_results: {action_result.extracted_content}')
			if action_result.error:
				# Long errors keep only the first and last 100 characters.
				if len(action_result.error) > 200:
					error_text = action_result.error[:100] + '......' + action_result.error[-100:]
				else:
					error_text = action_result.error
				action_results += f'{error_text}\n'
				logger.debug(f'Added error to action_results: {error_text}')
		# Simple 60k character limit for read_state_description
		MAX_CONTENT_SIZE = 60000
		if len(self.state.read_state_description) > MAX_CONTENT_SIZE:
			self.state.read_state_description = (
				self.state.read_state_description[:MAX_CONTENT_SIZE] + '\n... [Content truncated at 60k characters]'
			)
			logger.debug(f'Truncated read_state_description to {MAX_CONTENT_SIZE} characters')
		self.state.read_state_description = self.state.read_state_description.strip('\n')
		if action_results:
			action_results = f'Result\n{action_results}'
		action_results = action_results.strip('\n') if action_results else None
		# Simple 60k character limit for action_results
		if action_results and len(action_results) > MAX_CONTENT_SIZE:
			action_results = action_results[:MAX_CONTENT_SIZE] + '\n... [Content truncated at 60k characters]'
			logger.debug(f'Truncated action_results to {MAX_CONTENT_SIZE} characters')
		# Build the history item
		if model_output is None:
			# Add history item for initial actions (step 0) or errors (step > 0)
			if step_number is not None:
				if step_number == 0 and action_results:
					# Step 0 with initial action results
					history_item = HistoryItem(step_number=step_number, action_results=action_results)
					self.state.agent_history_items.append(history_item)
				elif step_number > 0:
					# Error case for steps > 0
					history_item = HistoryItem(step_number=step_number, error='Agent failed to output in the right format.')
					self.state.agent_history_items.append(history_item)
		else:
			history_item = HistoryItem(
				step_number=step_number,
				evaluation_previous_goal=model_output.current_state.evaluation_previous_goal,
				memory=model_output.current_state.memory,
				next_goal=model_output.current_state.next_goal,
				action_results=action_results,
			)
			self.state.agent_history_items.append(history_item)

	def _get_sensitive_data_description(self, current_page_url) -> str:
		"""Return an instruction block listing the <secret> placeholders usable
		on the current page, or '' when no sensitive data applies."""
		sensitive_data = self.sensitive_data
		if not sensitive_data:
			return ''
		# Collect placeholders for sensitive data
		placeholders: set[str] = set()
		for key, value in sensitive_data.items():
			if isinstance(value, dict):
				# New format: {domain: {key: value}} — only include keys whose
				# domain pattern matches the current page URL.
				if current_page_url and match_url_with_domain_pattern(current_page_url, key, True):
					placeholders.update(value.keys())
			else:
				# Old format: {key: value}
				placeholders.add(key)
		if placeholders:
			placeholder_list = sorted(list(placeholders))
			info = f'Here are placeholders for sensitive data:\n{placeholder_list}\n'
			info += 'To use them, write <secret>the placeholder name</secret>'
			return info
		return ''

	@observe_debug(ignore_input=True, ignore_output=True, name='create_state_messages')
	@time_execution_sync('--create_state_messages')
	def create_state_messages(
		self,
		browser_state_summary: BrowserStateSummary,
		model_output: AgentOutput | None = None,
		result: list[ActionResult] | None = None,
		step_info: AgentStepInfo | None = None,
		use_vision: bool | Literal['auto'] = True,
		page_filtered_actions: str | None = None,
		sensitive_data=None,
		available_file_paths: list[str] | None = None,  # Always pass current available_file_paths
		unavailable_skills_info: str | None = None,  # Information about skills that cannot be used yet
	) -> None:
		"""Create single state message with all content"""
		# Clear contextual messages from previous steps to prevent accumulation
		self.state.history.context_messages.clear()
		# First, update the agent history items with the latest step results
		self._update_agent_history_description(model_output, result, step_info)
		# Use the passed sensitive_data parameter, falling back to instance variable
		effective_sensitive_data = sensitive_data if sensitive_data is not None else self.sensitive_data
		if effective_sensitive_data is not None:
			# Update instance variable to keep it in sync
			self.sensitive_data = effective_sensitive_data
		self.sensitive_data_description = self._get_sensitive_data_description(browser_state_summary.url)
		# Use only the current screenshot, but check if action results request screenshot inclusion
		screenshots = []
		include_screenshot_requested = False
		# Check if any action results request screenshot inclusion
		if result:
			for action_result in result:
				if action_result.metadata and action_result.metadata.get('include_screenshot'):
					include_screenshot_requested = True
					logger.debug('Screenshot inclusion requested by action result')
					break
		# Handle different use_vision modes:
		# - "auto": Only include screenshot if explicitly requested by action (e.g., screenshot)
		# - True: Always include screenshot
		# - False: Never include screenshot
		include_screenshot = False
		if use_vision is True:
			# Always include screenshot when use_vision=True
			include_screenshot = True
		elif use_vision == 'auto':
			# Only include screenshot if explicitly requested by action when use_vision="auto"
			include_screenshot = include_screenshot_requested
		# else: use_vision is False, never include screenshot (include_screenshot stays False)
		if include_screenshot and browser_state_summary.screenshot:
			screenshots.append(browser_state_summary.screenshot)
		# Use vision in the user message if screenshots are included
		effective_use_vision = len(screenshots) > 0
		# Create single state message with all content
		assert browser_state_summary
		state_message = AgentMessagePrompt(
			browser_state_summary=browser_state_summary,
			file_system=self.file_system,
			agent_history_description=self.agent_history_description,
			read_state_description=self.state.read_state_description,
			task=self.task,
			include_attributes=self.include_attributes,
			step_info=step_info,
			page_filtered_actions=page_filtered_actions,
			sensitive_data=self.sensitive_data_description,
			available_file_paths=available_file_paths,
			screenshots=screenshots,
			vision_detail_level=self.vision_detail_level,
			include_recent_events=self.include_recent_events,
			sample_images=self.sample_images,
			read_state_images=self.state.read_state_images,
			llm_screenshot_size=self.llm_screenshot_size,
			unavailable_skills_info=unavailable_skills_info,
		).get_user_message(effective_use_vision)
		# Store state message text for history
		self.last_state_message_text = state_message.text
		# Set the state message with caching enabled
		self._set_message_with_type(state_message, 'state')

	def _log_history_lines(self) -> str:
		"""Generate a formatted log string of message history for debugging / printing to terminal"""
		# TODO: fix logging
		# try:
		# 	total_input_tokens = 0
		# 	message_lines = []
		# 	terminal_width = shutil.get_terminal_size((80, 20)).columns
		# 	for i, m in enumerate(self.state.history.messages):
		# 		try:
		# 			total_input_tokens += m.metadata.tokens
		# 			is_last_message = i == len(self.state.history.messages) - 1
		# 			# Extract content for logging
		# 			content = _log_extract_message_content(m.message, is_last_message, m.metadata)
		# 			# Format the message line(s)
		# 			lines = _log_format_message_line(m, content, is_last_message, terminal_width)
		# 			message_lines.extend(lines)
		# 		except Exception as e:
		# 			logger.warning(f'Failed to format message {i} for logging: {e}')
		# 			# Add a fallback line for this message
		# 			message_lines.append('❓[ ?]: [Error formatting this message]')
		# 	# Build final log message
		# 	return (
		# 		f'📜 LLM Message history ({len(self.state.history.messages)} messages, {total_input_tokens} tokens):\n'
		# 		+ '\n'.join(message_lines)
		# 	)
		# except Exception as e:
		# 	logger.warning(f'Failed to generate history log: {e}')
		# 	# Return a minimal fallback message
		# 	return f'📜 LLM Message history (error generating log: {e})'
		return ''

	@time_execution_sync('--get_messages')
	def get_messages(self) -> list[BaseMessage]:
		"""Get current message list, potentially trimmed to max tokens"""
		# Log message history for debugging
		logger.debug(self._log_history_lines())
		self.last_input_messages = self.state.history.get_messages()
		return self.last_input_messages

	def _set_message_with_type(self, message: BaseMessage, message_type: Literal['system', 'state']) -> None:
		"""Replace a specific state message slot with a new message"""
		# System messages don't need filtering - they only contain instructions/placeholders
		# State messages need filtering - they include agent_history_description which contains
		# action results with real sensitive values (after placeholder replacement during execution)
		if message_type == 'system':
			self.state.history.system_message = message
		elif message_type == 'state':
			if self.sensitive_data:
				message = self._filter_sensitive_data(message)
			self.state.history.state_message = message
		else:
			raise ValueError(f'Invalid state message type: {message_type}')

	def _add_context_message(self, message: BaseMessage) -> None:
		"""Add a contextual message specific to this step (e.g., validation errors, retry instructions, timeout warnings)"""
		# Context messages typically contain error messages and validation info, not action results
		# with sensitive data, so filtering is not needed here
		self.state.history.context_messages.append(message)

	@time_execution_sync('--filter_sensitive_data')
	def _filter_sensitive_data(self, message: BaseMessage) -> BaseMessage:
		"""Filter out sensitive data from the message.

		Mutates the message in place, replacing every known sensitive value
		with its <secret>placeholder</secret> tag, and returns it.
		"""

		def replace_sensitive(value: str) -> str:
			if not self.sensitive_data:
				return value
			# Collect all sensitive values, immediately converting old format to new format
			sensitive_values: dict[str, str] = {}
			# Process all sensitive data entries
			for key_or_domain, content in self.sensitive_data.items():
				if isinstance(content, dict):
					# Already in new format: {domain: {key: value}}
					for key, val in content.items():
						if val:  # Skip empty values
							sensitive_values[key] = val
				elif content:  # Old format: {key: value} - convert to new format internally
					# We treat this as if it was {'http*://*': {key_or_domain: content}}
					sensitive_values[key_or_domain] = content
			# If there are no valid sensitive data entries, just return the original value
			if not sensitive_values:
				logger.warning('No valid entries found in sensitive_data dictionary')
				return value
			# Replace all valid sensitive data values with their placeholder tags
			for key, val in sensitive_values.items():
				value = value.replace(val, f'<secret>{key}</secret>')
			return value

		if isinstance(message.content, str):
			message.content = replace_sensitive(message.content)
		elif isinstance(message.content, list):
			for i, item in enumerate(message.content):
				if isinstance(item, ContentPartTextParam):
					item.text = replace_sensitive(item.text)
					message.content[i] = item
		return message
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/message_manager/utils.py | browser_use/agent/message_manager/utils.py | from __future__ import annotations
import json
import logging
from pathlib import Path
from typing import Any
import anyio
from browser_use.llm.messages import BaseMessage
logger = logging.getLogger(__name__)
async def save_conversation(
	input_messages: list[BaseMessage],
	response: Any,
	target: str | Path,
	encoding: str | None = None,
) -> None:
	"""Save conversation history to file asynchronously.

	Args:
		input_messages: Messages sent to the LLM, written in order.
		response: Model response; serialized via ``model_dump_json``.
		target: Destination file path (parent directories are created).
		encoding: Text encoding for the output file; defaults to UTF-8.
	"""
	target_path = Path(target)
	# Create parent folders if they don't exist. (The previous
	# `if target_path.parent:` guard was removed — Path.parent is never
	# falsy, so the mkdir always ran anyway.)
	await anyio.Path(target_path.parent).mkdir(parents=True, exist_ok=True)
	await anyio.Path(target_path).write_text(
		await _format_conversation(input_messages, response),
		encoding=encoding or 'utf-8',
	)
async def _format_conversation(messages: list[BaseMessage], response: Any) -> str:
"""Format the conversation including messages and response."""
lines = []
# Format messages
for message in messages:
lines.append(f' {message.role} ')
lines.append(message.text)
lines.append('') # Empty line after each message
# Format response
lines.append(json.dumps(json.loads(response.model_dump_json(exclude_unset=True)), indent=2, ensure_ascii=False))
return '\n'.join(lines)
# Note: _write_messages_to_file and _write_response_to_file have been merged into _format_conversation
# This is more efficient for async operations and reduces file I/O
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/agent/system_prompts/__init__.py | browser_use/agent/system_prompts/__init__.py | # System prompt templates for browser-use agent
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/telemetry/views.py | browser_use/telemetry/views.py | from abc import ABC, abstractmethod
from collections.abc import Sequence
from dataclasses import asdict, dataclass
from typing import Any, Literal
from browser_use.config import is_running_in_docker
@dataclass
class BaseTelemetryEvent(ABC):
	"""Base for telemetry events: subclasses provide a `name`, and their
	dataclass fields become the event properties."""

	@property
	@abstractmethod
	def name(self) -> str:
		"""Event name reported to the telemetry backend."""
		pass

	@property
	def properties(self) -> dict[str, Any]:
		"""Dataclass fields (minus `name`) serialized as event properties."""
		props = {k: v for k, v in asdict(self).items() if k != 'name'}
		# Add Docker context if running in Docker
		props['is_docker'] = is_running_in_docker()
		return props
@dataclass
class AgentTelemetryEvent(BaseTelemetryEvent):
	"""Telemetry event summarizing a single agent run (emitted as 'agent_event')."""

	# start details
	task: str
	model: str
	model_provider: str
	max_steps: int
	max_actions_per_step: int
	use_vision: bool | Literal['auto']
	version: str
	source: str
	cdp_url: str | None
	agent_type: str | None  # 'code' for CodeAgent, None for regular Agent
	# step details
	action_errors: Sequence[str | None]
	action_history: Sequence[list[dict] | None]
	urls_visited: Sequence[str | None]
	# end details
	steps: int
	total_input_tokens: int
	total_output_tokens: int
	prompt_cached_tokens: int
	total_tokens: int
	total_duration_seconds: float
	success: bool | None
	final_result_response: str | None
	error_message: str | None
	# judge details
	judge_verdict: bool | None = None
	judge_reasoning: str | None = None
	judge_failure_reason: str | None = None
	judge_reached_captcha: bool | None = None
	judge_impossible_task: bool | None = None
	name: str = 'agent_event'
@dataclass
class MCPClientTelemetryEvent(BaseTelemetryEvent):
	"""Telemetry event for MCP client usage"""

	server_name: str
	command: str
	tools_discovered: int
	version: str
	action: str  # 'connect', 'disconnect', 'tool_call'
	tool_name: str | None = None
	duration_seconds: float | None = None
	error_message: str | None = None
	name: str = 'mcp_client_event'
@dataclass
class MCPServerTelemetryEvent(BaseTelemetryEvent):
	"""Telemetry event for MCP server usage"""

	version: str
	action: str  # 'start', 'stop', 'tool_call'
	tool_name: str | None = None
	duration_seconds: float | None = None
	error_message: str | None = None
	# Command line of the process that launched the server, when available.
	parent_process_cmdline: str | None = None
	name: str = 'mcp_server_event'
@dataclass
class CLITelemetryEvent(BaseTelemetryEvent):
	"""Telemetry event for CLI usage"""

	version: str
	action: str  # 'start', 'message_sent', 'task_completed', 'error'
	mode: str  # 'interactive', 'oneshot', 'mcp_server'
	model: str | None = None
	model_provider: str | None = None
	duration_seconds: float | None = None
	error_message: str | None = None
	name: str = 'cli_event'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/telemetry/service.py | browser_use/telemetry/service.py | import logging
import os
from dotenv import load_dotenv
from posthog import Posthog
from uuid_extensions import uuid7str
from browser_use.telemetry.views import BaseTelemetryEvent
from browser_use.utils import singleton
load_dotenv()
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
POSTHOG_EVENT_SETTINGS = {
'process_person_profile': True,
}
@singleton
class ProductTelemetry:
	"""
	Service for capturing anonymized telemetry data.

	If the environment variable `ANONYMIZED_TELEMETRY=False`, anonymized telemetry will be disabled.
	"""

	# Path of the persisted anonymous device id.
	USER_ID_PATH = str(CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id')
	PROJECT_API_KEY = 'phc_F8JMNjW1i2KbGUTaW1unnDdLSPCoyc52SGRU0JecaUh'
	HOST = 'https://eu.i.posthog.com'
	UNKNOWN_USER_ID = 'UNKNOWN'

	# Cached device id; lazily resolved by the user_id property.
	_curr_user_id = None

	def __init__(self) -> None:
		telemetry_disabled = not CONFIG.ANONYMIZED_TELEMETRY
		self.debug_logging = CONFIG.BROWSER_USE_LOGGING_LEVEL == 'debug'
		if telemetry_disabled:
			# Disabled: keep a None client and no-op on capture/flush.
			self._posthog_client = None
		else:
			logger.info('Using anonymized telemetry, see https://docs.browser-use.com/development/telemetry.')
			self._posthog_client = Posthog(
				project_api_key=self.PROJECT_API_KEY,
				host=self.HOST,
				disable_geoip=False,
				enable_exception_autocapture=True,
			)
			# Silence posthog's logging
			if not self.debug_logging:
				posthog_logger = logging.getLogger('posthog')
				posthog_logger.disabled = True
		if self._posthog_client is None:
			logger.debug('Telemetry disabled')

	def capture(self, event: BaseTelemetryEvent) -> None:
		"""Send a telemetry event; no-op when telemetry is disabled."""
		if self._posthog_client is None:
			return
		self._direct_capture(event)

	def _direct_capture(self, event: BaseTelemetryEvent) -> None:
		"""
		Should not be thread blocking because posthog magically handles it
		"""
		if self._posthog_client is None:
			return
		try:
			self._posthog_client.capture(
				distinct_id=self.user_id,
				event=event.name,
				properties={**event.properties, **POSTHOG_EVENT_SETTINGS},
			)
		except Exception as e:
			# Telemetry failures must never break the caller.
			logger.error(f'Failed to send telemetry event {event.name}: {e}')

	def flush(self) -> None:
		"""Flush any queued telemetry events to the backend."""
		if self._posthog_client:
			try:
				self._posthog_client.flush()
				logger.debug('PostHog client telemetry queue flushed.')
			except Exception as e:
				logger.error(f'Failed to flush PostHog client: {e}')
		else:
			logger.debug('PostHog client not available, skipping flush.')

	@property
	def user_id(self) -> str:
		"""Stable anonymous device id, created on first use and persisted to disk."""
		if self._curr_user_id:
			return self._curr_user_id
		# File access may fail due to permissions or other reasons. We don't want to
		# crash so we catch all exceptions.
		try:
			if not os.path.exists(self.USER_ID_PATH):
				os.makedirs(os.path.dirname(self.USER_ID_PATH), exist_ok=True)
				with open(self.USER_ID_PATH, 'w') as f:
					new_user_id = uuid7str()
					f.write(new_user_id)
				self._curr_user_id = new_user_id
			else:
				with open(self.USER_ID_PATH) as f:
					self._curr_user_id = f.read()
		except Exception:
			# NOTE(review): falls back to the literal 'UNKNOWN_USER_ID', not the
			# UNKNOWN_USER_ID class constant ('UNKNOWN') — confirm which is intended.
			self._curr_user_id = 'UNKNOWN_USER_ID'
		return self._curr_user_id
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/telemetry/__init__.py | browser_use/telemetry/__init__.py | """
Telemetry for Browser Use.
"""
from typing import TYPE_CHECKING
# Type stubs for lazy imports
if TYPE_CHECKING:
from browser_use.telemetry.service import ProductTelemetry
from browser_use.telemetry.views import (
BaseTelemetryEvent,
CLITelemetryEvent,
MCPClientTelemetryEvent,
MCPServerTelemetryEvent,
)
# Lazy imports mapping: public name -> (module path, attribute name)
_LAZY_IMPORTS = {
    'ProductTelemetry': ('browser_use.telemetry.service', 'ProductTelemetry'),
    'BaseTelemetryEvent': ('browser_use.telemetry.views', 'BaseTelemetryEvent'),
    'CLITelemetryEvent': ('browser_use.telemetry.views', 'CLITelemetryEvent'),
    'MCPClientTelemetryEvent': ('browser_use.telemetry.views', 'MCPClientTelemetryEvent'),
    'MCPServerTelemetryEvent': ('browser_use.telemetry.views', 'MCPServerTelemetryEvent'),
}


def __getattr__(name: str):
    """Lazy import mechanism for telemetry components (PEP 562 module getattr)."""
    target = _LAZY_IMPORTS.get(name)
    if target is None:
        raise AttributeError(f"module '{__name__}' has no attribute '{name}'")

    module_path, attr_name = target
    try:
        from importlib import import_module

        attr = getattr(import_module(module_path), attr_name)
    except ImportError as e:
        raise ImportError(f'Failed to import {name} from {module_path}: {e}') from e

    # Cache the resolved attribute so subsequent lookups skip this hook.
    globals()[name] = attr
    return attr
# Public API of this module; each name is materialized lazily by __getattr__.
__all__ = [
    'BaseTelemetryEvent',
    'ProductTelemetry',
    'CLITelemetryEvent',
    'MCPClientTelemetryEvent',
    'MCPServerTelemetryEvent',
]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/mouse.py | browser_use/actor/mouse.py | """Mouse class for mouse operations."""
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from cdp_use.cdp.input.commands import DispatchMouseEventParameters, SynthesizeScrollGestureParameters
from cdp_use.cdp.input.types import MouseButton
from browser_use.browser.session import BrowserSession
class Mouse:
    """Mouse operations for a target, implemented as raw CDP ``Input`` events."""

    def __init__(self, browser_session: 'BrowserSession', session_id: str | None = None, target_id: str | None = None):
        self._browser_session = browser_session
        self._client = browser_session.cdp_client
        self._session_id = session_id
        self._target_id = target_id

    async def click(self, x: int, y: int, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
        """Click at the specified coordinates."""
        # A click is a press immediately followed by a release at the same point.
        for event_type in ('mousePressed', 'mouseReleased'):
            event: 'DispatchMouseEventParameters' = {
                'type': event_type,
                'x': x,
                'y': y,
                'button': button,
                'clickCount': click_count,
            }
            await self._client.send.Input.dispatchMouseEvent(event, session_id=self._session_id)

    async def down(self, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
        """Press mouse button down."""
        event: 'DispatchMouseEventParameters' = {
            'type': 'mousePressed',
            'x': 0,  # Will use last mouse position
            'y': 0,
            'button': button,
            'clickCount': click_count,
        }
        await self._client.send.Input.dispatchMouseEvent(event, session_id=self._session_id)

    async def up(self, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
        """Release mouse button."""
        event: 'DispatchMouseEventParameters' = {
            'type': 'mouseReleased',
            'x': 0,  # Will use last mouse position
            'y': 0,
            'button': button,
            'clickCount': click_count,
        }
        await self._client.send.Input.dispatchMouseEvent(event, session_id=self._session_id)

    async def move(self, x: int, y: int, steps: int = 1) -> None:
        """Move mouse to the specified coordinates."""
        # TODO: Implement smooth movement with multiple steps if needed
        _ = steps  # Acknowledge parameter for future use
        await self._client.send.Input.dispatchMouseEvent(
            {'type': 'mouseMoved', 'x': x, 'y': y}, session_id=self._session_id
        )

    async def scroll(self, x: int = 0, y: int = 0, delta_x: int | None = None, delta_y: int | None = None) -> None:
        """Scroll the page, trying progressively less precise CDP strategies."""
        if not self._session_id:
            raise RuntimeError('Session ID is required for scroll operations')

        dx = delta_x or 0
        dy = delta_y or 0

        # Method 1: mouse wheel event (most reliable)
        try:
            metrics = await self._client.send.Page.getLayoutMetrics(session_id=self._session_id)
            viewport_w = metrics['layoutViewport']['clientWidth']
            viewport_h = metrics['layoutViewport']['clientHeight']

            # Anchor the wheel event at the given point, or the viewport centre.
            wheel_x = x if x > 0 else viewport_w / 2
            wheel_y = y if y > 0 else viewport_h / 2

            # Positive deltas scroll down/right.
            await self._client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mouseWheel',
                    'x': wheel_x,
                    'y': wheel_y,
                    'deltaX': dx,
                    'deltaY': dy,
                },
                session_id=self._session_id,
            )
            return
        except Exception:
            pass

        # Method 2: Fallback to synthesizeScrollGesture
        try:
            gesture: 'SynthesizeScrollGestureParameters' = {'x': x, 'y': y, 'xDistance': dx, 'yDistance': dy}
            await self._client.send.Input.synthesizeScrollGesture(
                gesture,
                session_id=self._session_id,
            )
        except Exception:
            # Method 3: JavaScript fallback
            await self._client.send.Runtime.evaluate(
                params={'expression': f'window.scrollBy({dx}, {dy})', 'returnByValue': True},
                session_id=self._session_id,
            )
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/element.py | browser_use/actor/element.py | """Element class for element operations."""
import asyncio
from typing import TYPE_CHECKING, Literal, Union
from cdp_use.client import logger
from typing_extensions import TypedDict
if TYPE_CHECKING:
from cdp_use.cdp.dom.commands import (
DescribeNodeParameters,
FocusParameters,
GetAttributesParameters,
GetBoxModelParameters,
PushNodesByBackendIdsToFrontendParameters,
RequestChildNodesParameters,
ResolveNodeParameters,
)
from cdp_use.cdp.input.commands import (
DispatchMouseEventParameters,
)
from cdp_use.cdp.input.types import MouseButton
from cdp_use.cdp.page.commands import CaptureScreenshotParameters
from cdp_use.cdp.page.types import Viewport
from cdp_use.cdp.runtime.commands import CallFunctionOnParameters
from browser_use.browser.session import BrowserSession
# Type definitions for element operations
# Keyboard modifier names accepted by the click/keyboard helpers below.
ModifierType = Literal['Alt', 'Control', 'Meta', 'Shift']


class Position(TypedDict):
    """2D position coordinates."""

    x: float
    y: float


class BoundingBox(TypedDict):
    """Element bounding box with position and dimensions."""

    # x/y are the top-left corner; width/height extend right/down.
    x: float
    y: float
    width: float
    height: float


class ElementInfo(TypedDict):
    """Basic information about a DOM element."""

    backendNodeId: int  # stable CDP identifier for the node
    nodeId: int | None  # frontend DOM node id, if pushed to this session
    nodeName: str
    nodeType: int
    nodeValue: str | None
    attributes: dict[str, str]
    boundingBox: BoundingBox | None  # None when the element has no layout box
    error: str | None  # set when the info could not be fully retrieved
class Element:
"""Element operations using BackendNodeId."""
def __init__(
self,
browser_session: 'BrowserSession',
backend_node_id: int,
session_id: str | None = None,
):
self._browser_session = browser_session
self._client = browser_session.cdp_client
self._backend_node_id = backend_node_id
self._session_id = session_id
async def _get_node_id(self) -> int:
"""Get DOM node ID from backend node ID."""
params: 'PushNodesByBackendIdsToFrontendParameters' = {'backendNodeIds': [self._backend_node_id]}
result = await self._client.send.DOM.pushNodesByBackendIdsToFrontend(params, session_id=self._session_id)
return result['nodeIds'][0]
async def _get_remote_object_id(self) -> str | None:
"""Get remote object ID for this element."""
node_id = await self._get_node_id()
params: 'ResolveNodeParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.resolveNode(params, session_id=self._session_id)
object_id = result['object'].get('objectId', None)
if not object_id:
return None
return object_id
async def click(
self,
button: 'MouseButton' = 'left',
click_count: int = 1,
modifiers: list[ModifierType] | None = None,
) -> None:
"""Click the element using the advanced watchdog implementation."""
try:
# Get viewport dimensions for visibility checks
layout_metrics = await self._client.send.Page.getLayoutMetrics(session_id=self._session_id)
viewport_width = layout_metrics['layoutViewport']['clientWidth']
viewport_height = layout_metrics['layoutViewport']['clientHeight']
# Try multiple methods to get element geometry
quads = []
# Method 1: Try DOM.getContentQuads first (best for inline elements and complex layouts)
try:
content_quads_result = await self._client.send.DOM.getContentQuads(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'quads' in content_quads_result and content_quads_result['quads']:
quads = content_quads_result['quads']
except Exception:
pass
# Method 2: Fall back to DOM.getBoxModel
if not quads:
try:
box_model = await self._client.send.DOM.getBoxModel(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'model' in box_model and 'content' in box_model['model']:
content_quad = box_model['model']['content']
if len(content_quad) >= 8:
# Convert box model format to quad format
quads = [
[
content_quad[0],
content_quad[1], # x1, y1
content_quad[2],
content_quad[3], # x2, y2
content_quad[4],
content_quad[5], # x3, y3
content_quad[6],
content_quad[7], # x4, y4
]
]
except Exception:
pass
# Method 3: Fall back to JavaScript getBoundingClientRect
if not quads:
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' in result and 'objectId' in result['object']:
object_id = result['object']['objectId']
# Get bounding rect via JavaScript
bounds_result = await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': """
function() {
const rect = this.getBoundingClientRect();
return {
x: rect.left,
y: rect.top,
width: rect.width,
height: rect.height
};
}
""",
'objectId': object_id,
'returnByValue': True,
},
session_id=self._session_id,
)
if 'result' in bounds_result and 'value' in bounds_result['result']:
rect = bounds_result['result']['value']
# Convert rect to quad format
x, y, w, h = rect['x'], rect['y'], rect['width'], rect['height']
quads = [
[
x,
y, # top-left
x + w,
y, # top-right
x + w,
y + h, # bottom-right
x,
y + h, # bottom-left
]
]
except Exception:
pass
# If we still don't have quads, fall back to JS click
if not quads:
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' not in result or 'objectId' not in result['object']:
raise Exception('Failed to find DOM element based on backendNodeId, maybe page content changed?')
object_id = result['object']['objectId']
await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { this.click(); }',
'objectId': object_id,
},
session_id=self._session_id,
)
await asyncio.sleep(0.05)
return
except Exception as js_e:
raise Exception(f'Failed to click element: {js_e}')
# Find the largest visible quad within the viewport
best_quad = None
best_area = 0
for quad in quads:
if len(quad) < 8:
continue
# Calculate quad bounds
xs = [quad[i] for i in range(0, 8, 2)]
ys = [quad[i] for i in range(1, 8, 2)]
min_x, max_x = min(xs), max(xs)
min_y, max_y = min(ys), max(ys)
# Check if quad intersects with viewport
if max_x < 0 or max_y < 0 or min_x > viewport_width or min_y > viewport_height:
continue # Quad is completely outside viewport
# Calculate visible area (intersection with viewport)
visible_min_x = max(0, min_x)
visible_max_x = min(viewport_width, max_x)
visible_min_y = max(0, min_y)
visible_max_y = min(viewport_height, max_y)
visible_width = visible_max_x - visible_min_x
visible_height = visible_max_y - visible_min_y
visible_area = visible_width * visible_height
if visible_area > best_area:
best_area = visible_area
best_quad = quad
if not best_quad:
# No visible quad found, use the first quad anyway
best_quad = quads[0]
# Calculate center point of the best quad
center_x = sum(best_quad[i] for i in range(0, 8, 2)) / 4
center_y = sum(best_quad[i] for i in range(1, 8, 2)) / 4
# Ensure click point is within viewport bounds
center_x = max(0, min(viewport_width - 1, center_x))
center_y = max(0, min(viewport_height - 1, center_y))
# Scroll element into view
try:
await self._client.send.DOM.scrollIntoViewIfNeeded(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
await asyncio.sleep(0.05) # Wait for scroll to complete
except Exception:
pass
# Calculate modifier bitmask for CDP
modifier_value = 0
if modifiers:
modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
for mod in modifiers:
modifier_value |= modifier_map.get(mod, 0)
# Perform the click using CDP
try:
# Move mouse to element
await self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseMoved',
'x': center_x,
'y': center_y,
},
session_id=self._session_id,
)
await asyncio.sleep(0.05)
# Mouse down
try:
await asyncio.wait_for(
self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mousePressed',
'x': center_x,
'y': center_y,
'button': button,
'clickCount': click_count,
'modifiers': modifier_value,
},
session_id=self._session_id,
),
timeout=1.0, # 1 second timeout for mousePressed
)
await asyncio.sleep(0.08)
except TimeoutError:
pass # Don't sleep if we timed out
# Mouse up
try:
await asyncio.wait_for(
self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseReleased',
'x': center_x,
'y': center_y,
'button': button,
'clickCount': click_count,
'modifiers': modifier_value,
},
session_id=self._session_id,
),
timeout=3.0, # 3 second timeout for mouseReleased
)
except TimeoutError:
pass
except Exception as e:
# Fall back to JavaScript click via CDP
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' not in result or 'objectId' not in result['object']:
raise Exception('Failed to find DOM element based on backendNodeId, maybe page content changed?')
object_id = result['object']['objectId']
await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { this.click(); }',
'objectId': object_id,
},
session_id=self._session_id,
)
await asyncio.sleep(0.1)
return
except Exception as js_e:
raise Exception(f'Failed to click element: {e}')
except Exception as e:
# Extract key element info for error message
raise RuntimeError(f'Failed to click element: {e}')
    async def fill(self, value: str, clear: bool = True) -> None:
        """Fill the input element using proper CDP methods with improved focus handling.

        Scrolls the element into view, focuses it, optionally clears it, then
        types ``value`` one character at a time as keyDown/char/keyUp events so
        framework listeners (React etc.) observe realistic input. Newlines are
        sent as Enter key presses.

        Args:
            value: Text to type into the element.
            clear: When True, clear any existing text before typing.

        Raises:
            Exception: If any CDP step fails.
        """
        try:
            # Use the existing CDP client and session
            cdp_client = self._client
            session_id = self._session_id
            backend_node_id = self._backend_node_id

            # Track coordinates for metadata
            input_coordinates = None

            # Scroll element into view (best-effort; typing may still work if this fails)
            try:
                await cdp_client.send.DOM.scrollIntoViewIfNeeded(params={'backendNodeId': backend_node_id}, session_id=session_id)
                await asyncio.sleep(0.01)
            except Exception as e:
                logger.warning(f'Failed to scroll element into view: {e}')

            # Get object ID for the element
            result = await cdp_client.send.DOM.resolveNode(
                params={'backendNodeId': backend_node_id},
                session_id=session_id,
            )
            if 'object' not in result or 'objectId' not in result['object']:
                raise RuntimeError('Failed to get object ID for element')
            object_id = result['object']['objectId']

            # Get element coordinates for focus
            try:
                bounds_result = await cdp_client.send.Runtime.callFunctionOn(
                    params={
                        'functionDeclaration': 'function() { return this.getBoundingClientRect(); }',
                        'objectId': object_id,
                        'returnByValue': True,
                    },
                    session_id=session_id,
                )
                if bounds_result.get('result', {}).get('value'):
                    bounds = bounds_result['result']['value']  # type: ignore
                    center_x = bounds['x'] + bounds['width'] / 2
                    center_y = bounds['y'] + bounds['height'] / 2
                    input_coordinates = {'input_x': center_x, 'input_y': center_y}
                    logger.debug(f'Using element coordinates: x={center_x:.1f}, y={center_y:.1f}')
            except Exception as e:
                logger.debug(f'Could not get element coordinates: {e}')

            # Ensure session_id is not None
            if session_id is None:
                raise RuntimeError('Session ID is required for fill operation')

            # Step 1: Focus the element
            # NOTE(review): the returned flag is never checked - typing proceeds
            # even when focusing reports failure; confirm this is intentional.
            focused_successfully = await self._focus_element_simple(
                backend_node_id=backend_node_id,
                object_id=object_id,
                cdp_client=cdp_client,
                session_id=session_id,
                input_coordinates=input_coordinates,
            )

            # Step 2: Clear existing text if requested
            if clear:
                cleared_successfully = await self._clear_text_field(
                    object_id=object_id, cdp_client=cdp_client, session_id=session_id
                )
                if not cleared_successfully:
                    logger.warning('Text field clearing failed, typing may append to existing text')

            # Step 3: Type the text character by character using proper human-like key events
            logger.debug(f'Typing text character by character: "{value}"')
            for i, char in enumerate(value):
                # Handle newline characters as Enter key
                if char == '\n':
                    # Send proper Enter key sequence
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyDown',
                            'key': 'Enter',
                            'code': 'Enter',
                            'windowsVirtualKeyCode': 13,
                        },
                        session_id=session_id,
                    )
                    # Small delay to emulate human typing speed
                    await asyncio.sleep(0.001)
                    # Send char event with carriage return
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'char',
                            'text': '\r',
                            'key': 'Enter',
                        },
                        session_id=session_id,
                    )
                    # Send keyUp event
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyUp',
                            'key': 'Enter',
                            'code': 'Enter',
                            'windowsVirtualKeyCode': 13,
                        },
                        session_id=session_id,
                    )
                else:
                    # Handle regular characters
                    # Get proper modifiers, VK code, and base key for the character
                    modifiers, vk_code, base_key = self._get_char_modifiers_and_vk(char)
                    key_code = self._get_key_code_for_char(base_key)

                    # Step 1: Send keyDown event (NO text parameter)
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyDown',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=session_id,
                    )
                    # Small delay to emulate human typing speed
                    await asyncio.sleep(0.001)
                    # Step 2: Send char event (WITH text parameter) - this is crucial for text input
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'char',
                            'text': char,
                            'key': char,
                        },
                        session_id=session_id,
                    )
                    # Step 3: Send keyUp event (NO text parameter)
                    await cdp_client.send.Input.dispatchKeyEvent(
                        params={
                            'type': 'keyUp',
                            'key': base_key,
                            'code': key_code,
                            'modifiers': modifiers,
                            'windowsVirtualKeyCode': vk_code,
                        },
                        session_id=session_id,
                    )
                    # Add 18ms delay between keystrokes
                    await asyncio.sleep(0.018)
        except Exception as e:
            raise Exception(f'Failed to fill element: {str(e)}')
async def hover(self) -> None:
"""Hover over the element."""
box = await self.get_bounding_box()
if not box:
raise RuntimeError('Element is not visible or has no bounding box')
x = box['x'] + box['width'] / 2
y = box['y'] + box['height'] / 2
params: 'DispatchMouseEventParameters' = {'type': 'mouseMoved', 'x': x, 'y': y}
await self._client.send.Input.dispatchMouseEvent(params, session_id=self._session_id)
async def focus(self) -> None:
"""Focus the element."""
node_id = await self._get_node_id()
params: 'FocusParameters' = {'nodeId': node_id}
await self._client.send.DOM.focus(params, session_id=self._session_id)
    async def check(self) -> None:
        """Check or uncheck a checkbox/radio button.

        Implemented as a plain click, so it toggles the current state rather
        than forcing a particular one.
        """
        await self.click()
    async def select_option(self, values: str | list[str]) -> None:
        """Select option(s) in a select element.

        Args:
            values: Option value attribute(s) or display text(s) to select.
        """
        if isinstance(values, str):
            values = [values]

        # Focus the element first
        try:
            await self.focus()
        except Exception:
            logger.warning('Failed to focus element')

        # For select elements, we need to find option elements and click them
        # This is a simplified approach - in practice, you might need to handle
        # different select types (single vs multi-select) differently
        node_id = await self._get_node_id()

        # Request child nodes to get the options
        params: 'RequestChildNodesParameters' = {'nodeId': node_id, 'depth': 1}
        await self._client.send.DOM.requestChildNodes(params, session_id=self._session_id)

        # Get the updated node description with children
        describe_params: 'DescribeNodeParameters' = {'nodeId': node_id, 'depth': 1}
        describe_result = await self._client.send.DOM.describeNode(describe_params, session_id=self._session_id)
        select_node = describe_result['node']

        # Find and select matching options
        for child in select_node.get('children', []):
            if child.get('nodeName', '').lower() == 'option':
                # Get option attributes (CDP returns a flat [name, value, ...] list)
                attrs = child.get('attributes', [])
                option_attrs = {}
                for i in range(0, len(attrs), 2):
                    if i + 1 < len(attrs):
                        option_attrs[attrs[i]] = attrs[i + 1]

                option_value = option_attrs.get('value', '')
                # NOTE(review): 'nodeValue' is typically empty for element nodes
                # (an option's text lives in its text child), so matching by
                # display text may never trigger - verify against real payloads.
                option_text = child.get('nodeValue', '')

                # Check if this option should be selected
                should_select = option_value in values or option_text in values

                if should_select:
                    # Click the option to select it
                    option_node_id = child.get('nodeId')
                    if option_node_id:
                        # Get backend node ID for the option
                        option_describe_params: 'DescribeNodeParameters' = {'nodeId': option_node_id}
                        option_backend_result = await self._client.send.DOM.describeNode(
                            option_describe_params, session_id=self._session_id
                        )
                        option_backend_id = option_backend_result['node']['backendNodeId']

                        # Create an Element for the option and click it
                        option_element = Element(self._browser_session, option_backend_id, self._session_id)
                        await option_element.click()
async def drag_to(
self,
target: Union['Element', Position],
source_position: Position | None = None,
target_position: Position | None = None,
) -> None:
"""Drag this element to another element or position."""
# Get source coordinates
if source_position:
source_x = source_position['x']
source_y = source_position['y']
else:
source_box = await self.get_bounding_box()
if not source_box:
raise RuntimeError('Source element is not visible')
source_x = source_box['x'] + source_box['width'] / 2
source_y = source_box['y'] + source_box['height'] / 2
# Get target coordinates
if isinstance(target, dict) and 'x' in target and 'y' in target:
target_x = target['x']
target_y = target['y']
else:
if target_position:
target_box = await target.get_bounding_box()
if not target_box:
raise RuntimeError('Target element is not visible')
target_x = target_box['x'] + target_position['x']
target_y = target_box['y'] + target_position['y']
else:
target_box = await target.get_bounding_box()
if not target_box:
raise RuntimeError('Target element is not visible')
target_x = target_box['x'] + target_box['width'] / 2
target_y = target_box['y'] + target_box['height'] / 2
# Perform drag operation
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mousePressed', 'x': source_x, 'y': source_y, 'button': 'left'},
session_id=self._session_id,
)
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mouseMoved', 'x': target_x, 'y': target_y},
session_id=self._session_id,
)
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mouseReleased', 'x': target_x, 'y': target_y, 'button': 'left'},
session_id=self._session_id,
)
# Element properties and queries
async def get_attribute(self, name: str) -> str | None:
"""Get an attribute value."""
node_id = await self._get_node_id()
params: 'GetAttributesParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.getAttributes(params, session_id=self._session_id)
attributes = result['attributes']
for i in range(0, len(attributes), 2):
if attributes[i] == name:
return attributes[i + 1]
return None
async def get_bounding_box(self) -> BoundingBox | None:
"""Get the bounding box of the element."""
try:
node_id = await self._get_node_id()
params: 'GetBoxModelParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.getBoxModel(params, session_id=self._session_id)
if 'model' not in result:
return None
# Get content box (first 8 values are content quad: x1,y1,x2,y2,x3,y3,x4,y4)
content = result['model']['content']
if len(content) < 8:
return None
# Calculate bounding box from quad
x_coords = [content[i] for i in range(0, 8, 2)]
y_coords = [content[i] for i in range(1, 8, 2)]
x = min(x_coords)
y = min(y_coords)
width = max(x_coords) - x
height = max(y_coords) - y
return BoundingBox(x=x, y=y, width=width, height=height)
except Exception:
return None
async def screenshot(self, format: str = 'png', quality: int | None = None) -> str:
"""Take a screenshot of this element and return base64 encoded image.
Args:
format: Image format ('jpeg', 'png', 'webp')
quality: Quality 0-100 for JPEG format
Returns:
Base64-encoded image data
"""
# Get element's bounding box
box = await self.get_bounding_box()
if not box:
raise RuntimeError('Element is not visible or has no bounding box')
# Create viewport clip for the element
viewport: 'Viewport' = {'x': box['x'], 'y': box['y'], 'width': box['width'], 'height': box['height'], 'scale': 1.0}
# Prepare screenshot parameters
params: 'CaptureScreenshotParameters' = {'format': format, 'clip': viewport}
if quality is not None and format.lower() == 'jpeg':
params['quality'] = quality
# Take screenshot
result = await self._client.send.Page.captureScreenshot(params, session_id=self._session_id)
return result['data']
    async def evaluate(self, page_function: str, *args) -> str:
        """Execute JavaScript code in the context of this element.

        The JavaScript code executes with 'this' bound to the element, allowing direct
        access to element properties and methods.

        Args:
            page_function: JavaScript code that MUST start with (...args) => format
            *args: Arguments to pass to the function

        Returns:
            String representation of the JavaScript execution result.
            Objects and arrays are JSON-stringified.

        Example:
            # Get element's text content
            text = await element.evaluate("() => this.textContent")

            # Set style with argument
            await element.evaluate("(color) => this.style.color = color", "red")

            # Get computed style
            color = await element.evaluate("() => getComputedStyle(this).color")

            # Async operations
            result = await element.evaluate("async () => { await new Promise(r => setTimeout(r, 100)); return this.id; }")
        """
        # Get remote object ID for this element
        object_id = await self._get_remote_object_id()
        if not object_id:
            raise RuntimeError('Element has no remote object ID (element may be detached from DOM)')

        # Validate arrow function format (allow async prefix)
        page_function = page_function.strip()

        # Check for arrow function with optional async prefix
        if not ('=>' in page_function and (page_function.startswith('(') or page_function.startswith('async'))):
            raise ValueError(
                f'JavaScript code must start with (...args) => or async (...args) => format. Got: {page_function[:50]}...'
            )

        # Convert arrow function to function declaration for CallFunctionOn
        # CallFunctionOn expects 'function(...args) { ... }' format, not arrow functions
        # We need to convert: '() => expression' to 'function() { return expression; }'
        # or: '(x, y) => { statements }' to 'function(x, y) { statements }'

        # Extract parameters and body from arrow function
        import re

        # Check if it's an async arrow function
        is_async = page_function.strip().startswith('async')
        async_prefix = 'async ' if is_async else ''

        # Match: (params) => body or async (params) => body
        # Strip 'async' prefix if present for parsing
        func_to_parse = page_function.strip()
        if is_async:
            func_to_parse = func_to_parse[5:].strip()  # Remove 'async' prefix

        # NOTE(review): '[^)]*' cannot handle parentheses inside default
        # parameter values, e.g. '(x = f()) => ...' - such functions are rejected.
        arrow_match = re.match(r'\s*\(([^)]*)\)\s*=>\s*(.+)', func_to_parse, re.DOTALL)
        if not arrow_match:
            raise ValueError(f'Could not parse arrow function: {page_function[:50]}...')

        params_str = arrow_match.group(1).strip()  # e.g., '', 'x', 'x, y'
        body = arrow_match.group(2).strip()

        # If body doesn't start with {, it's an expression that needs implicit return
        if not body.startswith('{'):
            function_declaration = f'{async_prefix}function({params_str}) {{ return {body}; }}'
        else:
            # Body already has braces, use as-is
            function_declaration = f'{async_prefix}function({params_str}) {body}'

        # Build CallArgument list for args if provided
        call_arguments = []
        if args:
            from cdp_use.cdp.runtime.types import CallArgument

            for arg in args:
                # Convert Python values to CallArgument format
                call_arguments.append(CallArgument(value=arg))

        # Prepare CallFunctionOn parameters
        params: 'CallFunctionOnParameters' = {
            'functionDeclaration': function_declaration,
            'objectId': object_id,
            'returnByValue': True,
            'awaitPromise': True,
        }
        if call_arguments:
            params['arguments'] = call_arguments

        # Execute the function on the element
        result = await self._client.send.Runtime.callFunctionOn(
            params,
            session_id=self._session_id,
        )

        # Handle exceptions
        if 'exceptionDetails' in result:
            raise RuntimeError(f'JavaScript evaluation failed: {result["exceptionDetails"]}')

        # Extract and return value
        value = result.get('result', {}).get('value')

        # Return string representation (matching Page.evaluate behavior)
        if value is None:
            return ''
        elif isinstance(value, str):
            return value
        else:
            # Convert objects, numbers, booleans to string
            import json

            try:
                return json.dumps(value) if isinstance(value, (dict, list)) else str(value)
            except (TypeError, ValueError):
                return str(value)
# Helpers for modifiers etc
def _get_char_modifiers_and_vk(self, char: str) -> tuple[int, int, str]:
"""Get modifiers, virtual key code, and base key for a character.
Returns:
(modifiers, windowsVirtualKeyCode, base_key)
"""
# Characters that require Shift modifier
shift_chars = {
'!': ('1', 49),
'@': ('2', 50),
'#': ('3', 51),
'$': ('4', 52),
'%': ('5', 53),
'^': ('6', 54),
'&': ('7', 55),
'*': ('8', 56),
'(': ('9', 57),
')': ('0', 48),
'_': ('-', 189),
'+': ('=', 187),
'{': ('[', 219),
'}': (']', 221),
'|': ('\\', 220),
':': (';', 186),
'"': ("'", 222),
'<': (',', 188),
'>': ('.', 190),
'?': ('/', 191),
'~': ('`', 192),
}
# Check if character requires Shift
if char in shift_chars:
base_key, vk_code = shift_chars[char]
return (8, vk_code, base_key) # Shift=8
# Uppercase letters require Shift
if char.isupper():
return (8, ord(char), char.lower()) # Shift=8
# Lowercase letters
if char.islower():
return (0, ord(char.upper()), char)
# Numbers
if char.isdigit():
return (0, ord(char), char)
# Special characters without Shift
no_shift_chars = {
' ': 32,
'-': 189,
'=': 187,
'[': 219,
']': 221,
'\\': 220,
';': 186,
"'": 222,
',': 188,
'.': 190,
'/': 191,
'`': 192,
}
if char in no_shift_chars:
return (0, no_shift_chars[char], char)
# Fallback
return (0, ord(char.upper()) if char.isalpha() else ord(char), char)
def _get_key_code_for_char(self, char: str) -> str:
"""Get the proper key code for a character (like Playwright does)."""
# Key code mapping for common characters (using proper base keys + modifiers)
key_codes = {
' ': 'Space',
'.': 'Period',
',': 'Comma',
'-': 'Minus',
'_': 'Minus', # Underscore uses Minus with Shift
'@': 'Digit2', # @ uses Digit2 with Shift
'!': 'Digit1', # ! uses Digit1 with Shift (not 'Exclamation')
'?': 'Slash', # ? uses Slash with Shift
':': 'Semicolon', # : uses Semicolon with Shift
';': 'Semicolon',
'(': 'Digit9', # ( uses Digit9 with Shift
')': 'Digit0', # ) uses Digit0 with Shift
'[': 'BracketLeft',
']': 'BracketRight',
'{': 'BracketLeft', # { uses BracketLeft with Shift
'}': 'BracketRight', # } uses BracketRight with Shift
'/': 'Slash',
'\\': 'Backslash',
'=': 'Equal',
'+': 'Equal', # + uses Equal with Shift
'*': 'Digit8', # * uses Digit8 with Shift
'&': 'Digit7', # & uses Digit7 with Shift
'%': 'Digit5', # % uses Digit5 with Shift
'$': 'Digit4', # $ uses Digit4 with Shift
'#': 'Digit3', # # uses Digit3 with Shift
'^': 'Digit6', # ^ uses Digit6 with Shift
'~': 'Backquote', # ~ uses Backquote with Shift
'`': 'Backquote',
'"': 'Quote', # " uses Quote with Shift
"'": 'Quote',
'<': 'Comma', # < uses Comma with Shift
'>': 'Period', # > uses Period with Shift
'|': 'Backslash', # | uses Backslash with Shift
}
if char in key_codes:
return key_codes[char]
elif char.isalpha():
return f'Key{char.upper()}'
elif char.isdigit():
return f'Digit{char}'
else:
# Fallback for unknown characters
return f'Key{char.upper()}' if char.isascii() and char.isalpha() else 'Unidentified'
async def _clear_text_field(self, object_id: str, cdp_client, session_id: str) -> bool:
"""Clear text field using multiple strategies, starting with the most reliable."""
try:
# Strategy 1: Direct JavaScript value setting (most reliable for modern web apps)
logger.debug('Clearing text field using JavaScript value setting')
await cdp_client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': """
function() {
// Try to select all text first (only works on text-like inputs)
// This handles cases where cursor is in the middle of text
try {
this.select();
} catch (e) {
// Some input types (date, color, number, etc.) don't support select()
// That's fine, we'll just clear the value directly
}
// Set value to empty
this.value = "";
// Dispatch events to notify frameworks like React
this.dispatchEvent(new Event("input", { bubbles: true }));
this.dispatchEvent(new Event("change", { bubbles: true }));
return this.value;
}
""",
'objectId': object_id,
'returnByValue': True,
},
session_id=session_id,
)
# Verify clearing worked by checking the value
verify_result = await cdp_client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { return this.value; }',
'objectId': object_id,
'returnByValue': True,
},
session_id=session_id,
)
current_value = verify_result.get('result', {}).get('value', '')
if not current_value:
logger.debug('Text field cleared successfully using JavaScript')
return True
else:
logger.debug(f'JavaScript clear partially failed, field still contains: "{current_value}"')
except Exception as e:
logger.debug(f'JavaScript clear failed: {e}')
# Strategy 2: Triple-click + Delete (fallback for stubborn fields)
try:
logger.debug('Fallback: Clearing using triple-click + Delete')
# Get element center coordinates for triple-click
bounds_result = await cdp_client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { return this.getBoundingClientRect(); }',
'objectId': object_id,
'returnByValue': True,
},
session_id=session_id,
)
if bounds_result.get('result', {}).get('value'):
bounds = bounds_result['result']['value'] # type: ignore # type: ignore
center_x = bounds['x'] + bounds['width'] / 2
center_y = bounds['y'] + bounds['height'] / 2
# Triple-click to select all text
await cdp_client.send.Input.dispatchMouseEvent(
params={
'type': 'mousePressed',
'x': center_x,
'y': center_y,
'button': 'left',
'clickCount': 3,
},
session_id=session_id,
)
await cdp_client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseReleased',
'x': center_x,
'y': center_y,
'button': 'left',
'clickCount': 3,
},
session_id=session_id,
)
# Delete selected text
await cdp_client.send.Input.dispatchKeyEvent(
params={
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/utils.py | browser_use/actor/utils.py | """Utility functions for actor operations."""
class Utils:
"""Utility functions for actor operations."""
@staticmethod
def get_key_info(key: str) -> tuple[str, int | None]:
"""Get the code and windowsVirtualKeyCode for a key.
Args:
key: Key name (e.g., 'Enter', 'ArrowUp', 'a', 'A')
Returns:
Tuple of (code, windowsVirtualKeyCode)
Reference: Windows Virtual Key Codes
https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
"""
# Complete mapping of key names to (code, virtualKeyCode)
# Based on standard Windows Virtual Key Codes
key_map = {
# Navigation keys
'Backspace': ('Backspace', 8),
'Tab': ('Tab', 9),
'Enter': ('Enter', 13),
'Escape': ('Escape', 27),
'Space': ('Space', 32),
' ': ('Space', 32),
'PageUp': ('PageUp', 33),
'PageDown': ('PageDown', 34),
'End': ('End', 35),
'Home': ('Home', 36),
'ArrowLeft': ('ArrowLeft', 37),
'ArrowUp': ('ArrowUp', 38),
'ArrowRight': ('ArrowRight', 39),
'ArrowDown': ('ArrowDown', 40),
'Insert': ('Insert', 45),
'Delete': ('Delete', 46),
# Modifier keys
'Shift': ('ShiftLeft', 16),
'ShiftLeft': ('ShiftLeft', 16),
'ShiftRight': ('ShiftRight', 16),
'Control': ('ControlLeft', 17),
'ControlLeft': ('ControlLeft', 17),
'ControlRight': ('ControlRight', 17),
'Alt': ('AltLeft', 18),
'AltLeft': ('AltLeft', 18),
'AltRight': ('AltRight', 18),
'Meta': ('MetaLeft', 91),
'MetaLeft': ('MetaLeft', 91),
'MetaRight': ('MetaRight', 92),
# Function keys F1-F24
'F1': ('F1', 112),
'F2': ('F2', 113),
'F3': ('F3', 114),
'F4': ('F4', 115),
'F5': ('F5', 116),
'F6': ('F6', 117),
'F7': ('F7', 118),
'F8': ('F8', 119),
'F9': ('F9', 120),
'F10': ('F10', 121),
'F11': ('F11', 122),
'F12': ('F12', 123),
'F13': ('F13', 124),
'F14': ('F14', 125),
'F15': ('F15', 126),
'F16': ('F16', 127),
'F17': ('F17', 128),
'F18': ('F18', 129),
'F19': ('F19', 130),
'F20': ('F20', 131),
'F21': ('F21', 132),
'F22': ('F22', 133),
'F23': ('F23', 134),
'F24': ('F24', 135),
# Numpad keys
'NumLock': ('NumLock', 144),
'Numpad0': ('Numpad0', 96),
'Numpad1': ('Numpad1', 97),
'Numpad2': ('Numpad2', 98),
'Numpad3': ('Numpad3', 99),
'Numpad4': ('Numpad4', 100),
'Numpad5': ('Numpad5', 101),
'Numpad6': ('Numpad6', 102),
'Numpad7': ('Numpad7', 103),
'Numpad8': ('Numpad8', 104),
'Numpad9': ('Numpad9', 105),
'NumpadMultiply': ('NumpadMultiply', 106),
'NumpadAdd': ('NumpadAdd', 107),
'NumpadSubtract': ('NumpadSubtract', 109),
'NumpadDecimal': ('NumpadDecimal', 110),
'NumpadDivide': ('NumpadDivide', 111),
# Lock keys
'CapsLock': ('CapsLock', 20),
'ScrollLock': ('ScrollLock', 145),
# OEM/Punctuation keys (US keyboard layout)
'Semicolon': ('Semicolon', 186),
';': ('Semicolon', 186),
'Equal': ('Equal', 187),
'=': ('Equal', 187),
'Comma': ('Comma', 188),
',': ('Comma', 188),
'Minus': ('Minus', 189),
'-': ('Minus', 189),
'Period': ('Period', 190),
'.': ('Period', 190),
'Slash': ('Slash', 191),
'/': ('Slash', 191),
'Backquote': ('Backquote', 192),
'`': ('Backquote', 192),
'BracketLeft': ('BracketLeft', 219),
'[': ('BracketLeft', 219),
'Backslash': ('Backslash', 220),
'\\': ('Backslash', 220),
'BracketRight': ('BracketRight', 221),
']': ('BracketRight', 221),
'Quote': ('Quote', 222),
"'": ('Quote', 222),
# Media/Browser keys
'AudioVolumeMute': ('AudioVolumeMute', 173),
'AudioVolumeDown': ('AudioVolumeDown', 174),
'AudioVolumeUp': ('AudioVolumeUp', 175),
'MediaTrackNext': ('MediaTrackNext', 176),
'MediaTrackPrevious': ('MediaTrackPrevious', 177),
'MediaStop': ('MediaStop', 178),
'MediaPlayPause': ('MediaPlayPause', 179),
'BrowserBack': ('BrowserBack', 166),
'BrowserForward': ('BrowserForward', 167),
'BrowserRefresh': ('BrowserRefresh', 168),
'BrowserStop': ('BrowserStop', 169),
'BrowserSearch': ('BrowserSearch', 170),
'BrowserFavorites': ('BrowserFavorites', 171),
'BrowserHome': ('BrowserHome', 172),
# Additional common keys
'Clear': ('Clear', 12),
'Pause': ('Pause', 19),
'Select': ('Select', 41),
'Print': ('Print', 42),
'Execute': ('Execute', 43),
'PrintScreen': ('PrintScreen', 44),
'Help': ('Help', 47),
'ContextMenu': ('ContextMenu', 93),
}
if key in key_map:
return key_map[key]
# Handle alphanumeric keys dynamically
if len(key) == 1:
if key.isalpha():
# Letter keys: A-Z have VK codes 65-90
return (f'Key{key.upper()}', ord(key.upper()))
elif key.isdigit():
# Digit keys: 0-9 have VK codes 48-57 (same as ASCII)
return (f'Digit{key}', ord(key))
# Fallback: use the key name as code, no virtual key code
return (key, None)
# Backward compatibility: standalone alias so callers can import the bare function.
def get_key_info(key: str) -> tuple[str, int | None]:
    """Get the code and windowsVirtualKeyCode for a key.

    Thin module-level wrapper that delegates to `Utils.get_key_info`.

    Args:
        key: Key name (e.g., 'Enter', 'ArrowUp', 'a', 'A')

    Returns:
        Tuple of (code, windowsVirtualKeyCode)

    Reference: Windows Virtual Key Codes
    https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
    """
    return Utils.get_key_info(key)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/__init__.py | browser_use/actor/__init__.py | """CDP-Use High-Level Library
A Playwright-like library built on top of CDP (Chrome DevTools Protocol).
"""
from .element import Element
from .mouse import Mouse
from .page import Page
from .utils import Utils
# Public API of the actor package.
__all__ = ['Page', 'Element', 'Mouse', 'Utils']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/page.py | browser_use/actor/page.py | """Page class for page-level operations."""
from typing import TYPE_CHECKING, TypeVar
from pydantic import BaseModel
from browser_use import logger
from browser_use.actor.utils import get_key_info
from browser_use.dom.serializer.serializer import DOMTreeSerializer
from browser_use.dom.service import DomService
from browser_use.llm.messages import SystemMessage, UserMessage
T = TypeVar('T', bound=BaseModel)
if TYPE_CHECKING:
from cdp_use.cdp.dom.commands import (
DescribeNodeParameters,
QuerySelectorAllParameters,
)
from cdp_use.cdp.emulation.commands import SetDeviceMetricsOverrideParameters
from cdp_use.cdp.input.commands import (
DispatchKeyEventParameters,
)
from cdp_use.cdp.page.commands import CaptureScreenshotParameters, NavigateParameters, NavigateToHistoryEntryParameters
from cdp_use.cdp.runtime.commands import EvaluateParameters
from cdp_use.cdp.target.commands import (
AttachToTargetParameters,
GetTargetInfoParameters,
)
from cdp_use.cdp.target.types import TargetInfo
from browser_use.browser.session import BrowserSession
from browser_use.llm.base import BaseChatModel
from .element import Element
from .mouse import Mouse
class Page:
"""Page operations (tab or iframe)."""
    def __init__(
        self, browser_session: 'BrowserSession', target_id: str, session_id: str | None = None, llm: 'BaseChatModel | None' = None
    ):
        """Bind this Page to a CDP target (tab or iframe).

        Args:
            browser_session: Owning session; supplies the shared CDP client.
            session_id: Optional pre-attached CDP session to reuse; when None,
                a session is created lazily by `_ensure_session()`.
            llm: Default model for the AI helpers (`get_element_by_prompt`,
                `extract_content`) when no per-call model is given.
        """
        self._browser_session = browser_session
        self._client = browser_session.cdp_client
        self._target_id = target_id
        self._session_id: str | None = session_id
        self._mouse: 'Mouse | None' = None  # created lazily by the `mouse` property
        self._llm = llm
async def _ensure_session(self) -> str:
"""Ensure we have a session ID for this target."""
if not self._session_id:
params: 'AttachToTargetParameters' = {'targetId': self._target_id, 'flatten': True}
result = await self._client.send.Target.attachToTarget(params)
self._session_id = result['sessionId']
# Enable necessary domains
import asyncio
await asyncio.gather(
self._client.send.Page.enable(session_id=self._session_id),
self._client.send.DOM.enable(session_id=self._session_id),
self._client.send.Runtime.enable(session_id=self._session_id),
self._client.send.Network.enable(session_id=self._session_id),
)
return self._session_id
@property
async def session_id(self) -> str:
"""Get the session ID for this target.
@dev Pass this to an arbitrary CDP call
"""
return await self._ensure_session()
@property
async def mouse(self) -> 'Mouse':
"""Get the mouse interface for this target."""
if not self._mouse:
session_id = await self._ensure_session()
from .mouse import Mouse
self._mouse = Mouse(self._browser_session, session_id, self._target_id)
return self._mouse
async def reload(self) -> None:
"""Reload the target."""
session_id = await self._ensure_session()
await self._client.send.Page.reload(session_id=session_id)
async def get_element(self, backend_node_id: int) -> 'Element':
"""Get an element by its backend node ID."""
session_id = await self._ensure_session()
from .element import Element as Element_
return Element_(self._browser_session, backend_node_id, session_id)
async def evaluate(self, page_function: str, *args) -> str:
"""Execute JavaScript in the target.
Args:
page_function: JavaScript code that MUST start with (...args) => format
*args: Arguments to pass to the function
Returns:
String representation of the JavaScript execution result.
Objects and arrays are JSON-stringified.
"""
session_id = await self._ensure_session()
# Clean and fix common JavaScript string parsing issues
page_function = self._fix_javascript_string(page_function)
# Enforce arrow function format
if not (page_function.startswith('(') and '=>' in page_function):
raise ValueError(f'JavaScript code must start with (...args) => format. Got: {page_function[:50]}...')
# Build the expression - call the arrow function with provided args
if args:
# Convert args to JSON representation for safe passing
import json
arg_strs = [json.dumps(arg) for arg in args]
expression = f'({page_function})({", ".join(arg_strs)})'
else:
expression = f'({page_function})()'
# Debug: log the actual expression being evaluated
logger.debug(f'Evaluating JavaScript: {repr(expression)}')
params: 'EvaluateParameters' = {'expression': expression, 'returnByValue': True, 'awaitPromise': True}
result = await self._client.send.Runtime.evaluate(
params,
session_id=session_id,
)
if 'exceptionDetails' in result:
raise RuntimeError(f'JavaScript evaluation failed: {result["exceptionDetails"]}')
value = result.get('result', {}).get('value')
# Always return string representation
if value is None:
return ''
elif isinstance(value, str):
return value
else:
# Convert objects, numbers, booleans to string
import json
try:
return json.dumps(value) if isinstance(value, (dict, list)) else str(value)
except (TypeError, ValueError):
return str(value)
def _fix_javascript_string(self, js_code: str) -> str:
"""Fix common JavaScript string parsing issues when written as Python string."""
# Just do minimal, safe cleaning
js_code = js_code.strip()
# Only fix the most common and safe issues:
# 1. Remove obvious Python string wrapper quotes if they exist
if (js_code.startswith('"') and js_code.endswith('"')) or (js_code.startswith("'") and js_code.endswith("'")):
# Check if it's a wrapped string (not part of JS syntax)
inner = js_code[1:-1]
if inner.count('"') + inner.count("'") == 0 or '() =>' in inner:
js_code = inner
# 2. Only fix clearly escaped quotes that shouldn't be
# But be very conservative - only if we're sure it's a Python string artifact
if '\\"' in js_code and js_code.count('\\"') > js_code.count('"'):
js_code = js_code.replace('\\"', '"')
if "\\'" in js_code and js_code.count("\\'") > js_code.count("'"):
js_code = js_code.replace("\\'", "'")
# 3. Basic whitespace normalization only
js_code = js_code.strip()
# Final validation - ensure it's not empty
if not js_code:
raise ValueError('JavaScript code is empty after cleaning')
return js_code
async def screenshot(self, format: str = 'png', quality: int | None = None) -> str:
"""Take a screenshot and return base64 encoded image.
Args:
format: Image format ('jpeg', 'png', 'webp')
quality: Quality 0-100 for JPEG format
Returns:
Base64-encoded image data
"""
session_id = await self._ensure_session()
params: 'CaptureScreenshotParameters' = {'format': format}
if quality is not None and format.lower() == 'jpeg':
params['quality'] = quality
result = await self._client.send.Page.captureScreenshot(params, session_id=session_id)
return result['data']
    async def press(self, key: str) -> None:
        """Press a key on the page (sends keyboard input to the focused element or page).

        Args:
            key: A single key name ('Enter', 'a', 'F5') or a '+'-joined
                combination ('Control+A'); the last segment is the main key,
                the preceding segments are treated as modifiers.
        """
        session_id = await self._ensure_session()
        # Handle key combinations like "Control+A"
        if '+' in key:
            parts = key.split('+')
            modifiers = parts[:-1]
            main_key = parts[-1]
            # Calculate modifier bitmask (CDP Input.dispatchKeyEvent encoding:
            # Alt=1, Ctrl=2, Meta/Command=4, Shift=8)
            modifier_value = 0
            modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
            for mod in modifiers:
                modifier_value |= modifier_map.get(mod, 0)
            # Press modifier keys first so the main key is seen with them held down
            for mod in modifiers:
                code, vk_code = get_key_info(mod)
                params: 'DispatchKeyEventParameters' = {'type': 'keyDown', 'key': mod, 'code': code}
                if vk_code is not None:
                    params['windowsVirtualKeyCode'] = vk_code
                await self._client.send.Input.dispatchKeyEvent(params, session_id=session_id)
            # Press main key with modifiers bitmask on both keyDown and keyUp
            main_code, main_vk_code = get_key_info(main_key)
            main_down_params: 'DispatchKeyEventParameters' = {
                'type': 'keyDown',
                'key': main_key,
                'code': main_code,
                'modifiers': modifier_value,
            }
            if main_vk_code is not None:
                main_down_params['windowsVirtualKeyCode'] = main_vk_code
            await self._client.send.Input.dispatchKeyEvent(main_down_params, session_id=session_id)
            main_up_params: 'DispatchKeyEventParameters' = {
                'type': 'keyUp',
                'key': main_key,
                'code': main_code,
                'modifiers': modifier_value,
            }
            if main_vk_code is not None:
                main_up_params['windowsVirtualKeyCode'] = main_vk_code
            await self._client.send.Input.dispatchKeyEvent(main_up_params, session_id=session_id)
            # Release modifier keys in reverse order of pressing
            for mod in reversed(modifiers):
                code, vk_code = get_key_info(mod)
                release_params: 'DispatchKeyEventParameters' = {'type': 'keyUp', 'key': mod, 'code': code}
                if vk_code is not None:
                    release_params['windowsVirtualKeyCode'] = vk_code
                await self._client.send.Input.dispatchKeyEvent(release_params, session_id=session_id)
        else:
            # Simple key press: keyDown immediately followed by keyUp
            code, vk_code = get_key_info(key)
            key_down_params: 'DispatchKeyEventParameters' = {'type': 'keyDown', 'key': key, 'code': code}
            if vk_code is not None:
                key_down_params['windowsVirtualKeyCode'] = vk_code
            await self._client.send.Input.dispatchKeyEvent(key_down_params, session_id=session_id)
            key_up_params: 'DispatchKeyEventParameters' = {'type': 'keyUp', 'key': key, 'code': code}
            if vk_code is not None:
                key_up_params['windowsVirtualKeyCode'] = vk_code
            await self._client.send.Input.dispatchKeyEvent(key_up_params, session_id=session_id)
async def set_viewport_size(self, width: int, height: int) -> None:
"""Set the viewport size."""
session_id = await self._ensure_session()
params: 'SetDeviceMetricsOverrideParameters' = {
'width': width,
'height': height,
'deviceScaleFactor': 1.0,
'mobile': False,
}
await self._client.send.Emulation.setDeviceMetricsOverride(
params,
session_id=session_id,
)
# Target properties (from CDP getTargetInfo)
async def get_target_info(self) -> 'TargetInfo':
"""Get target information."""
params: 'GetTargetInfoParameters' = {'targetId': self._target_id}
result = await self._client.send.Target.getTargetInfo(params)
return result['targetInfo']
async def get_url(self) -> str:
"""Get the current URL."""
info = await self.get_target_info()
return info.get('url', '')
async def get_title(self) -> str:
"""Get the current title."""
info = await self.get_target_info()
return info.get('title', '')
async def goto(self, url: str) -> None:
"""Navigate this target to a URL."""
session_id = await self._ensure_session()
params: 'NavigateParameters' = {'url': url}
await self._client.send.Page.navigate(params, session_id=session_id)
async def navigate(self, url: str) -> None:
"""Alias for goto."""
await self.goto(url)
async def go_back(self) -> None:
"""Navigate back in history."""
session_id = await self._ensure_session()
try:
# Get navigation history
history = await self._client.send.Page.getNavigationHistory(session_id=session_id)
current_index = history['currentIndex']
entries = history['entries']
# Check if we can go back
if current_index <= 0:
raise RuntimeError('Cannot go back - no previous entry in history')
# Navigate to the previous entry
previous_entry_id = entries[current_index - 1]['id']
params: 'NavigateToHistoryEntryParameters' = {'entryId': previous_entry_id}
await self._client.send.Page.navigateToHistoryEntry(params, session_id=session_id)
except Exception as e:
raise RuntimeError(f'Failed to navigate back: {e}')
async def go_forward(self) -> None:
"""Navigate forward in history."""
session_id = await self._ensure_session()
try:
# Get navigation history
history = await self._client.send.Page.getNavigationHistory(session_id=session_id)
current_index = history['currentIndex']
entries = history['entries']
# Check if we can go forward
if current_index >= len(entries) - 1:
raise RuntimeError('Cannot go forward - no next entry in history')
# Navigate to the next entry
next_entry_id = entries[current_index + 1]['id']
params: 'NavigateToHistoryEntryParameters' = {'entryId': next_entry_id}
await self._client.send.Page.navigateToHistoryEntry(params, session_id=session_id)
except Exception as e:
raise RuntimeError(f'Failed to navigate forward: {e}')
# Element finding methods (these would need to be implemented based on DOM queries)
async def get_elements_by_css_selector(self, selector: str) -> list['Element']:
"""Get elements by CSS selector."""
session_id = await self._ensure_session()
# Get document first
doc_result = await self._client.send.DOM.getDocument(session_id=session_id)
document_node_id = doc_result['root']['nodeId']
# Query selector all
query_params: 'QuerySelectorAllParameters' = {'nodeId': document_node_id, 'selector': selector}
result = await self._client.send.DOM.querySelectorAll(query_params, session_id=session_id)
elements = []
from .element import Element as Element_
# Convert node IDs to backend node IDs
for node_id in result['nodeIds']:
# Get backend node ID
describe_params: 'DescribeNodeParameters' = {'nodeId': node_id}
node_result = await self._client.send.DOM.describeNode(describe_params, session_id=session_id)
backend_node_id = node_result['node']['backendNodeId']
elements.append(Element_(self._browser_session, backend_node_id, session_id))
return elements
# AI METHODS
@property
def dom_service(self) -> 'DomService':
"""Get the DOM service for this target."""
return DomService(self._browser_session)
async def get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None' = None) -> 'Element | None':
"""Get an element by a prompt."""
await self._ensure_session()
llm = llm or self._llm
if not llm:
raise ValueError('LLM not provided')
dom_service = self.dom_service
# Lazy fetch all_frames inside get_dom_tree if needed (for cross-origin iframes)
enhanced_dom_tree, _ = await dom_service.get_dom_tree(target_id=self._target_id, all_frames=None)
session_id = self._browser_session.id
serialized_dom_state, _ = DOMTreeSerializer(
enhanced_dom_tree, None, paint_order_filtering=True, session_id=session_id
).serialize_accessible_elements()
llm_representation = serialized_dom_state.llm_representation()
system_message = SystemMessage(
content="""You are an AI created to find an element on a page by a prompt.
<browser_state>
Interactive Elements: All interactive elements will be provided in format as [index]<type>text</type> where
- index: Numeric identifier for interaction
- type: HTML element type (button, input, etc.)
- text: Element description
Examples:
[33]<div>User form</div>
[35]<button aria-label='Submit form'>Submit</button>
Note that:
- Only elements with numeric indexes in [] are interactive
- (stacked) indentation (with \t) is important and means that the element is a (html) child of the element above (with a lower index)
- Pure text elements without [] are not interactive.
</browser_state>
Your task is to find an element index (if any) that matches the prompt (written in <prompt> tag).
If non of the elements matches the, return None.
Before you return the element index, reason about the state and elements for a sentence or two."""
)
state_message = UserMessage(
content=f"""
<browser_state>
{llm_representation}
</browser_state>
<prompt>
{prompt}
</prompt>
"""
)
class ElementResponse(BaseModel):
# thinking: str
element_highlight_index: int | None
llm_response = await llm.ainvoke(
[
system_message,
state_message,
],
output_format=ElementResponse,
)
element_highlight_index = llm_response.completion.element_highlight_index
if element_highlight_index is None or element_highlight_index not in serialized_dom_state.selector_map:
return None
element = serialized_dom_state.selector_map[element_highlight_index]
from .element import Element as Element_
return Element_(self._browser_session, element.backend_node_id, self._session_id)
async def must_get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None' = None) -> 'Element':
"""Get an element by a prompt.
@dev LLM can still return None, this just raises an error if the element is not found.
"""
element = await self.get_element_by_prompt(prompt, llm)
if element is None:
raise ValueError(f'No element found for prompt: {prompt}')
return element
    async def extract_content(self, prompt: str, structured_output: type[T], llm: 'BaseChatModel | None' = None) -> T:
        """Extract structured content from the current page using LLM.

        Extracts clean markdown from the page and sends it to LLM for structured data extraction.

        Args:
            prompt: Description of what content to extract
            structured_output: Pydantic BaseModel class defining the expected output structure
            llm: Language model to use for extraction (falls back to the instance default)

        Returns:
            The structured BaseModel instance with extracted content

        Raises:
            ValueError: if no LLM was provided here or at construction time.
            RuntimeError: if markdown extraction fails or the LLM call fails/times out.
        """
        llm = llm or self._llm
        if not llm:
            raise ValueError('LLM not provided')
        # Extract clean markdown using the same method as in tools/service.py
        try:
            # content_stats is deliberately unused here; only the markdown body goes to the LLM
            content, content_stats = await self._extract_clean_markdown()
        except Exception as e:
            raise RuntimeError(f'Could not extract clean markdown: {type(e).__name__}')
        # System prompt for structured extraction
        system_prompt = """
You are an expert at extracting structured data from the markdown of a webpage.
<input>
You will be given a query and the markdown of a webpage that has been filtered to remove noise and advertising content.
</input>
<instructions>
- You are tasked to extract information from the webpage that is relevant to the query.
- You should ONLY use the information available in the webpage to answer the query. Do not make up information or provide guess from your own knowledge.
- If the information relevant to the query is not available in the page, your response should mention that.
- If the query asks for all items, products, etc., make sure to directly list all of them.
- Return the extracted content in the exact structured format specified.
</instructions>
<output>
- Your output should present ALL the information relevant to the query in the specified structured format.
- Do not answer in conversational format - directly output the relevant information in the structured format.
</output>
""".strip()
        # Build prompt with just query and content
        prompt_content = f'<query>\n{prompt}\n</query>\n\n<webpage_content>\n{content}\n</webpage_content>'
        # Send to LLM with structured output
        import asyncio

        try:
            # Hard cap so a hung LLM call cannot stall the page indefinitely.
            response = await asyncio.wait_for(
                llm.ainvoke(
                    [SystemMessage(content=system_prompt), UserMessage(content=prompt_content)], output_format=structured_output
                ),
                timeout=120.0,
            )
            # Return the structured output BaseModel instance
            return response.completion
        except Exception as e:
            raise RuntimeError(str(e))
async def _extract_clean_markdown(self, extract_links: bool = False) -> tuple[str, dict]:
"""Extract clean markdown from the current page using enhanced DOM tree.
Uses the shared markdown extractor for consistency with tools/service.py.
"""
from browser_use.dom.markdown_extractor import extract_clean_markdown
dom_service = self.dom_service
return await extract_clean_markdown(dom_service=dom_service, target_id=self._target_id, extract_links=extract_links)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/playground/playground.py | browser_use/actor/playground/playground.py | #!/usr/bin/env python3
"""
Playground script to test the browser-use actor API.
This script demonstrates:
- Starting a browser session
- Using the actor API to navigate and interact
- Finding elements, clicking, scrolling, JavaScript evaluation
- Testing most of the available methods
"""
import asyncio
import json
import logging
from browser_use import Browser
# Configure logging to see what's happening
logging.basicConfig(level=logging.INFO)
# Module-level logger used throughout main() below.
logger = logging.getLogger(__name__)
async def main():
    """Main playground function.

    Exercises the actor API end to end: navigation, screenshots, viewport
    control, JavaScript evaluation, element lookup/interaction, mouse input,
    history navigation, and multi-page management. Purely a demo — every step
    logs its outcome rather than asserting it.
    """
    logger.info('🚀 Starting browser actor playground')
    # Create browser session
    browser = Browser()
    try:
        # Start the browser
        await browser.start()
        logger.info('✅ Browser session started')
        # Navigate to Wikipedia using integrated methods
        logger.info('📖 Navigating to Wikipedia...')
        page = await browser.new_page('https://en.wikipedia.org')
        # Get basic page info
        url = await page.get_url()
        title = await page.get_title()
        logger.info(f'📄 Page loaded: {title} ({url})')
        # Take a screenshot
        logger.info('📸 Taking initial screenshot...')
        screenshot_b64 = await page.screenshot()
        logger.info(f'📸 Screenshot captured: {len(screenshot_b64)} bytes')
        # Set viewport size
        logger.info('🖥️ Setting viewport to 1920x1080...')
        await page.set_viewport_size(1920, 1080)
        # Execute some JavaScript to count links
        logger.info('🔍 Counting article links using JavaScript...')
        js_code = """() => {
    // Find all article links on the page
    const links = Array.from(document.querySelectorAll('a[href*="/wiki/"]:not([href*=":"])'))
        .filter(link => !link.href.includes('Main_Page') && !link.href.includes('Special:'));
    return {
        total: links.length,
        sample: links.slice(0, 3).map(link => ({
            href: link.href,
            text: link.textContent.trim()
        }))
    };
}"""
        link_info = json.loads(await page.evaluate(js_code))
        logger.info(f'🔗 Found {link_info["total"]} article links')
        # Try to find and interact with links using CSS selector
        try:
            # Find article links on the page
            links = await page.get_elements_by_css_selector('a[href*="/wiki/"]:not([href*=":"])')
            if links:
                logger.info(f'📋 Found {len(links)} wiki links via CSS selector')
                # Pick the first link
                link_element = links[0]
                # Get link info using available methods
                basic_info = await link_element.get_basic_info()
                link_href = await link_element.get_attribute('href')
                logger.info(f'🎯 Selected element: <{basic_info["nodeName"]}>')
                logger.info(f'🔗 Link href: {link_href}')
                if basic_info['boundingBox']:
                    bbox = basic_info['boundingBox']
                    logger.info(f'📏 Position: ({bbox["x"]}, {bbox["y"]}) Size: {bbox["width"]}x{bbox["height"]}')
                # Test element interactions with robust implementations
                logger.info('👆 Hovering over the element...')
                await link_element.hover()
                await asyncio.sleep(1)
                logger.info('🔍 Focusing the element...')
                await link_element.focus()
                await asyncio.sleep(0.5)
                # Click the link using robust click method
                logger.info('🖱️ Clicking the link with robust fallbacks...')
                await link_element.click()
                # Wait for navigation
                await asyncio.sleep(3)
                # Get new page info
                new_url = await page.get_url()
                new_title = await page.get_title()
                logger.info(f'📄 Navigated to: {new_title}')
                logger.info(f'🌐 New URL: {new_url}')
            else:
                logger.warning('❌ No links found to interact with')
        except Exception as e:
            logger.warning(f'⚠️ Link interaction failed: {e}')
        # Scroll down the page
        logger.info('📜 Scrolling down the page...')
        mouse = await page.mouse
        await mouse.scroll(x=0, y=100, delta_y=500)
        await asyncio.sleep(1)
        # Test mouse operations
        logger.info('🖱️ Testing mouse operations...')
        await mouse.move(x=100, y=200)
        await mouse.click(x=150, y=250)
        # Execute more JavaScript examples
        logger.info('🧪 Testing JavaScript evaluation...')
        # Simple expressions
        page_height = await page.evaluate('() => document.body.scrollHeight')
        current_scroll = await page.evaluate('() => window.pageYOffset')
        logger.info(f'📏 Page height: {page_height}px, current scroll: {current_scroll}px')
        # JavaScript with arguments
        result = await page.evaluate('(x) => x * 2', 21)
        logger.info(f'🧮 JavaScript with args: 21 * 2 = {result}')
        # More complex JavaScript
        page_stats = json.loads(
            await page.evaluate("""() => {
    return {
        url: window.location.href,
        title: document.title,
        links: document.querySelectorAll('a').length,
        images: document.querySelectorAll('img').length,
        scrollTop: window.pageYOffset,
        viewportHeight: window.innerHeight
    };
}""")
        )
        logger.info(f'📊 Page stats: {page_stats}')
        # Get page title using different methods
        title_via_js = await page.evaluate('() => document.title')
        title_via_api = await page.get_title()
        logger.info(f'📝 Title via JS: "{title_via_js}"')
        logger.info(f'📝 Title via API: "{title_via_api}"')
        # Take a final screenshot
        logger.info('📸 Taking final screenshot...')
        final_screenshot = await page.screenshot()
        logger.info(f'📸 Final screenshot: {len(final_screenshot)} bytes')
        # Test browser navigation with error handling
        logger.info('⬅️ Testing browser back navigation...')
        try:
            await page.go_back()
            await asyncio.sleep(2)
            back_url = await page.get_url()
            back_title = await page.get_title()
            logger.info(f'📄 After going back: {back_title}')
            logger.info(f'🌐 Back URL: {back_url}')
        except RuntimeError as e:
            logger.info(f'ℹ️ Navigation back failed as expected: {e}')
        # Test creating new page
        logger.info('🆕 Creating new blank page...')
        new_page = await browser.new_page()
        new_page_url = await new_page.get_url()
        logger.info(f'🆕 New page created with URL: {new_page_url}')
        # Get all pages
        all_pages = await browser.get_pages()
        logger.info(f'📑 Total pages: {len(all_pages)}')
        # Test form interaction if we can find a form
        try:
            # Look for search input on the page
            search_inputs = await page.get_elements_by_css_selector('input[type="search"], input[name*="search"]')
            if search_inputs:
                search_input = search_inputs[0]
                logger.info('🔍 Found search input, testing form interaction...')
                await search_input.focus()
                await search_input.fill('test search query')
                await page.press('Enter')
                logger.info('✅ Form interaction test completed')
            else:
                logger.info('ℹ️ No search inputs found for form testing')
        except Exception as e:
            logger.info(f'ℹ️ Form interaction test skipped: {e}')
        # wait 2 seconds before closing the new page
        logger.info('🕒 Waiting 2 seconds before closing the new page...')
        await asyncio.sleep(2)
        logger.info('🗑️ Closing new page...')
        await browser.close_page(new_page)
        logger.info('✅ Playground completed successfully!')
        # NOTE(review): input() blocks the event loop; acceptable for a demo script.
        input('Press Enter to continue...')
    except Exception as e:
        logger.error(f'❌ Error in playground: {e}', exc_info=True)
    finally:
        # Clean up
        logger.info('🧹 Cleaning up...')
        try:
            await browser.stop()
            logger.info('✅ Browser session stopped')
        except Exception as e:
            logger.error(f'❌ Error stopping browser: {e}')
if __name__ == '__main__':
    # Allow running this playground directly as a script.
    asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/playground/flights.py | browser_use/actor/playground/flights.py | import asyncio
from browser_use import Agent, Browser, ChatOpenAI
llm = ChatOpenAI('gpt-4.1-mini')
async def main():
"""
Main function demonstrating mixed automation with Browser-Use and Playwright.
"""
print('🚀 Mixed Automation with Browser-Use and Actor API')
browser = Browser(keep_alive=True)
await browser.start()
page = await browser.get_current_page() or await browser.new_page()
# Go to apple wikipedia page
await page.goto('https://www.google.com/travel/flights')
await asyncio.sleep(1)
round_trip_button = await page.must_get_element_by_prompt('round trip button', llm)
await round_trip_button.click()
one_way_button = await page.must_get_element_by_prompt('one way button', llm)
await one_way_button.click()
await asyncio.sleep(1)
agent = Agent(task='Find the cheapest flight from London to Paris on 2025-10-15', llm=llm, browser_session=browser)
await agent.run()
input('Press Enter to continue...')
await browser.stop()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/actor/playground/mixed_automation.py | browser_use/actor/playground/mixed_automation.py | import asyncio
from pydantic import BaseModel
from browser_use import Browser, ChatOpenAI
TASK = """
On the current wikipedia page, find the latest huge edit and tell me what is was about.
"""
class LatestEditFinder(BaseModel):
"""Find the latest huge edit on the current wikipedia page."""
latest_edit: str
edit_time: str
edit_author: str
edit_summary: str
edit_url: str
llm = ChatOpenAI('gpt-4.1-mini')
async def main():
"""
Main function demonstrating mixed automation with Browser-Use and Playwright.
"""
print('🚀 Mixed Automation with Browser-Use and Actor API')
browser = Browser(keep_alive=True)
await browser.start()
page = await browser.get_current_page() or await browser.new_page()
# Go to apple wikipedia page
await page.goto('https://browser-use.github.io/stress-tests/challenges/angularjs-form.html')
await asyncio.sleep(1)
element = await page.get_element_by_prompt('zip code input', llm)
print('Element found', element)
if element:
await element.click()
else:
print('No element found')
await browser.stop()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/mcp/controller.py | browser_use/mcp/controller.py | """MCP (Model Context Protocol) tool wrapper for browser-use.
This module provides integration between MCP tools and browser-use's action registry system.
MCP tools are dynamically discovered and registered as browser-use actions.
"""
import asyncio
import logging
from typing import Any
from pydantic import Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.tools.registry.service import Registry
logger = logging.getLogger(__name__)
try:
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import TextContent, Tool
MCP_AVAILABLE = True
except ImportError:
MCP_AVAILABLE = False
logger.warning('MCP SDK not installed. Install with: pip install mcp')
class MCPToolWrapper:
"""Wrapper to integrate MCP tools as browser-use actions."""
def __init__(self, registry: Registry, mcp_command: str, mcp_args: list[str] | None = None):
"""Initialize MCP tool wrapper.
Args:
registry: Browser-use action registry to register MCP tools
mcp_command: Command to start MCP server (e.g., "npx")
mcp_args: Arguments for MCP command (e.g., ["@playwright/mcp@latest"])
"""
if not MCP_AVAILABLE:
raise ImportError('MCP SDK not installed. Install with: pip install mcp')
self.registry = registry
self.mcp_command = mcp_command
self.mcp_args = mcp_args or []
self.session: ClientSession | None = None
self._tools: dict[str, Tool] = {}
self._registered_actions: set[str] = set()
self._shutdown_event = asyncio.Event()
async def connect(self):
"""Connect to MCP server and discover available tools."""
if self.session:
return # Already connected
logger.info(f'🔌 Connecting to MCP server: {self.mcp_command} {" ".join(self.mcp_args)}')
# Create server parameters
server_params = StdioServerParameters(command=self.mcp_command, args=self.mcp_args, env=None)
# Connect to the MCP server
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
self.session = session
# Initialize the connection
await session.initialize()
# Discover available tools
tools_response = await session.list_tools()
self._tools = {tool.name: tool for tool in tools_response.tools}
logger.info(f'📦 Discovered {len(self._tools)} MCP tools: {list(self._tools.keys())}')
# Register all discovered tools as actions
for tool_name, tool in self._tools.items():
self._register_tool_as_action(tool_name, tool)
# Keep session alive while tools are being used
await self._keep_session_alive()
async def _keep_session_alive(self):
"""Keep the MCP session alive."""
# This will block until the session is closed
# In practice, you'd want to manage this lifecycle better
try:
await self._shutdown_event.wait()
except asyncio.CancelledError:
pass
def _register_tool_as_action(self, tool_name: str, tool: Tool):
"""Register an MCP tool as a browser-use action.
Args:
tool_name: Name of the MCP tool
tool: MCP Tool object with schema information
"""
if tool_name in self._registered_actions:
return # Already registered
# Parse tool parameters to create Pydantic model
param_fields = {}
if tool.inputSchema:
# MCP tools use JSON Schema for parameters
properties = tool.inputSchema.get('properties', {})
required = set(tool.inputSchema.get('required', []))
for param_name, param_schema in properties.items():
# Convert JSON Schema type to Python type
param_type = self._json_schema_to_python_type(param_schema)
# Determine if field is required
if param_name in required:
default = ... # Required field
else:
default = param_schema.get('default', None)
# Add field description if available
field_kwargs = {}
if 'description' in param_schema:
field_kwargs['description'] = param_schema['description']
param_fields[param_name] = (param_type, Field(default, **field_kwargs))
# Create Pydantic model for the tool parameters
param_model = create_model(f'{tool_name}_Params', **param_fields) if param_fields else None
# Determine if this is a browser-specific tool
is_browser_tool = tool_name.startswith('browser_')
domains = None
# Note: page_filter has been removed since we no longer use Page objects
# Create wrapper function for the MCP tool
async def mcp_action_wrapper(**kwargs):
"""Wrapper function that calls the MCP tool."""
if not self.session:
raise RuntimeError(f'MCP session not connected for tool {tool_name}')
# Extract parameters (excluding special injected params)
special_params = {
'page',
'browser_session',
'context',
'page_extraction_llm',
'file_system',
'available_file_paths',
'has_sensitive_data',
'browser',
'browser_context',
}
tool_params = {k: v for k, v in kwargs.items() if k not in special_params}
logger.debug(f'🔧 Calling MCP tool {tool_name} with params: {tool_params}')
try:
# Call the MCP tool
result = await self.session.call_tool(tool_name, tool_params)
# Convert MCP result to ActionResult
# MCP tools return results in various formats
if hasattr(result, 'content'):
# Handle structured content responses
if isinstance(result.content, list):
# Multiple content items
content_parts = []
for item in result.content:
if isinstance(item, TextContent):
content_parts.append(item.text) # type: ignore[reportAttributeAccessIssue]
else:
content_parts.append(str(item))
extracted_content = '\n'.join(content_parts)
else:
extracted_content = str(result.content)
else:
# Direct result
extracted_content = str(result)
return ActionResult(extracted_content=extracted_content)
except Exception as e:
logger.error(f'❌ MCP tool {tool_name} failed: {e}')
return ActionResult(extracted_content=f'MCP tool {tool_name} failed: {str(e)}', error=str(e))
# Set function name for better debugging
mcp_action_wrapper.__name__ = tool_name
mcp_action_wrapper.__qualname__ = f'mcp.{tool_name}'
# Register the action with browser-use
description = tool.description or f'MCP tool: {tool_name}'
# Use the decorator to register the action
decorated_wrapper = self.registry.action(description=description, param_model=param_model, domains=domains)(
mcp_action_wrapper
)
self._registered_actions.add(tool_name)
logger.info(f'✅ Registered MCP tool as action: {tool_name}')
async def disconnect(self):
"""Disconnect from the MCP server and clean up resources."""
self._shutdown_event.set()
if self.session:
# Session cleanup will be handled by the context manager
self.session = None
def _json_schema_to_python_type(self, schema: dict) -> Any:
"""Convert JSON Schema type to Python type.
Args:
schema: JSON Schema definition
Returns:
Python type corresponding to the schema
"""
json_type = schema.get('type', 'string')
type_mapping = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'object': dict,
}
base_type = type_mapping.get(json_type, str)
# Handle nullable types
if schema.get('nullable', False):
return base_type | None
return base_type
# Convenience function for easy integration
async def register_mcp_tools(registry: Registry, mcp_command: str, mcp_args: list[str] | None = None) -> MCPToolWrapper:
"""Register MCP tools with a browser-use registry.
Args:
registry: Browser-use action registry
mcp_command: Command to start MCP server
mcp_args: Arguments for MCP command
Returns:
MCPToolWrapper instance (connected)
Example:
```python
from browser_use import Tools
from browser_use.mcp.tools import register_mcp_tools
tools = Tools()
# Register Playwright MCP tools
mcp = await register_mcp_tools(tools.registry, 'npx', ['@playwright/mcp@latest', '--headless'])
# Now all MCP tools are available as browser-use actions
```
"""
wrapper = MCPToolWrapper(registry, mcp_command, mcp_args)
await wrapper.connect()
return wrapper
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/mcp/client.py | browser_use/mcp/client.py | """MCP (Model Context Protocol) client integration for browser-use.
This module provides integration between external MCP servers and browser-use's action registry.
MCP tools are dynamically discovered and registered as browser-use actions.
Example usage:
from browser_use import Tools
from browser_use.mcp.client import MCPClient
tools = Tools()
# Connect to an MCP server
mcp_client = MCPClient(
server_name="my-server",
command="npx",
args=["@mycompany/mcp-server@latest"]
)
# Register all MCP tools as browser-use actions
await mcp_client.register_to_tools(tools)
# Now use with Agent as normal - MCP tools are available as actions
"""
import asyncio
import logging
import time
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.telemetry import MCPClientTelemetryEvent, ProductTelemetry
from browser_use.tools.registry.service import Registry
from browser_use.tools.service import Tools
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
logger = logging.getLogger(__name__)
# Import MCP SDK
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
MCP_AVAILABLE = True
class MCPClient:
"""Client for connecting to MCP servers and exposing their tools as browser-use actions."""
def __init__(
self,
server_name: str,
command: str,
args: list[str] | None = None,
env: dict[str, str] | None = None,
):
"""Initialize MCP client.
Args:
server_name: Name of the MCP server (for logging and identification)
command: Command to start the MCP server (e.g., "npx", "python")
args: Arguments for the command (e.g., ["@playwright/mcp@latest"])
env: Environment variables for the server process
"""
self.server_name = server_name
self.command = command
self.args = args or []
self.env = env
self.session: ClientSession | None = None
self._stdio_task = None
self._read_stream = None
self._write_stream = None
self._tools: dict[str, types.Tool] = {}
self._registered_actions: set[str] = set()
self._connected = False
self._disconnect_event = asyncio.Event()
self._telemetry = ProductTelemetry()
async def connect(self) -> None:
"""Connect to the MCP server and discover available tools."""
if self._connected:
logger.debug(f'Already connected to {self.server_name}')
return
start_time = time.time()
error_msg = None
try:
logger.info(f"🔌 Connecting to MCP server '{self.server_name}': {self.command} {' '.join(self.args)}")
# Create server parameters
server_params = StdioServerParameters(command=self.command, args=self.args, env=self.env)
# Start stdio client in background task
self._stdio_task = create_task_with_error_handling(
self._run_stdio_client(server_params), name='mcp_stdio_client', suppress_exceptions=True
)
# Wait for connection to be established
retries = 0
max_retries = 100 # 10 second timeout (increased for parallel test execution)
while not self._connected and retries < max_retries:
await asyncio.sleep(0.1)
retries += 1
if not self._connected:
error_msg = f"Failed to connect to MCP server '{self.server_name}' after {max_retries * 0.1} seconds"
raise RuntimeError(error_msg)
logger.info(f"📦 Discovered {len(self._tools)} tools from '{self.server_name}': {list(self._tools.keys())}")
except Exception as e:
error_msg = str(e)
raise
finally:
# Capture telemetry for connect action
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='connect',
duration_seconds=duration,
error_message=error_msg,
)
)
async def _run_stdio_client(self, server_params: StdioServerParameters):
"""Run the stdio client connection in a background task."""
try:
async with stdio_client(server_params) as (read_stream, write_stream):
self._read_stream = read_stream
self._write_stream = write_stream
# Create and initialize session
async with ClientSession(read_stream, write_stream) as session:
self.session = session
# Initialize the connection
await session.initialize()
# Discover available tools
tools_response = await session.list_tools()
self._tools = {tool.name: tool for tool in tools_response.tools}
# Mark as connected
self._connected = True
# Keep the connection alive until disconnect is called
await self._disconnect_event.wait()
except Exception as e:
logger.error(f'MCP server connection error: {e}')
self._connected = False
raise
finally:
self._connected = False
self.session = None
async def disconnect(self) -> None:
"""Disconnect from the MCP server."""
if not self._connected:
return
start_time = time.time()
error_msg = None
try:
logger.info(f"🔌 Disconnecting from MCP server '{self.server_name}'")
# Signal disconnect
self._connected = False
self._disconnect_event.set()
# Wait for stdio task to finish
if self._stdio_task:
try:
await asyncio.wait_for(self._stdio_task, timeout=2.0)
except TimeoutError:
logger.warning(f"Timeout waiting for MCP server '{self.server_name}' to disconnect")
self._stdio_task.cancel()
try:
await self._stdio_task
except asyncio.CancelledError:
pass
self._tools.clear()
self._registered_actions.clear()
except Exception as e:
error_msg = str(e)
logger.error(f'Error disconnecting from MCP server: {e}')
finally:
# Capture telemetry for disconnect action
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=0, # Tools cleared on disconnect
version=get_browser_use_version(),
action='disconnect',
duration_seconds=duration,
error_message=error_msg,
)
)
self._telemetry.flush()
async def register_to_tools(
self,
tools: Tools,
tool_filter: list[str] | None = None,
prefix: str | None = None,
) -> None:
"""Register MCP tools as actions in the browser-use tools.
Args:
tools: Browser-use tools to register actions to
tool_filter: Optional list of tool names to register (None = all tools)
prefix: Optional prefix to add to action names (e.g., "playwright_")
"""
if not self._connected:
await self.connect()
registry = tools.registry
for tool_name, tool in self._tools.items():
# Skip if not in filter
if tool_filter and tool_name not in tool_filter:
continue
# Apply prefix if specified
action_name = f'{prefix}{tool_name}' if prefix else tool_name
# Skip if already registered
if action_name in self._registered_actions:
continue
# Register the tool as an action
self._register_tool_as_action(registry, action_name, tool)
self._registered_actions.add(action_name)
logger.info(f"✅ Registered {len(self._registered_actions)} MCP tools from '{self.server_name}' as browser-use actions")
def _register_tool_as_action(self, registry: Registry, action_name: str, tool: Any) -> None:
"""Register a single MCP tool as a browser-use action.
Args:
registry: Browser-use registry to register action to
action_name: Name for the registered action
tool: MCP Tool object with schema information
"""
# Parse tool parameters to create Pydantic model
param_fields = {}
if tool.inputSchema:
# MCP tools use JSON Schema for parameters
properties = tool.inputSchema.get('properties', {})
required = set(tool.inputSchema.get('required', []))
for param_name, param_schema in properties.items():
# Convert JSON Schema type to Python type
param_type = self._json_schema_to_python_type(param_schema, f'{action_name}_{param_name}')
# Determine if field is required and handle defaults
if param_name in required:
default = ... # Required field
else:
# Optional field - make type optional and handle default
param_type = param_type | None
if 'default' in param_schema:
default = param_schema['default']
else:
default = None
# Add field with description if available
field_kwargs = {}
if 'description' in param_schema:
field_kwargs['description'] = param_schema['description']
param_fields[param_name] = (param_type, Field(default, **field_kwargs))
# Create Pydantic model for the tool parameters
if param_fields:
# Create a BaseModel class with proper configuration
class ConfiguredBaseModel(BaseModel):
model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
param_model = create_model(f'{action_name}_Params', __base__=ConfiguredBaseModel, **param_fields)
else:
# No parameters - create empty model
param_model = None
# Determine if this is a browser-specific tool
is_browser_tool = tool.name.startswith('browser_') or 'page' in tool.name.lower()
# Set up action filters
domains = None
# Note: page_filter has been removed since we no longer use Page objects
# Browser tools filtering would need to be done via domain filters instead
# Create async wrapper function for the MCP tool
# Need to define function with explicit parameters to satisfy registry validation
if param_model:
# Type 1: Function takes param model as first parameter
async def mcp_action_wrapper(params: param_model) -> ActionResult: # type: ignore[no-redef]
"""Wrapper function that calls the MCP tool."""
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
# Convert pydantic model to dict for MCP call
tool_params = params.model_dump(exclude_none=True)
logger.debug(f"🔧 Calling MCP tool '{tool.name}' with params: {tool_params}")
start_time = time.time()
error_msg = None
try:
# Call the MCP tool
result = await self.session.call_tool(tool.name, tool_params)
# Convert MCP result to ActionResult
extracted_content = self._format_mcp_result(result)
return ActionResult(
extracted_content=extracted_content,
long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
)
except Exception as e:
error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
logger.error(error_msg)
return ActionResult(error=error_msg, success=False)
finally:
# Capture telemetry for tool call
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='tool_call',
tool_name=tool.name,
duration_seconds=duration,
error_message=error_msg,
)
)
else:
# No parameters - empty function signature
async def mcp_action_wrapper() -> ActionResult: # type: ignore[no-redef]
"""Wrapper function that calls the MCP tool."""
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
logger.debug(f"🔧 Calling MCP tool '{tool.name}' with no params")
start_time = time.time()
error_msg = None
try:
# Call the MCP tool with empty params
result = await self.session.call_tool(tool.name, {})
# Convert MCP result to ActionResult
extracted_content = self._format_mcp_result(result)
return ActionResult(
extracted_content=extracted_content,
long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
)
except Exception as e:
error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
logger.error(error_msg)
return ActionResult(error=error_msg, success=False)
finally:
# Capture telemetry for tool call
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='tool_call',
tool_name=tool.name,
duration_seconds=duration,
error_message=error_msg,
)
)
# Set function metadata for better debugging
mcp_action_wrapper.__name__ = action_name
mcp_action_wrapper.__qualname__ = f'mcp.{self.server_name}.{action_name}'
# Register the action with browser-use
description = tool.description or f'MCP tool from {self.server_name}: {tool.name}'
# Use the registry's action decorator
registry.action(description=description, param_model=param_model, domains=domains)(mcp_action_wrapper)
logger.debug(f"✅ Registered MCP tool '{tool.name}' as action '{action_name}'")
def _format_mcp_result(self, result: Any) -> str:
"""Format MCP tool result into a string for ActionResult.
Args:
result: Raw result from MCP tool call
Returns:
Formatted string representation of the result
"""
# Handle different MCP result formats
if hasattr(result, 'content'):
# Structured content response
if isinstance(result.content, list):
# Multiple content items
parts = []
for item in result.content:
if hasattr(item, 'text'):
parts.append(item.text)
elif hasattr(item, 'type') and item.type == 'text':
parts.append(str(item))
else:
parts.append(str(item))
return '\n'.join(parts)
else:
return str(result.content)
elif isinstance(result, list):
# List of content items
parts = []
for item in result:
if hasattr(item, 'text'):
parts.append(item.text)
else:
parts.append(str(item))
return '\n'.join(parts)
else:
# Direct result or unknown format
return str(result)
def _json_schema_to_python_type(self, schema: dict, model_name: str = 'NestedModel') -> Any:
"""Convert JSON Schema type to Python type.
Args:
schema: JSON Schema definition
model_name: Name for nested models
Returns:
Python type corresponding to the schema
"""
json_type = schema.get('type', 'string')
# Basic type mapping
type_mapping = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'null': type(None),
}
# Handle enums (they're still strings)
if 'enum' in schema:
return str
# Handle objects with nested properties
if json_type == 'object':
properties = schema.get('properties', {})
if properties:
# Create nested pydantic model for objects with properties
nested_fields = {}
required_fields = set(schema.get('required', []))
for prop_name, prop_schema in properties.items():
# Recursively process nested properties
prop_type = self._json_schema_to_python_type(prop_schema, f'{model_name}_{prop_name}')
# Determine if field is required and handle defaults
if prop_name in required_fields:
default = ... # Required field
else:
# Optional field - make type optional and handle default
prop_type = prop_type | None
if 'default' in prop_schema:
default = prop_schema['default']
else:
default = None
# Add field with description if available
field_kwargs = {}
if 'description' in prop_schema:
field_kwargs['description'] = prop_schema['description']
nested_fields[prop_name] = (prop_type, Field(default, **field_kwargs))
# Create a BaseModel class with proper configuration
class ConfiguredBaseModel(BaseModel):
model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
try:
# Create and return nested pydantic model
return create_model(model_name, __base__=ConfiguredBaseModel, **nested_fields)
except Exception as e:
logger.error(f'Failed to create nested model {model_name}: {e}')
logger.debug(f'Fields: {nested_fields}')
# Fallback to basic dict if model creation fails
return dict
else:
# Object without properties - just return dict
return dict
# Handle arrays with specific item types
if json_type == 'array':
if 'items' in schema:
# Get the item type recursively
item_type = self._json_schema_to_python_type(schema['items'], f'{model_name}_item')
# Return properly typed list
return list[item_type]
else:
# Array without item type specification
return list
# Get base type for non-object types
base_type = type_mapping.get(json_type, str)
# Handle nullable/optional types
if schema.get('nullable', False) or json_type == 'null':
return base_type | None
return base_type
async def __aenter__(self):
"""Async context manager entry."""
await self.connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Async context manager exit."""
await self.disconnect()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/mcp/__main__.py | browser_use/mcp/__main__.py | """Entry point for running MCP server as a module.
Usage:
python -m browser_use.mcp
"""
import asyncio
from browser_use.mcp.server import main
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/mcp/__init__.py | browser_use/mcp/__init__.py | """MCP (Model Context Protocol) support for browser-use.
This module provides integration with MCP servers and clients for browser automation.
"""
from browser_use.mcp.client import MCPClient
from browser_use.mcp.controller import MCPToolWrapper
__all__ = ['MCPClient', 'MCPToolWrapper', 'BrowserUseServer'] # type: ignore
def __getattr__(name):
"""Lazy import to avoid importing server module when only client is needed."""
if name == 'BrowserUseServer':
from browser_use.mcp.server import BrowserUseServer
return BrowserUseServer
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/mcp/server.py | browser_use/mcp/server.py | """MCP Server for browser-use - exposes browser automation capabilities via Model Context Protocol.
This server provides tools for:
- Running autonomous browser tasks with an AI agent
- Direct browser control (navigation, clicking, typing, etc.)
- Content extraction from web pages
- File system operations
Usage:
uvx browser-use --mcp
Or as an MCP server in Claude Desktop or other MCP clients:
{
"mcpServers": {
"browser-use": {
"command": "uvx",
"args": ["browser-use[cli]", "--mcp"],
"env": {
"OPENAI_API_KEY": "sk-proj-1234567890",
}
}
}
}
"""
import os
import sys
from browser_use.llm import ChatAWSBedrock
# Set environment variables BEFORE any browser_use imports to prevent early logging
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any
# Configure logging for MCP mode - redirect to stderr but preserve critical diagnostics
logging.basicConfig(
stream=sys.stderr, level=logging.WARNING, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', force=True
)
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
# Add browser-use to path if running from source
sys.path.insert(0, str(Path(__file__).parent.parent))
# Import and configure logging to use stderr before other imports
from browser_use.logging_config import setup_logging
def _configure_mcp_server_logging():
"""Configure logging for MCP server mode - redirect all logs to stderr to prevent JSON RPC interference."""
# Set environment to suppress browser-use logging during server mode
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'warning'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false' # Prevent automatic logging setup
# Configure logging to stderr for MCP mode - preserve warnings and above for troubleshooting
setup_logging(stream=sys.stderr, log_level='warning', force_setup=True)
# Also configure the root logger and all existing loggers to use stderr
logging.root.handlers = []
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.root.addHandler(stderr_handler)
logging.root.setLevel(logging.CRITICAL)
# Configure all existing loggers to use stderr and CRITICAL level
for name in list(logging.root.manager.loggerDict.keys()):
logger_obj = logging.getLogger(name)
logger_obj.handlers = []
logger_obj.setLevel(logging.CRITICAL)
logger_obj.addHandler(stderr_handler)
logger_obj.propagate = False
# Configure MCP server logging before any browser_use imports to capture early log lines
_configure_mcp_server_logging()
# Additional suppression - disable all logging completely for MCP mode
logging.disable(logging.CRITICAL)
# Import browser_use modules
from browser_use import ActionModel, Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.config import get_default_llm, get_default_profile, load_browser_use_config
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.openai.chat import ChatOpenAI
from browser_use.tools.service import Tools
logger = logging.getLogger(__name__)
def _ensure_all_loggers_use_stderr():
"""Ensure ALL loggers only output to stderr, not stdout."""
# Get the stderr handler
stderr_handler = None
for handler in logging.root.handlers:
if hasattr(handler, 'stream') and handler.stream == sys.stderr: # type: ignore
stderr_handler = handler
break
if not stderr_handler:
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# Configure root logger
logging.root.handlers = [stderr_handler]
logging.root.setLevel(logging.CRITICAL)
# Configure all existing loggers
for name in list(logging.root.manager.loggerDict.keys()):
logger_obj = logging.getLogger(name)
logger_obj.handlers = [stderr_handler]
logger_obj.setLevel(logging.CRITICAL)
logger_obj.propagate = False
# Ensure stderr logging after all imports
_ensure_all_loggers_use_stderr()
# Try to import MCP SDK
try:
import mcp.server.stdio
import mcp.types as types
from mcp.server import NotificationOptions, Server
from mcp.server.models import InitializationOptions
MCP_AVAILABLE = True
# Configure MCP SDK logging to stderr as well
mcp_logger = logging.getLogger('mcp')
mcp_logger.handlers = []
mcp_logger.addHandler(logging.root.handlers[0] if logging.root.handlers else logging.StreamHandler(sys.stderr))
mcp_logger.setLevel(logging.ERROR)
mcp_logger.propagate = False
except ImportError:
MCP_AVAILABLE = False
logger.error('MCP SDK not installed. Install with: pip install mcp')
sys.exit(1)
from browser_use.telemetry import MCPServerTelemetryEvent, ProductTelemetry
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
def get_parent_process_cmdline() -> str | None:
"""Get the command line of all parent processes up the chain."""
if not PSUTIL_AVAILABLE:
return None
try:
cmdlines = []
current_process = psutil.Process()
parent = current_process.parent()
while parent:
try:
cmdline = parent.cmdline()
if cmdline:
cmdlines.append(' '.join(cmdline))
except (psutil.AccessDenied, psutil.NoSuchProcess):
# Skip processes we can't access (like system processes)
pass
try:
parent = parent.parent()
except (psutil.AccessDenied, psutil.NoSuchProcess):
# Can't go further up the chain
break
return ';'.join(cmdlines) if cmdlines else None
except Exception:
# If we can't get parent process info, just return None
return None
class BrowserUseServer:
"""MCP Server for browser-use capabilities."""
def __init__(self, session_timeout_minutes: int = 10):
# Ensure all logging goes to stderr (in case new loggers were created)
_ensure_all_loggers_use_stderr()
self.server = Server('browser-use')
self.config = load_browser_use_config()
self.agent: Agent | None = None
self.browser_session: BrowserSession | None = None
self.tools: Tools | None = None
self.llm: ChatOpenAI | None = None
self.file_system: FileSystem | None = None
self._telemetry = ProductTelemetry()
self._start_time = time.time()
# Session management
self.active_sessions: dict[str, dict[str, Any]] = {} # session_id -> session info
self.session_timeout_minutes = session_timeout_minutes
self._cleanup_task: Any = None
# Setup handlers
self._setup_handlers()
def _setup_handlers(self):
"""Setup MCP server handlers."""
@self.server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
"""List all available browser-use tools."""
return [
# Agent tools
# Direct browser control tools
types.Tool(
name='browser_navigate',
description='Navigate to a URL in the browser',
inputSchema={
'type': 'object',
'properties': {
'url': {'type': 'string', 'description': 'The URL to navigate to'},
'new_tab': {'type': 'boolean', 'description': 'Whether to open in a new tab', 'default': False},
},
'required': ['url'],
},
),
types.Tool(
name='browser_click',
description='Click an element on the page by its index',
inputSchema={
'type': 'object',
'properties': {
'index': {
'type': 'integer',
'description': 'The index of the link or element to click (from browser_get_state)',
},
'new_tab': {
'type': 'boolean',
'description': 'Whether to open any resulting navigation in a new tab',
'default': False,
},
},
'required': ['index'],
},
),
types.Tool(
name='browser_type',
description='Type text into an input field',
inputSchema={
'type': 'object',
'properties': {
'index': {
'type': 'integer',
'description': 'The index of the input element (from browser_get_state)',
},
'text': {'type': 'string', 'description': 'The text to type'},
},
'required': ['index', 'text'],
},
),
types.Tool(
name='browser_get_state',
description='Get the current state of the page including all interactive elements',
inputSchema={
'type': 'object',
'properties': {
'include_screenshot': {
'type': 'boolean',
'description': 'Whether to include a screenshot of the current page',
'default': False,
}
},
},
),
types.Tool(
name='browser_extract_content',
description='Extract structured content from the current page based on a query',
inputSchema={
'type': 'object',
'properties': {
'query': {'type': 'string', 'description': 'What information to extract from the page'},
'extract_links': {
'type': 'boolean',
'description': 'Whether to include links in the extraction',
'default': False,
},
},
'required': ['query'],
},
),
types.Tool(
name='browser_scroll',
description='Scroll the page',
inputSchema={
'type': 'object',
'properties': {
'direction': {
'type': 'string',
'enum': ['up', 'down'],
'description': 'Direction to scroll',
'default': 'down',
}
},
},
),
types.Tool(
name='browser_go_back',
description='Go back to the previous page',
inputSchema={'type': 'object', 'properties': {}},
),
# Tab management
types.Tool(
name='browser_list_tabs', description='List all open tabs', inputSchema={'type': 'object', 'properties': {}}
),
types.Tool(
name='browser_switch_tab',
description='Switch to a different tab',
inputSchema={
'type': 'object',
'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to switch to'}},
'required': ['tab_id'],
},
),
types.Tool(
name='browser_close_tab',
description='Close a tab',
inputSchema={
'type': 'object',
'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to close'}},
'required': ['tab_id'],
},
),
# types.Tool(
# name="browser_close",
# description="Close the browser session",
# inputSchema={
# "type": "object",
# "properties": {}
# }
# ),
types.Tool(
name='retry_with_browser_use_agent',
description='Retry a task using the browser-use agent. Only use this as a last resort if you fail to interact with a page multiple times.',
inputSchema={
'type': 'object',
'properties': {
'task': {
'type': 'string',
'description': 'The high-level goal and detailed step-by-step description of the task the AI browser agent needs to attempt, along with any relevant data needed to complete the task and info about previous attempts.',
},
'max_steps': {
'type': 'integer',
'description': 'Maximum number of steps an agent can take.',
'default': 100,
},
'model': {
'type': 'string',
'description': 'LLM model to use (e.g., gpt-4o, claude-3-opus-20240229)',
'default': 'gpt-4o',
},
'allowed_domains': {
'type': 'array',
'items': {'type': 'string'},
'description': 'List of domains the agent is allowed to visit (security feature)',
'default': [],
},
'use_vision': {
'type': 'boolean',
'description': 'Whether to use vision capabilities (screenshots) for the agent',
'default': True,
},
},
'required': ['task'],
},
),
# Browser session management tools
types.Tool(
name='browser_list_sessions',
description='List all active browser sessions with their details and last activity time',
inputSchema={'type': 'object', 'properties': {}},
),
types.Tool(
name='browser_close_session',
description='Close a specific browser session by its ID',
inputSchema={
'type': 'object',
'properties': {
'session_id': {
'type': 'string',
'description': 'The browser session ID to close (get from browser_list_sessions)',
}
},
'required': ['session_id'],
},
),
types.Tool(
name='browser_close_all',
description='Close all active browser sessions and clean up resources',
inputSchema={'type': 'object', 'properties': {}},
),
]
@self.server.list_resources()
async def handle_list_resources() -> list[types.Resource]:
"""List available resources (none for browser-use)."""
return []
@self.server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
"""List available prompts (none for browser-use)."""
return []
@self.server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any] | None) -> list[types.TextContent]:
"""Handle tool execution."""
start_time = time.time()
error_msg = None
try:
result = await self._execute_tool(name, arguments or {})
return [types.TextContent(type='text', text=result)]
except Exception as e:
error_msg = str(e)
logger.error(f'Tool execution failed: {e}', exc_info=True)
return [types.TextContent(type='text', text=f'Error: {str(e)}')]
finally:
# Capture telemetry for tool calls
duration = time.time() - start_time
self._telemetry.capture(
MCPServerTelemetryEvent(
version=get_browser_use_version(),
action='tool_call',
tool_name=name,
duration_seconds=duration,
error_message=error_msg,
)
)
async def _execute_tool(self, tool_name: str, arguments: dict[str, Any]) -> str:
"""Execute a browser-use tool."""
# Agent-based tools
if tool_name == 'retry_with_browser_use_agent':
return await self._retry_with_browser_use_agent(
task=arguments['task'],
max_steps=arguments.get('max_steps', 100),
model=arguments.get('model', 'gpt-4o'),
allowed_domains=arguments.get('allowed_domains', []),
use_vision=arguments.get('use_vision', True),
)
# Browser session management tools (don't require active session)
if tool_name == 'browser_list_sessions':
return await self._list_sessions()
elif tool_name == 'browser_close_session':
return await self._close_session(arguments['session_id'])
elif tool_name == 'browser_close_all':
return await self._close_all_sessions()
# Direct browser control tools (require active session)
elif tool_name.startswith('browser_'):
# Ensure browser session exists
if not self.browser_session:
await self._init_browser_session()
if tool_name == 'browser_navigate':
return await self._navigate(arguments['url'], arguments.get('new_tab', False))
elif tool_name == 'browser_click':
return await self._click(arguments['index'], arguments.get('new_tab', False))
elif tool_name == 'browser_type':
return await self._type_text(arguments['index'], arguments['text'])
elif tool_name == 'browser_get_state':
return await self._get_browser_state(arguments.get('include_screenshot', False))
elif tool_name == 'browser_extract_content':
return await self._extract_content(arguments['query'], arguments.get('extract_links', False))
elif tool_name == 'browser_scroll':
return await self._scroll(arguments.get('direction', 'down'))
elif tool_name == 'browser_go_back':
return await self._go_back()
elif tool_name == 'browser_close':
return await self._close_browser()
elif tool_name == 'browser_list_tabs':
return await self._list_tabs()
elif tool_name == 'browser_switch_tab':
return await self._switch_tab(arguments['tab_id'])
elif tool_name == 'browser_close_tab':
return await self._close_tab(arguments['tab_id'])
return f'Unknown tool: {tool_name}'
async def _init_browser_session(self, allowed_domains: list[str] | None = None, **kwargs):
"""Initialize browser session using config"""
if self.browser_session:
return
# Ensure all logging goes to stderr before browser initialization
_ensure_all_loggers_use_stderr()
logger.debug('Initializing browser session...')
# Get profile config
profile_config = get_default_profile(self.config)
# Merge profile config with defaults and overrides
profile_data = {
'downloads_path': str(Path.home() / 'Downloads' / 'browser-use-mcp'),
'wait_between_actions': 0.5,
'keep_alive': True,
'user_data_dir': '~/.config/browseruse/profiles/default',
'device_scale_factor': 1.0,
'disable_security': False,
'headless': False,
**profile_config, # Config values override defaults
}
# Tool parameter overrides (highest priority)
if allowed_domains is not None:
profile_data['allowed_domains'] = allowed_domains
# Merge any additional kwargs that are valid BrowserProfile fields
for key, value in kwargs.items():
profile_data[key] = value
# Create browser profile
profile = BrowserProfile(**profile_data)
# Create browser session
self.browser_session = BrowserSession(browser_profile=profile)
await self.browser_session.start()
# Track the session for management
self._track_session(self.browser_session)
# Create tools for direct actions
self.tools = Tools()
# Initialize LLM from config
llm_config = get_default_llm(self.config)
base_url = llm_config.get('base_url', None)
kwargs = {}
if base_url:
kwargs['base_url'] = base_url
if api_key := llm_config.get('api_key'):
self.llm = ChatOpenAI(
model=llm_config.get('model', 'gpt-o4-mini'),
api_key=api_key,
temperature=llm_config.get('temperature', 0.7),
**kwargs,
)
# Initialize FileSystem for extraction actions
file_system_path = profile_config.get('file_system_path', '~/.browser-use-mcp')
self.file_system = FileSystem(base_dir=Path(file_system_path).expanduser())
logger.debug('Browser session initialized')
async def _retry_with_browser_use_agent(
self,
task: str,
max_steps: int = 100,
model: str = 'gpt-4o',
allowed_domains: list[str] | None = None,
use_vision: bool = True,
) -> str:
"""Run an autonomous agent task."""
logger.debug(f'Running agent task: {task}')
# Get LLM config
llm_config = get_default_llm(self.config)
# Get LLM provider
model_provider = llm_config.get('model_provider') or os.getenv('MODEL_PROVIDER')
# 如果model_provider不等于空,且等Bedrock
if model_provider and model_provider.lower() == 'bedrock':
llm_model = llm_config.get('model') or os.getenv('MODEL') or 'us.anthropic.claude-sonnet-4-20250514-v1:0'
aws_region = llm_config.get('region') or os.getenv('REGION')
if not aws_region:
aws_region = 'us-east-1'
llm = ChatAWSBedrock(
model=llm_model, # or any Bedrock model
aws_region=aws_region,
aws_sso_auth=True,
)
else:
api_key = llm_config.get('api_key') or os.getenv('OPENAI_API_KEY')
if not api_key:
return 'Error: OPENAI_API_KEY not set in config or environment'
# Override model if provided in tool call
if model != llm_config.get('model', 'gpt-4o'):
llm_model = model
else:
llm_model = llm_config.get('model', 'gpt-4o')
base_url = llm_config.get('base_url', None)
kwargs = {}
if base_url:
kwargs['base_url'] = base_url
llm = ChatOpenAI(
model=llm_model,
api_key=api_key,
temperature=llm_config.get('temperature', 0.7),
**kwargs,
)
# Get profile config and merge with tool parameters
profile_config = get_default_profile(self.config)
# Override allowed_domains if provided in tool call
if allowed_domains is not None:
profile_config['allowed_domains'] = allowed_domains
# Create browser profile using config
profile = BrowserProfile(**profile_config)
# Create and run agent
agent = Agent(
task=task,
llm=llm,
browser_profile=profile,
use_vision=use_vision,
)
try:
history = await agent.run(max_steps=max_steps)
# Format results
results = []
results.append(f'Task completed in {len(history.history)} steps')
results.append(f'Success: {history.is_successful()}')
# Get final result if available
final_result = history.final_result()
if final_result:
results.append(f'\nFinal result:\n{final_result}')
# Include any errors
errors = history.errors()
if errors:
results.append(f'\nErrors encountered:\n{json.dumps(errors, indent=2)}')
# Include URLs visited
urls = history.urls()
if urls:
# Filter out None values and convert to strings
valid_urls = [str(url) for url in urls if url is not None]
if valid_urls:
results.append(f'\nURLs visited: {", ".join(valid_urls)}')
return '\n'.join(results)
except Exception as e:
logger.error(f'Agent task failed: {e}', exc_info=True)
return f'Agent task failed: {str(e)}'
finally:
# Clean up
await agent.close()
async def _navigate(self, url: str, new_tab: bool = False) -> str:
"""Navigate to a URL."""
if not self.browser_session:
return 'Error: No browser session active'
# Update session activity
self._update_session_activity(self.browser_session.id)
from browser_use.browser.events import NavigateToUrlEvent
if new_tab:
event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url, new_tab=True))
await event
return f'Opened new tab with URL: {url}'
else:
event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url))
await event
return f'Navigated to: {url}'
async def _click(self, index: int, new_tab: bool = False) -> str:
"""Click an element by index."""
if not self.browser_session:
return 'Error: No browser session active'
# Update session activity
self._update_session_activity(self.browser_session.id)
# Get the element
element = await self.browser_session.get_dom_element_by_index(index)
if not element:
return f'Element with index {index} not found'
if new_tab:
# For links, extract href and open in new tab
href = element.attributes.get('href')
if href:
# Convert relative href to absolute URL
state = await self.browser_session.get_browser_state_summary()
current_url = state.url
if href.startswith('/'):
# Relative URL - construct full URL
from urllib.parse import urlparse
parsed = urlparse(current_url)
full_url = f'{parsed.scheme}://{parsed.netloc}{href}'
else:
full_url = href
# Open link in new tab
from browser_use.browser.events import NavigateToUrlEvent
event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=full_url, new_tab=True))
await event
return f'Clicked element {index} and opened in new tab {full_url[:20]}...'
else:
# For non-link elements, just do a normal click
# Opening in new tab without href is not reliably supported
from browser_use.browser.events import ClickElementEvent
event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
await event
return f'Clicked element {index} (new tab not supported for non-link elements)'
else:
# Normal click
from browser_use.browser.events import ClickElementEvent
event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
await event
return f'Clicked element {index}'
async def _type_text(self, index: int, text: str) -> str:
"""Type text into an element."""
if not self.browser_session:
return 'Error: No browser session active'
element = await self.browser_session.get_dom_element_by_index(index)
if not element:
return f'Element with index {index} not found'
from browser_use.browser.events import TypeTextEvent
# Conservative heuristic to detect potentially sensitive data
# Only flag very obvious patterns to minimize false positives
is_potentially_sensitive = len(text) >= 6 and (
# Email pattern: contains @ and a domain-like suffix
('@' in text and '.' in text.split('@')[-1] if '@' in text else False)
# Mixed alphanumeric with reasonable complexity (likely API keys/tokens)
or (
len(text) >= 16
and any(char.isdigit() for char in text)
and any(char.isalpha() for char in text)
and any(char in '.-_' for char in text)
)
)
# Use generic key names to avoid information leakage about detection patterns
sensitive_key_name = None
if is_potentially_sensitive:
if '@' in text and '.' in text.split('@')[-1]:
sensitive_key_name = 'email'
else:
sensitive_key_name = 'credential'
event = self.browser_session.event_bus.dispatch(
TypeTextEvent(node=element, text=text, is_sensitive=is_potentially_sensitive, sensitive_key_name=sensitive_key_name)
)
await event
if is_potentially_sensitive:
if sensitive_key_name:
return f'Typed <{sensitive_key_name}> into element {index}'
else:
return f'Typed <sensitive> into element {index}'
else:
return f"Typed '{text}' into element {index}"
async def _get_browser_state(self, include_screenshot: bool = False) -> str:
"""Get current browser state."""
if not self.browser_session:
return 'Error: No browser session active'
state = await self.browser_session.get_browser_state_summary()
result = {
'url': state.url,
'title': state.title,
'tabs': [{'url': tab.url, 'title': tab.title} for tab in state.tabs],
'interactive_elements': [],
}
# Add interactive elements with their indices
for index, element in state.dom_state.selector_map.items():
elem_info = {
'index': index,
'tag': element.tag_name,
'text': element.get_all_children_text(max_depth=2)[:100],
}
if element.attributes.get('placeholder'):
elem_info['placeholder'] = element.attributes['placeholder']
if element.attributes.get('href'):
elem_info['href'] = element.attributes['href']
result['interactive_elements'].append(elem_info)
if include_screenshot and state.screenshot:
result['screenshot'] = state.screenshot
return json.dumps(result, indent=2)
async def _extract_content(self, query: str, extract_links: bool = False) -> str:
"""Extract content from current page."""
if not self.llm:
return 'Error: LLM not initialized (set OPENAI_API_KEY)'
if not self.file_system:
return 'Error: FileSystem not initialized'
if not self.browser_session:
return 'Error: No browser session active'
if not self.tools:
return 'Error: Tools not initialized'
state = await self.browser_session.get_browser_state_summary()
# Use the extract action
# Create a dynamic action model that matches the tools's expectations
from pydantic import create_model
# Create action model dynamically
ExtractAction = create_model(
'ExtractAction',
__base__=ActionModel,
extract=dict[str, Any],
)
# Use model_validate because Pyright does not understand the dynamic model
action = ExtractAction.model_validate(
{
'extract': {'query': query, 'extract_links': extract_links},
}
)
action_result = await self.tools.act(
action=action,
browser_session=self.browser_session,
page_extraction_llm=self.llm,
file_system=self.file_system,
)
return action_result.extracted_content or 'No content extracted'
async def _scroll(self, direction: str = 'down') -> str:
"""Scroll the page."""
if not self.browser_session:
return 'Error: No browser session active'
from browser_use.browser.events import ScrollEvent
# Scroll by a standard amount (500 pixels)
event = self.browser_session.event_bus.dispatch(
ScrollEvent(
direction=direction, # type: ignore
amount=500,
)
)
await event
return f'Scrolled {direction}'
async def _go_back(self) -> str:
"""Go back in browser history."""
if not self.browser_session:
return 'Error: No browser session active'
from browser_use.browser.events import GoBackEvent
event = self.browser_session.event_bus.dispatch(GoBackEvent())
await event
return 'Navigated back'
async def _close_browser(self) -> str:
"""Close the browser session."""
if self.browser_session:
from browser_use.browser.events import BrowserStopEvent
event = self.browser_session.event_bus.dispatch(BrowserStopEvent())
await event
self.browser_session = None
self.tools = None
return 'Browser closed'
return 'No browser session to close'
async def _list_tabs(self) -> str:
"""List all open tabs."""
if not self.browser_session:
return 'Error: No browser session active'
tabs_info = await self.browser_session.get_tabs()
tabs = []
for i, tab in enumerate(tabs_info):
tabs.append({'tab_id': tab.target_id[-4:], 'url': tab.url, 'title': tab.title or ''})
return json.dumps(tabs, indent=2)
async def _switch_tab(self, tab_id: str) -> str:
"""Switch to a different tab."""
if not self.browser_session:
return 'Error: No browser session active'
from browser_use.browser.events import SwitchTabEvent
target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
event = self.browser_session.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
await event
state = await self.browser_session.get_browser_state_summary()
return f'Switched to tab {tab_id}: {state.url}'
async def _close_tab(self, tab_id: str) -> str:
"""Close a specific tab."""
if not self.browser_session:
return 'Error: No browser session active'
from browser_use.browser.events import CloseTabEvent
target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
event = self.browser_session.event_bus.dispatch(CloseTabEvent(target_id=target_id))
await event
current_url = await self.browser_session.get_current_page_url()
return f'Closed tab # {tab_id}, now on {current_url}'
def _track_session(self, session: BrowserSession) -> None:
"""Track a browser session for management."""
self.active_sessions[session.id] = {
'session': session,
'created_at': time.time(),
'last_activity': time.time(),
'url': getattr(session, 'current_url', None),
}
def _update_session_activity(self, session_id: str) -> None:
"""Update the last activity time for a session."""
if session_id in self.active_sessions:
self.active_sessions[session_id]['last_activity'] = time.time()
async def _list_sessions(self) -> str:
"""List all active browser sessions."""
if not self.active_sessions:
return 'No active browser sessions'
sessions_info = []
for session_id, session_data in self.active_sessions.items():
session = session_data['session']
created_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(session_data['created_at']))
last_activity = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(session_data['last_activity']))
# Check if session is still active
is_active = hasattr(session, 'cdp_client') and session.cdp_client is not None
sessions_info.append(
{
'session_id': session_id,
'created_at': created_at,
'last_activity': last_activity,
'active': is_active,
'current_url': session_data.get('url', 'Unknown'),
'age_minutes': (time.time() - session_data['created_at']) / 60,
}
)
return json.dumps(sessions_info, indent=2)
async def _close_session(self, session_id: str) -> str:
"""Close a specific browser session."""
if session_id not in self.active_sessions:
return f'Session {session_id} not found'
session_data = self.active_sessions[session_id]
session = session_data['session']
try:
# Close the session
if hasattr(session, 'kill'):
await session.kill()
elif hasattr(session, 'close'):
await session.close()
# Remove from tracking
del self.active_sessions[session_id]
# If this was the current session, clear it
if self.browser_session and self.browser_session.id == session_id:
self.browser_session = None
self.tools = None
return f'Successfully closed session {session_id}'
except Exception as e:
return f'Error closing session {session_id}: {str(e)}'
async def _close_all_sessions(self) -> str:
"""Close all active browser sessions."""
if not self.active_sessions:
return 'No active sessions to close'
closed_count = 0
errors = []
for session_id in list(self.active_sessions.keys()):
try:
result = await self._close_session(session_id)
if 'Successfully closed' in result:
closed_count += 1
else:
errors.append(f'{session_id}: {result}')
except Exception as e:
errors.append(f'{session_id}: {str(e)}')
# Clear current session references
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sync/service.py | browser_use/sync/service.py | """
Cloud sync service for sending events to the Browser Use cloud.
"""
import logging
import httpx
from bubus import BaseEvent
from browser_use.config import CONFIG
from browser_use.sync.auth import TEMP_USER_ID, DeviceAuthClient
logger = logging.getLogger(__name__)
class CloudSync:
"""Service for syncing events to the Browser Use cloud"""
def __init__(self, base_url: str | None = None, allow_session_events_for_auth: bool = False):
# Backend API URL for all API requests - can be passed directly or defaults to env var
self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
self.auth_client = DeviceAuthClient(base_url=self.base_url)
self.session_id: str | None = None
self.allow_session_events_for_auth = allow_session_events_for_auth
self.auth_flow_active = False # Flag to indicate auth flow is running
# Check if cloud sync is actually enabled - if not, we should remain silent
self.enabled = CONFIG.BROWSER_USE_CLOUD_SYNC
async def handle_event(self, event: BaseEvent) -> None:
"""Handle an event by sending it to the cloud"""
try:
# If cloud sync is disabled, don't handle any events
if not self.enabled:
return
# Extract session ID from CreateAgentSessionEvent
if event.event_type == 'CreateAgentSessionEvent' and hasattr(event, 'id'):
self.session_id = str(event.id) # type: ignore
# Send events based on authentication status and context
if self.auth_client.is_authenticated:
# User is authenticated - send all events
await self._send_event(event)
elif self.allow_session_events_for_auth:
# Special case: allow ALL events during auth flow
await self._send_event(event)
# Mark auth flow as active when we see a session event
if event.event_type == 'CreateAgentSessionEvent':
self.auth_flow_active = True
else:
# User is not authenticated and no auth in progress - don't send anything
logger.debug(f'Skipping event {event.event_type} - user not authenticated')
except Exception as e:
logger.error(f'Failed to handle {event.event_type} event: {type(e).__name__}: {e}', exc_info=True)
async def _send_event(self, event: BaseEvent) -> None:
"""Send event to cloud API"""
try:
headers = {}
# Override user_id only if it's not already set to a specific value
# This allows CLI and other code to explicitly set temp user_id when needed
if self.auth_client and self.auth_client.is_authenticated:
# Only override if we're fully authenticated and event doesn't have temp user_id
current_user_id = getattr(event, 'user_id', None)
if current_user_id != TEMP_USER_ID:
setattr(event, 'user_id', str(self.auth_client.user_id))
else:
# Set temp user_id if not already set
if not hasattr(event, 'user_id') or not getattr(event, 'user_id', None):
setattr(event, 'user_id', TEMP_USER_ID)
# Add auth headers if available
if self.auth_client:
headers.update(self.auth_client.get_headers())
# Send event (batch format with direct BaseEvent serialization)
async with httpx.AsyncClient() as client:
# Serialize event and add device_id to all events
event_data = event.model_dump(mode='json')
if self.auth_client and self.auth_client.device_id:
event_data['device_id'] = self.auth_client.device_id
response = await client.post(
f'{self.base_url.rstrip("/")}/api/v1/events',
json={'events': [event_data]},
headers=headers,
timeout=10.0,
)
if response.status_code >= 400:
# Log error but don't raise - we want to fail silently
logger.debug(
f'Failed to send sync event: POST {response.request.url} {response.status_code} - {response.text}'
)
except httpx.TimeoutException:
logger.debug(f'Event send timed out after 10 seconds: {event}')
except httpx.ConnectError as e:
# logger.warning(f'⚠️ Failed to connect to cloud service at {self.base_url}: {e}')
pass
except httpx.HTTPError as e:
logger.debug(f'HTTP error sending event {event}: {type(e).__name__}: {e}')
except Exception as e:
logger.debug(f'Unexpected error sending event {event}: {type(e).__name__}: {e}')
# async def _update_wal_user_ids(self, session_id: str) -> None:
# """Update user IDs in WAL file after authentication"""
# try:
# assert self.auth_client, 'Cloud sync must be authenticated to update WAL user ID'
# wal_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'events' / f'{session_id}.jsonl'
# if not await anyio.Path(wal_path).exists():
# raise FileNotFoundError(
# f'CloudSync failed to update saved event user_ids after auth: Agent EventBus WAL file not found: {wal_path}'
# )
# # Read all events
# events = []
# content = await anyio.Path(wal_path).read_text()
# for line in content.splitlines():
# if line.strip():
# events.append(json.loads(line))
# # Update user_id and device_id
# user_id = self.auth_client.user_id
# device_id = self.auth_client.device_id
# for event in events:
# if 'user_id' in event:
# event['user_id'] = user_id
# # Add device_id to all events
# event['device_id'] = device_id
# # Write back
# updated_content = '\n'.join(json.dumps(event) for event in events) + '\n'
# await anyio.Path(wal_path).write_text(updated_content)
# except Exception as e:
# logger.warning(f'Failed to update WAL user IDs: {e}')
def set_auth_flow_active(self) -> None:
"""Mark auth flow as active to allow all events"""
self.auth_flow_active = True
async def authenticate(self, show_instructions: bool = True) -> bool:
"""Authenticate with the cloud service"""
# If cloud sync is disabled, don't authenticate
if not self.enabled:
return False
# Check if already authenticated first
if self.auth_client.is_authenticated:
import logging
logger = logging.getLogger(__name__)
if show_instructions:
logger.info('✅ Already authenticated! Skipping OAuth flow.')
return True
# Not authenticated - run OAuth flow
return await self.auth_client.authenticate(agent_session_id=self.session_id, show_instructions=show_instructions)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sync/__init__.py | browser_use/sync/__init__.py | """Cloud sync module for Browser Use."""
from browser_use.sync.auth import CloudAuthConfig, DeviceAuthClient
from browser_use.sync.service import CloudSync
__all__ = ['CloudAuthConfig', 'DeviceAuthClient', 'CloudSync']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/sync/auth.py | browser_use/sync/auth.py | """
OAuth2 Device Authorization Grant flow client for browser-use.
"""
import asyncio
import json
import os
import shutil
import time
from datetime import datetime
import httpx
from pydantic import BaseModel
from uuid_extensions import uuid7str
from browser_use.config import CONFIG
# Temporary user ID for pre-auth events (matches cloud backend)
TEMP_USER_ID = '99999999-9999-9999-9999-999999999999'
def get_or_create_device_id() -> str:
	"""Get or create a persistent device ID for this installation."""
	id_file = CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id'
	# Reuse the previously persisted ID when it is present and readable.
	if id_file.exists():
		try:
			existing = id_file.read_text().strip()
		except Exception:
			# Unreadable file -> fall through and mint a fresh ID below.
			existing = ''
		if existing:
			return existing
	# First run (or empty/corrupted file): mint a fresh UUIDv7 and persist it.
	fresh = uuid7str()
	CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
	id_file.write_text(fresh)
	return fresh
class CloudAuthConfig(BaseModel):
	"""Configuration for cloud authentication"""

	api_token: str | None = None
	user_id: str | None = None
	authorized_at: datetime | None = None

	@classmethod
	def load_from_file(cls) -> 'CloudAuthConfig':
		"""Load auth config from local file"""
		path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
		if not path.exists():
			return cls()
		try:
			with open(path) as fh:
				return cls.model_validate(json.load(fh))
		except Exception:
			# Corrupted or unreadable file -> behave as if never authenticated.
			return cls()

	def save_to_file(self) -> None:
		"""Save auth config to local file"""
		CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
		path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
		with open(path, 'w') as fh:
			json.dump(self.model_dump(mode='json'), fh, indent=2, default=str)
		try:
			# Tokens are secrets: restrict the file to owner read/write only.
			os.chmod(path, 0o600)
		except Exception:
			# Some systems may not support chmod, continue anyway
			pass
class DeviceAuthClient:
	"""Client for the OAuth2 Device Authorization Grant flow (RFC 8628).

	Requests a device/user code from the backend, polls the token endpoint while
	the user approves the request in their browser, and persists the resulting
	credentials via `CloudAuthConfig`. The HTTP request/polling logic is shared
	between the injected-client and per-request-client paths via private helpers
	(previously the two paths were ~100 duplicated lines).
	"""

	def __init__(self, base_url: str | None = None, http_client: httpx.AsyncClient | None = None):
		# Backend API URL for OAuth requests - can be passed directly or defaults to env var
		self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
		self.client_id = 'library'
		self.scope = 'read write'
		# If no client provided, we'll create one per request
		self.http_client = http_client
		# Temporary user ID for pre-auth events
		self.temp_user_id = TEMP_USER_ID
		# Get or create persistent device ID
		self.device_id = get_or_create_device_id()
		# Load existing auth if available
		self.auth_config = CloudAuthConfig.load_from_file()

	@property
	def is_authenticated(self) -> bool:
		"""Check if we have valid authentication"""
		return bool(self.auth_config.api_token and self.auth_config.user_id)

	@property
	def api_token(self) -> str | None:
		"""Get the current API token"""
		return self.auth_config.api_token

	@property
	def user_id(self) -> str:
		"""Get the current user ID (temporary or real)"""
		return self.auth_config.user_id or self.temp_user_id

	async def _request_device_authorization(self, client: httpx.AsyncClient, agent_session_id: str | None) -> dict:
		"""POST to the device-authorization endpoint with `client` and return the parsed JSON.

		Shared by both branches of `start_device_authorization` so the request is
		defined exactly once. Raises `httpx.HTTPStatusError` on non-2xx responses.
		"""
		response = await client.post(
			f'{self.base_url.rstrip("/")}/api/v1/oauth/device/authorize',
			data={
				'client_id': self.client_id,
				'scope': self.scope,
				'agent_session_id': agent_session_id or '',
				'device_id': self.device_id,
			},
		)
		response.raise_for_status()
		return response.json()

	async def start_device_authorization(
		self,
		agent_session_id: str | None = None,
	) -> dict:
		"""
		Start the device authorization flow.
		Returns device authorization details including user code and verification URL.
		"""
		if self.http_client:
			return await self._request_device_authorization(self.http_client, agent_session_id)
		# No injected client: create a short-lived one for this single request.
		async with httpx.AsyncClient() as client:
			return await self._request_device_authorization(client, agent_session_id)

	async def _poll_loop(
		self,
		client: httpx.AsyncClient,
		device_code: str,
		interval: float,
		timeout: float,
		start_time: float,
	) -> dict | None:
		"""Poll the token endpoint with `client` until success, fatal error, or timeout.

		Implements the RFC 8628 polling states: `authorization_pending` keeps
		waiting, `slow_down` grows the interval, any other error aborts with None.
		Returns the token payload dict on success, None otherwise.
		"""
		while time.time() - start_time < timeout:
			try:
				response = await client.post(
					f'{self.base_url.rstrip("/")}/api/v1/oauth/device/token',
					data={
						'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
						'device_code': device_code,
						'client_id': self.client_id,
					},
				)
				if response.status_code == 200:
					data = response.json()
					# Check for pending authorization
					if data.get('error') == 'authorization_pending':
						await asyncio.sleep(interval)
						continue
					# Check for slow down
					if data.get('error') == 'slow_down':
						interval = data.get('interval', interval * 2)
						await asyncio.sleep(interval)
						continue
					# Check for other errors
					if 'error' in data:
						print(f'Error: {data.get("error_description", data["error"])}')
						return None
					# Success! We have a token
					if 'access_token' in data:
						return data
				elif response.status_code == 400:
					# Error response; pending/slow_down are still retryable here
					data = response.json()
					if data.get('error') not in ['authorization_pending', 'slow_down']:
						print(f'Error: {data.get("error_description", "Unknown error")}')
						return None
				else:
					print(f'Unexpected status code: {response.status_code}')
					return None
			except Exception as e:
				# Transient network failure: report and keep polling.
				print(f'Error polling for token: {e}')
				await asyncio.sleep(interval)
		return None

	async def poll_for_token(
		self,
		device_code: str,
		interval: float = 3.0,
		timeout: float = 1800.0,
	) -> dict | None:
		"""
		Poll for the access token.
		Returns token info when authorized, None if timeout.
		"""
		start_time = time.time()
		if self.http_client:
			# Use injected client for all requests
			return await self._poll_loop(self.http_client, device_code, interval, timeout, start_time)
		# Create a new client for polling
		async with httpx.AsyncClient() as client:
			return await self._poll_loop(client, device_code, interval, timeout, start_time)

	async def authenticate(
		self,
		agent_session_id: str | None = None,
		show_instructions: bool = True,
	) -> bool:
		"""
		Run the full authentication flow.
		Returns True if authentication successful.
		"""
		import logging

		logger = logging.getLogger(__name__)
		try:
			# Start device authorization
			device_auth = await self.start_device_authorization(agent_session_id)
			# Use frontend URL for user-facing links
			frontend_url = CONFIG.BROWSER_USE_CLOUD_UI_URL or self.base_url.replace('//api.', '//cloud.')
			# Replace backend URL with frontend URL in the verification URI shown to the user
			verification_uri_complete = device_auth['verification_uri_complete'].replace(self.base_url, frontend_url)
			terminal_width, _terminal_height = shutil.get_terminal_size((80, 20))
			if show_instructions and CONFIG.BROWSER_USE_CLOUD_SYNC:
				logger.info('─' * max(terminal_width - 40, 20))
				logger.info('🌐 View the details of this run in Browser Use Cloud:')
				logger.info(f' 👉 {verification_uri_complete}')
				logger.info('─' * max(terminal_width - 40, 20) + '\n')
			# Poll for token
			token_data = await self.poll_for_token(
				device_code=device_auth['device_code'],
				interval=device_auth.get('interval', 5),
			)
			if token_data and token_data.get('access_token'):
				# Save authentication
				self.auth_config.api_token = token_data['access_token']
				self.auth_config.user_id = token_data.get('user_id', self.temp_user_id)
				self.auth_config.authorized_at = datetime.now()
				self.auth_config.save_to_file()
				if show_instructions:
					logger.debug('✅ Authentication successful! Cloud sync is now enabled with your browser-use account.')
				return True
		except httpx.HTTPStatusError as e:
			# HTTP error with response
			if e.response.status_code == 404:
				logger.warning(
					'Cloud sync authentication endpoint not found (404). Check your BROWSER_USE_CLOUD_API_URL setting.'
				)
			else:
				logger.warning(f'Failed to authenticate with cloud service: HTTP {e.response.status_code} - {e.response.text}')
		except httpx.RequestError:
			# Connection/network errors are expected when offline - stay quiet.
			pass
		except Exception as e:
			# Other unexpected errors
			logger.warning(f'❌ Unexpected error during cloud sync authentication: {type(e).__name__}: {e}')
		if show_instructions:
			logger.debug(f'❌ Sync authentication failed or timed out with {CONFIG.BROWSER_USE_CLOUD_API_URL}')
		return False

	def get_headers(self) -> dict:
		"""Get headers for API requests"""
		if self.api_token:
			return {'Authorization': f'Bearer {self.api_token}'}
		return {}

	def clear_auth(self) -> None:
		"""Clear stored authentication"""
		self.auth_config = CloudAuthConfig()
		# Remove the config file entirely instead of saving empty values
		config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
		config_path.unlink(missing_ok=True)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/screenshots/service.py | browser_use/screenshots/service.py | """
Screenshot storage service for browser-use agents.
"""
import base64
from pathlib import Path
import anyio
from browser_use.observability import observe_debug
class ScreenshotService:
	"""Simple screenshot storage service that saves screenshots to disk"""

	def __init__(self, agent_directory: str | Path):
		"""Initialize with agent directory path"""
		self.agent_directory = agent_directory if isinstance(agent_directory, Path) else Path(agent_directory)
		# All screenshots live in a 'screenshots' subdirectory of the agent directory.
		self.screenshots_dir = self.agent_directory / 'screenshots'
		self.screenshots_dir.mkdir(parents=True, exist_ok=True)

	@observe_debug(ignore_input=True, ignore_output=True, name='store_screenshot')
	async def store_screenshot(self, screenshot_b64: str, step_number: int) -> str:
		"""Store screenshot to disk and return the full path as string"""
		target = self.screenshots_dir / f'step_{step_number}.png'
		# Decode the base64 payload and write the raw PNG bytes asynchronously.
		raw = base64.b64decode(screenshot_b64)
		async with await anyio.open_file(target, 'wb') as fh:
			await fh.write(raw)
		return str(target)

	@observe_debug(ignore_input=True, ignore_output=True, name='get_screenshot_from_disk')
	async def get_screenshot(self, screenshot_path: str) -> str | None:
		"""Load screenshot from disk path and return as base64"""
		if not screenshot_path:
			return None
		source = Path(screenshot_path)
		if not source.exists():
			return None
		# Read the raw bytes back and re-encode as base64 text.
		async with await anyio.open_file(source, 'rb') as fh:
			raw = await fh.read()
		return base64.b64encode(raw).decode('utf-8')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/browser_use/screenshots/__init__.py | browser_use/screenshots/__init__.py | # Screenshots package for browser-use
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/scripts/debug_iframe_scrolling.py | tests/scripts/debug_iframe_scrolling.py | """
Debug test for iframe scrolling issue where DOM tree only shows top elements after scrolling.
This test verifies that after scrolling inside an iframe, the selector_map correctly
contains lower input elements like City, State, Zip Code, etc.
"""
import asyncio
import sys
from pathlib import Path
# Add parent directory to path to import browser_use modules
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionModel
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.events import BrowserStateRequestEvent
# Import the mock LLM helper from conftest
from tests.ci.conftest import create_mock_llm
async def debug_iframe_scrolling():
	"""Debug iframe scrolling and DOM visibility issue.

	Drives a mock-LLM agent through navigate -> input_text -> scroll on a nested
	iframe stress-test page, capturing the serialized DOM selector_map after each
	step, then reports whether below-the-fold form fields (City/State/Zip) show up
	after scrolling inside the iframe.
	"""
	print('Starting iframe scrolling debug test...')
	# Create the sequence of actions for the mock LLM
	# We need to format these as the LLM would return them
	actions = [
		# First action: Navigate to the test URL
		"""
		{
			"thinking": "Navigating to the iframe test page",
			"evaluation_previous_goal": null,
			"memory": "Starting test",
			"next_goal": "Navigate to the iframe test page",
			"action": [
				{
					"navigate": {
						"url": "https://browser-use.github.io/stress-tests/challenges/iframe-inception-level1.html",
						"new_tab": false
					}
				}
			]
		}
		""",
		# Second action: Input text in the first name field (to verify we can interact)
		"""
		{
			"thinking": "Inputting text in the first name field to test interaction",
			"evaluation_previous_goal": "Successfully navigated to the page",
			"memory": "Page loaded with nested iframes",
			"next_goal": "Type text in the first name field",
			"action": [
				{
					"input_text": {
						"index": 1,
						"text": "TestName"
					}
				}
			]
		}
		""",
		# Third action: Scroll the iframe (element_index=2 should be the iframe)
		"""
		{
			"thinking": "Scrolling inside the iframe to reveal lower form elements",
			"evaluation_previous_goal": "Successfully typed in first name field",
			"memory": "Typed TestName in first field",
			"next_goal": "Scroll inside the innermost iframe to see more form fields",
			"action": [
				{
					"scroll": {
						"down": true,
						"num_pages": 1.0,
						"index": 2
					}
				}
			]
		}
		""",
		# Fourth action: Done
		"""
		{
			"thinking": "Completed scrolling, ready to inspect DOM",
			"evaluation_previous_goal": "Successfully scrolled inside iframe",
			"memory": "Scrolled to reveal lower form fields",
			"next_goal": "Task completed",
			"action": [
				{
					"done": {
						"text": "Scrolling completed",
						"success": true
					}
				}
			]
		}
		""",
	]
	# Create mock LLM with our action sequence
	mock_llm = create_mock_llm(actions=actions)
	# Create browser session with headless=False so we can see what's happening
	browser_session = BrowserSession(
		browser_profile=BrowserProfile(
			headless=False,  # Set to False to see the browser
			user_data_dir=None,  # Use temporary directory
			keep_alive=True,
			enable_default_extensions=True,
			cross_origin_iframes=True,  # Enable cross-origin iframe support
		)
	)
	try:
		# Start the browser session
		await browser_session.start()
		print('Browser session started')
		# Create an agent with the mock LLM
		agent = Agent(
			task='Navigate to the iframe test page and scroll inside the iframe',
			llm=mock_llm,
			browser_session=browser_session,
		)

		# Helper function to capture and analyze DOM state
		async def capture_dom_state(label: str) -> dict:
			"""Capture DOM state and return analysis"""
			print(f'\n📸 Capturing DOM state: {label}')
			state_event = browser_session.event_bus.dispatch(
				BrowserStateRequestEvent(include_dom=True, include_screenshot=False, include_recent_events=False)
			)
			browser_state = await state_event.event_result()
			if browser_state and browser_state.dom_state and browser_state.dom_state.selector_map:
				selector_map = browser_state.dom_state.selector_map
				element_count = len(selector_map)
				# Check for specific elements
				found_elements = {}
				expected_checks = [
					('First Name', ['firstName', 'first name']),
					('Last Name', ['lastName', 'last name']),
					('Email', ['email']),
					('City', ['city']),
					('State', ['state']),
					('Zip', ['zip', 'zipCode']),
				]
				# Only record fields that were found; missing fields simply stay absent.
				for name, keywords in expected_checks:
					for index, element in selector_map.items():
						element_str = str(element).lower()
						if any(kw.lower() in element_str for kw in keywords):
							found_elements[name] = True
							break
				return {
					'label': label,
					'total_elements': element_count,
					'found_elements': found_elements,
					'selector_map': selector_map,
				}
			return {'label': label, 'error': 'No DOM state available'}

		# Capture initial state before any actions
		print('\n' + '=' * 80)
		print('PHASE 1: INITIAL PAGE LOAD')
		print('=' * 80)
		# Navigate to the page first
		from browser_use.tools.service import Tools

		tools = Tools()
		# Create the action model for navigation
		goto_action = ActionModel.model_validate_json(actions[0])
		await tools.act(goto_action, browser_session)
		await asyncio.sleep(2)  # Wait for page to fully load
		initial_state = await capture_dom_state('INITIAL (after page load)')
		# Now run the rest of the actions via the agent
		print('\n' + '=' * 80)
		print('PHASE 2: EXECUTING ACTIONS')
		print('=' * 80)
		# Create new agent with remaining actions
		remaining_actions = actions[1:]  # Skip the navigation we already did
		mock_llm_remaining = create_mock_llm(actions=remaining_actions)
		agent = Agent(
			task='Input text and scroll inside the iframe',
			llm=mock_llm_remaining,
			browser_session=browser_session,
		)
		# Hook into agent actions to capture state after each one
		# NOTE(review): this monkey-patches the local `tools` instance; presumably
		# the Agent uses the same tools object - TODO confirm, otherwise the hook
		# never fires and only the initial state is captured.
		states = []
		original_act = tools.act

		async def wrapped_act(action, session):
			result = await original_act(action, session)
			# Capture state after each action
			action_type = 'unknown'
			if hasattr(action, 'input_text') and action.input_text:
				action_type = 'input_text'
				await asyncio.sleep(1)  # Give time for DOM to update
				state = await capture_dom_state('AFTER INPUT_TEXT')
				states.append(state)
			elif hasattr(action, 'scroll') and action.scroll:
				action_type = 'scroll'
				await asyncio.sleep(2)  # Give more time after scroll
				state = await capture_dom_state('AFTER SCROLL')
				states.append(state)
			return result

		tools.act = wrapped_act
		# Run the agent with remaining actions
		result = await agent.run()
		print(f'\nAgent completed with result: {result}')
		# Analyze all captured states
		print('\n' + '=' * 80)
		print('PHASE 3: ANALYSIS OF DOM STATES')
		print('=' * 80)
		all_states = [initial_state] + states
		for state in all_states:
			if 'error' in state:
				print(f'\n❌ {state["label"]}: {state["error"]}')
			else:
				print(f'\n📊 {state["label"]}:')
				print(f' Total elements: {state["total_elements"]}')
				print(' Found elements:')
				for elem_name, found in state['found_elements'].items():
					status = '✓' if found else '✗'
					print(f' {status} {elem_name}')
		# Compare states
		print('\n' + '=' * 80)
		print('COMPARISON SUMMARY')
		print('=' * 80)
		if len(all_states) >= 3:
			initial = all_states[0]
			after_input = all_states[1] if len(all_states) > 1 else None
			after_scroll = all_states[2] if len(all_states) > 2 else None
			print('\nElement count changes:')
			print(f' Initial: {initial.get("total_elements", 0)} elements')
			if after_input:
				print(f' After input_text: {after_input.get("total_elements", 0)} elements')
			if after_scroll:
				print(f' After scroll: {after_scroll.get("total_elements", 0)} elements')
			# Check if lower form fields appear after scroll
			if after_scroll and 'found_elements' in after_scroll:
				lower_fields = ['City', 'State', 'Zip']
				missing_fields = [f for f in lower_fields if not after_scroll['found_elements'].get(f, False)]
				if missing_fields:
					print('\n⚠️ BUG CONFIRMED: Lower form fields missing after scroll:')
					for field in missing_fields:
						print(f' ✗ {field}')
					print('\nThis confirms that scrolling inside iframes does not update the DOM tree properly.')
				else:
					print('\n✅ SUCCESS: All lower form fields are visible after scrolling!')
		# Show first few elements from final state for debugging
		if states and 'selector_map' in states[-1]:
			print('\n' + '=' * 80)
			print('DEBUG: First 5 elements in final selector_map')
			print('=' * 80)
			final_map = states[-1]['selector_map']
			for i, (index, element) in enumerate(list(final_map.items())[:5]):
				elem_preview = str(element)[:150]
				print(f'\n [{index}]: {elem_preview}...')
		# Keep browser open for manual inspection if needed
		print('\n' + '=' * 80)
		print('Test complete. Browser will remain open for 10 seconds for inspection...')
		print('=' * 80)
		await asyncio.sleep(10)
	finally:
		# Clean up
		print('\nCleaning up...')
		await browser_session.kill()
		await browser_session.event_bus.stop(clear=True, timeout=5)
		print('Browser session closed')
if __name__ == '__main__':
	# Run the debug test as a standalone script (blocks until cleanup completes).
	asyncio.run(debug_iframe_scrolling())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/scripts/test_frame_hierarchy.py | tests/scripts/test_frame_hierarchy.py | #!/usr/bin/env python3
"""Test frame hierarchy for any URL passed as argument."""
import asyncio
import sys
from browser_use.browser import BrowserSession
from browser_use.browser.events import BrowserStartEvent
from browser_use.browser.profile import BrowserProfile
async def analyze_frame_hierarchy(url):
	"""Analyze and display complete frame hierarchy for a URL.

	Starts a headless browser, navigates to `url`, then prints three views:
	the raw CDP target list, per-target Page.getFrameTree dumps (including
	OOPIF targets), and the unified view from `session.get_all_frames()`,
	flagging cross-origin frames that are mis-classified as roots.
	"""
	profile = BrowserProfile(headless=True, user_data_dir=None)
	session = BrowserSession(browser_profile=profile)
	try:
		print('🚀 Starting browser...')
		await session.on_BrowserStartEvent(BrowserStartEvent())
		print(f'📍 Navigating to: {url}')
		await session._cdp_navigate(url)
		await asyncio.sleep(3)
		print('\n' + '=' * 80)
		print('FRAME HIERARCHY ANALYSIS')
		print('=' * 80)
		# Get all targets from SessionManager
		all_targets = session.session_manager.get_all_targets()
		# Separate by type
		page_targets = [target for target in all_targets.values() if target.target_type == 'page']
		iframe_targets = [target for target in all_targets.values() if target.target_type == 'iframe']
		print('\n📊 Target Summary:')
		print(f' Total targets: {len(all_targets)}')
		print(f' Page targets: {len(page_targets)}')
		print(f' Iframe targets (OOPIFs): {len(iframe_targets)}')
		# Show all targets
		print('\n📋 All Targets:')
		for i, (target_id, target) in enumerate(all_targets.items()):
			if target.target_type in ['page', 'iframe']:
				print(f'\n [{i + 1}] Type: {target.target_type}')
				print(f' URL: {target.url}')
				print(f' Target ID: {target.target_id[:30]}...')
				# Check if target has active sessions using the public API
				try:
					cdp_session = await session.get_or_create_cdp_session(target.target_id, focus=False)
					has_session = cdp_session is not None
				except Exception:
					has_session = False
				print(f' Has Session: {has_session}')
		# Get main page frame tree; prefer the target whose URL contains `url`,
		# falling back to the first page target (or None when there are none).
		main_target = next((t for t in page_targets if url in t.url), page_targets[0] if page_targets else None)
		if main_target:
			print('\n📐 Main Page Frame Tree:')
			print(f' Target: {main_target.url}')
			print(f' Target ID: {main_target.target_id[:30]}...')
			s = await session.cdp_client.send.Target.attachToTarget(params={'targetId': main_target.target_id, 'flatten': True})
			sid = s['sessionId']
			try:
				await session.cdp_client.send.Page.enable(session_id=sid)
				tree = await session.cdp_client.send.Page.getFrameTree(session_id=sid)
				print('\n Frame Tree Structure:')

				def print_tree(node, indent=0, parent_id=None):
					# Recursively pretty-print a Page.getFrameTree node.
					frame = node['frame']
					frame_id = frame.get('id', 'unknown')
					frame_url = frame.get('url', 'none')
					prefix = ' ' * indent + ('└─ ' if indent > 0 else '')
					print(f'{prefix}Frame: {frame_url}')
					print(f'{" " * (indent + 1)}ID: {frame_id[:30]}...')
					if parent_id:
						print(f'{" " * (indent + 1)}Parent: {parent_id[:30]}...')
					# Check cross-origin status
					cross_origin = frame.get('crossOriginIsolatedContextType', 'unknown')
					if cross_origin != 'NotIsolated':
						print(f'{" " * (indent + 1)}⚠️ Cross-Origin: {cross_origin}')
					# Process children
					for child in node.get('childFrames', []):
						print_tree(child, indent + 1, frame_id)

				print_tree(tree['frameTree'])
			finally:
				# Always detach the ad-hoc session, even if the tree dump failed.
				await session.cdp_client.send.Target.detachFromTarget(params={'sessionId': sid})
		# Show iframe target trees
		if iframe_targets:
			print('\n🔸 OOPIF Target Frame Trees:')
			for iframe_target in iframe_targets:
				print(f'\n OOPIF Target: {iframe_target.url}')
				print(f' Target ID: {iframe_target.target_id[:30]}...')
				s = await session.cdp_client.send.Target.attachToTarget(
					params={'targetId': iframe_target.target_id, 'flatten': True}
				)
				sid = s['sessionId']
				try:
					await session.cdp_client.send.Page.enable(session_id=sid)
					tree = await session.cdp_client.send.Page.getFrameTree(session_id=sid)
					frame = tree['frameTree']['frame']
					print(f' Frame ID: {frame.get("id", "unknown")[:30]}...')
					print(f' Frame URL: {frame.get("url", "none")}')
					print(' ⚠️ This frame runs in a separate process (OOPIF)')
				except Exception as e:
					print(f' Error: {e}')
				finally:
					await session.cdp_client.send.Target.detachFromTarget(params={'sessionId': sid})
		# Now show unified view from get_all_frames
		print('\n' + '=' * 80)
		print('UNIFIED FRAME HIERARCHY (get_all_frames method)')
		print('=' * 80)
		all_frames, target_sessions = await session.get_all_frames()
		# Clean up sessions opened by get_all_frames; best-effort detach.
		for tid, sess_id in target_sessions.items():
			try:
				await session.cdp_client.send.Target.detachFromTarget(params={'sessionId': sess_id})
			except Exception:
				pass
		print('\n📊 Frame Statistics:')
		print(f' Total frames discovered: {len(all_frames)}')
		# Separate root and child frames
		root_frames = []
		child_frames = []
		for frame_id, frame_info in all_frames.items():
			if not frame_info.get('parentFrameId'):
				root_frames.append((frame_id, frame_info))
			else:
				child_frames.append((frame_id, frame_info))
		print(f' Root frames: {len(root_frames)}')
		print(f' Child frames: {len(child_frames)}')
		# Display all frames with details
		print('\n📋 All Frames:')
		for i, (frame_id, frame_info) in enumerate(all_frames.items()):
			url = frame_info.get('url', 'none')
			parent = frame_info.get('parentFrameId')
			target_id = frame_info.get('frameTargetId', 'unknown')
			is_cross = frame_info.get('isCrossOrigin', False)
			print(f'\n [{i + 1}] Frame URL: {url}')
			print(f' Frame ID: {frame_id[:30]}...')
			print(f' Parent Frame ID: {parent[:30] + "..." if parent else "None (ROOT)"}')
			print(f' Target ID: {target_id[:30]}...')
			print(f' Cross-Origin: {is_cross}')
			# Highlight problems
			if not parent and 'v0-simple-landing' in url:
				print(' ❌ PROBLEM: Cross-origin frame incorrectly marked as root!')
			elif not parent and url != 'about:blank' and url not in ['chrome://newtab/', 'about:blank']:
				# Check if this should be the main frame
				if any(url in t.url for t in page_targets):
					print(' ✅ Correctly identified as root frame')
			if is_cross:
				print(' 🔸 This is a cross-origin frame (OOPIF)')
		# Show parent-child relationships
		print('\n🌳 Frame Relationships:')

		# Build a tree structure
		def print_frame_tree(frame_id, frame_info, indent=0, visited=None):
			# `visited` guards against cycles in malformed parent links.
			if visited is None:
				visited = set()
			if frame_id in visited:
				return
			visited.add(frame_id)
			url = frame_info.get('url', 'none')
			prefix = ' ' * indent + ('└─ ' if indent > 0 else '')
			print(f'{prefix}{url[:60]}...')
			print(f'{" " * (indent + 1)}[{frame_id[:20]}...]')
			# Find children
			for child_id, child_info in all_frames.items():
				if child_info.get('parentFrameId') == frame_id:
					print_frame_tree(child_id, child_info, indent + 1, visited)

		# Print trees starting from roots
		for frame_id, frame_info in root_frames:
			print('\n Tree starting from root:')
			print_frame_tree(frame_id, frame_info)
		print('\n' + '=' * 80)
		print('✅ Analysis complete!')
		print('=' * 80)
	except Exception as e:
		print(f'❌ Error: {e}')
		import traceback

		traceback.print_exc()
	finally:
		# Stop the CDP client first before killing the browser
		print('\n🛑 Shutting down...')
		# Close CDP connection first while browser is still alive
		if session._cdp_client_root:
			try:
				await session._cdp_client_root.stop()
			except Exception:
				pass  # Ignore errors if already disconnected
		# Then stop the browser process
		from browser_use.browser.events import BrowserStopEvent

		stop_event = session.event_bus.dispatch(BrowserStopEvent())
		try:
			await asyncio.wait_for(stop_event, timeout=2.0)
		except TimeoutError:
			print('⚠️ Browser stop timed out')
def main():
	"""CLI entry point: validate the single URL argument and run the analysis."""
	args = sys.argv[1:]
	if len(args) != 1:
		# Wrong arity: print usage plus some known-good example URLs and bail.
		print('Usage: python test_frame_hierarchy.py <URL>')
		print('\nExample URLs to test:')
		print(' https://v0-website-with-clickable-elements.vercel.app/nested-iframe')
		print(' https://v0-website-with-clickable-elements.vercel.app/cross-origin')
		print(' https://v0-website-with-clickable-elements.vercel.app/shadow-dom')
		sys.exit(1)
	asyncio.run(analyze_frame_hierarchy(args[0]))
	# Ensure clean exit
	print('✅ Script completed')
	sys.exit(0)
if __name__ == '__main__':
	# CLI entry point; main() terminates the process via sys.exit() itself.
	main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_screenshot_exclusion.py | tests/ci/test_screenshot_exclusion.py | """Test that screenshot action is excluded when use_vision != 'auto'."""
import pytest
from browser_use.agent.service import Agent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='function')
async def browser_session():
	"""Function-scoped headless BrowserSession, torn down even when the test fails.

	Yields a started session; the kill happens in a finally block so a raising
	test body cannot leak a headless browser process (the original skipped the
	kill when the test raised).
	"""
	session = BrowserSession(browser_profile=BrowserProfile(headless=True))
	await session.start()
	try:
		yield session
	finally:
		await session.kill()
def test_screenshot_excluded_with_use_vision_false():
	"""Test that screenshot action is excluded when use_vision=False."""
	agent = Agent(
		task='test',
		llm=create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}']),
		use_vision=False,
	)
	# The registry must not expose the screenshot action in this mode.
	assert 'screenshot' not in agent.tools.registry.registry.actions, 'Screenshot should be excluded when use_vision=False'
def test_screenshot_excluded_with_use_vision_true():
	"""Test that screenshot action is excluded when use_vision=True."""
	agent = Agent(
		task='test',
		llm=create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}']),
		use_vision=True,
	)
	# The registry must not expose the screenshot action in this mode either.
	assert 'screenshot' not in agent.tools.registry.registry.actions, 'Screenshot should be excluded when use_vision=True'
def test_screenshot_included_with_use_vision_auto():
	"""Test that screenshot action is included when use_vision='auto'."""
	agent = Agent(
		task='test',
		llm=create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}']),
		use_vision='auto',
	)
	# 'auto' is the only mode that keeps the screenshot action registered.
	assert 'screenshot' in agent.tools.registry.registry.actions, 'Screenshot should be included when use_vision="auto"'
def test_screenshot_excluded_with_custom_tools_and_use_vision_false():
	"""Test that screenshot action is excluded even when user passes custom tools and use_vision=False.

	This is the critical test case that verifies the fix:
	When users pass their own Tools instance with screenshot included,
	the Agent should still enforce the exclusion if use_vision != 'auto'.
	"""
	custom_tools = Tools()
	# Sanity check: a default Tools registry starts out with screenshot present.
	assert 'screenshot' in custom_tools.registry.registry.actions, 'Custom tools should have screenshot by default'
	agent = Agent(
		task='test',
		llm=create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}']),
		tools=custom_tools,
		use_vision=False,
	)
	# The Agent must strip screenshot from the user-supplied registry.
	assert 'screenshot' not in agent.tools.registry.registry.actions, (
		'Screenshot should be excluded when use_vision=False, even with custom tools'
	)
def test_screenshot_excluded_with_custom_tools_and_use_vision_true():
    """User-supplied Tools with screenshot present must still lose it when use_vision=True.

    Mirrors the use_vision=False case: the Agent enforces the exclusion on any
    Tools instance it receives whenever use_vision != 'auto'.
    """
    stub_llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])

    # Build a default Tools instance; whether screenshot is pre-registered may
    # depend on library defaults, so record the state rather than assert it.
    user_tools = Tools()
    has_screenshot_before = 'screenshot' in user_tools.registry.registry.actions

    # Hand the custom instance to the Agent with use_vision=True.
    agent = Agent(task='test', llm=stub_llm, tools=user_tools, use_vision=True)

    # Regardless of the initial state, screenshot must be gone after Agent init.
    assert 'screenshot' not in agent.tools.registry.registry.actions, (
        f'Screenshot should be excluded when use_vision=True, even with custom tools (had screenshot before: {has_screenshot_before})'
    )
def test_screenshot_included_with_custom_tools_and_use_vision_auto():
    """User-supplied Tools keep the screenshot action when use_vision='auto'."""
    stub_llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])

    # A fresh Tools instance registers screenshot out of the box.
    user_tools = Tools()
    assert 'screenshot' in user_tools.registry.registry.actions, 'Custom tools should have screenshot by default'

    # 'auto' mode must leave the user's registry untouched.
    agent = Agent(task='test', llm=stub_llm, tools=user_tools, use_vision='auto')
    assert 'screenshot' in agent.tools.registry.registry.actions, (
        'Screenshot should be included when use_vision="auto", even with custom tools'
    )
def test_tools_exclude_action_method():
    """Exercise Tools.exclude_action(): it must remove the action and record the exclusion."""
    tools = Tools()
    # Precondition: screenshot starts registered.
    assert 'screenshot' in tools.registry.registry.actions, 'Screenshot should be included by default'

    tools.exclude_action('screenshot')

    # Postconditions: removed from the live registry and tracked in exclude_actions.
    assert 'screenshot' not in tools.registry.registry.actions, 'Screenshot should be excluded after calling exclude_action()'
    assert 'screenshot' in tools.registry.exclude_actions, 'Screenshot should be in exclude_actions list'
def test_exclude_action_prevents_re_registration():
    """An excluded action name must stay excluded even if registration is attempted again."""
    tools = Tools()
    tools.exclude_action('screenshot')
    assert 'screenshot' not in tools.registry.registry.actions

    # Attempt to register a new action under the excluded name (as __init__ would);
    # the decorator must silently skip names present in exclude_actions.
    @tools.registry.action('Test screenshot action')
    async def screenshot():
        return 'test'

    assert 'screenshot' not in tools.registry.registry.actions, 'Excluded action should not be re-registered'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
"""
Runs all agent tasks in parallel (up to 10 at a time) using separate subprocesses.
Each task gets its own Python process, preventing browser session interference.
Fails with exit code 1 if 0% of tasks pass.
"""
import argparse
import asyncio
import glob
import json
import logging
import os
import sys
import warnings
import anyio
import yaml
from dotenv import load_dotenv
from pydantic import BaseModel
load_dotenv()
from browser_use import Agent, AgentHistoryList, BrowserProfile, BrowserSession, ChatBrowserUse
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.messages import UserMessage
# --- CONFIG ---
# Maximum number of task subprocesses allowed to run at the same time.
MAX_PARALLEL = 10
# Directory containing the *.yaml task definitions. A positional CLI argument
# (anything that is not a --flag) overrides the default '../agent_tasks' sibling dir.
# NOTE(review): the argparse parser in __main__ only declares --task, so a bare
# positional directory may be rejected by parse_args() — confirm intended usage.
TASK_DIR = (
    sys.argv[1]
    if len(sys.argv) > 1 and not sys.argv[1].startswith('--')
    else os.path.join(os.path.dirname(__file__), '../agent_tasks')
)
# All task files discovered at import time; empty list means nothing to run.
TASK_FILES = glob.glob(os.path.join(TASK_DIR, '*.yaml'))
class JudgeResponse(BaseModel):
    """Structured verdict returned by the judge LLM for one task run."""

    # True when the judge considers the task's success criteria met.
    success: bool
    # Free-text justification for the verdict.
    explanation: str
async def run_single_task(task_file):
    """Run a single task in the current process (called by subprocess).

    Loads the YAML task definition, runs the browser agent against it, asks a
    judge LLM to grade the output, and returns a dict with keys
    ``file``/``success``/``explanation``. Missing API keys cause the task to be
    reported as a skipped success so fork PRs don't fail CI. All diagnostic
    output goes to stderr; stdout is reserved for the parent's JSON protocol.
    """
    try:
        print(f'[DEBUG] Starting task: {os.path.basename(task_file)}', file=sys.stderr)
        # Suppress all logging in subprocess to avoid interfering with JSON output
        logging.getLogger().setLevel(logging.CRITICAL)
        for logger_name in ['browser_use', 'telemetry', 'message_manager']:
            logging.getLogger(logger_name).setLevel(logging.CRITICAL)
        warnings.filterwarnings('ignore')
        print('[DEBUG] Loading task file...', file=sys.stderr)
        # Task definition: required 'task' text plus optional judge criteria and step cap.
        content = await anyio.Path(task_file).read_text()
        task_data = yaml.safe_load(content)
        task = task_data['task']
        judge_context = task_data.get('judge_context', ['The agent must solve the task'])
        max_steps = task_data.get('max_steps', 15)
        print(f'[DEBUG] Task: {task[:100]}...', file=sys.stderr)
        print(f'[DEBUG] Max steps: {max_steps}', file=sys.stderr)
        api_key = os.getenv('BROWSER_USE_API_KEY')
        if not api_key:
            # No agent-LLM key (e.g. fork PR): report a skipped "success" so CI passes.
            print('[SKIP] BROWSER_USE_API_KEY is not set - skipping task evaluation', file=sys.stderr)
            return {
                'file': os.path.basename(task_file),
                'success': True,  # Mark as success so it doesn't fail CI
                'explanation': 'Skipped - API key not available (fork PR or missing secret)',
            }
        agent_llm = ChatBrowserUse(api_key=api_key)
        # Check if Google API key is available for judge LLM
        google_api_key = os.getenv('GOOGLE_API_KEY')
        if not google_api_key:
            print('[SKIP] GOOGLE_API_KEY is not set - skipping task evaluation', file=sys.stderr)
            return {
                'file': os.path.basename(task_file),
                'success': True,  # Mark as success so it doesn't fail CI
                'explanation': 'Skipped - Google API key not available (fork PR or missing secret)',
            }
        judge_llm = ChatGoogle(model='gemini-flash-lite-latest')
        print('[DEBUG] LLMs initialized', file=sys.stderr)
        # Each subprocess gets its own profile and session
        print('[DEBUG] Creating browser session...', file=sys.stderr)
        profile = BrowserProfile(
            headless=True,
            user_data_dir=None,
            chromium_sandbox=False,  # Disable sandbox for CI environment (GitHub Actions)
        )
        session = BrowserSession(browser_profile=profile)
        print('[DEBUG] Browser session created', file=sys.stderr)
        # Smoke-test the browser with a simple navigation; failures are logged
        # but do not abort the run — the agent itself may still succeed.
        try:
            await session.start()
            from browser_use.browser.events import NavigateToUrlEvent

            event = session.event_bus.dispatch(NavigateToUrlEvent(url='https://httpbin.org/get', new_tab=True))
            await event
            print('[DEBUG] Browser test: navigation successful', file=sys.stderr)
            title = await session.get_current_page_title()
            print(f"[DEBUG] Browser test: got title '{title}'", file=sys.stderr)
        except Exception as browser_error:
            print(f'[DEBUG] Browser test failed: {str(browser_error)}', file=sys.stderr)
            print(
                f'[DEBUG] Browser error type: {type(browser_error).__name__}',
                file=sys.stderr,
            )
        print('[DEBUG] Starting agent execution...', file=sys.stderr)
        agent = Agent(task=task, llm=agent_llm, browser_session=session)
        try:
            history: AgentHistoryList = await agent.run(max_steps=max_steps)
            print('[DEBUG] Agent.run() returned successfully', file=sys.stderr)
        except Exception as agent_error:
            print(
                f'[DEBUG] Agent.run() failed with error: {str(agent_error)}',
                file=sys.stderr,
            )
            print(f'[DEBUG] Error type: {type(agent_error).__name__}', file=sys.stderr)
            # Re-raise to be caught by outer try-catch
            raise agent_error
        agent_output = history.final_result() or ''
        print('[DEBUG] Agent execution completed', file=sys.stderr)
        # Test if LLM is working by making a simple call (diagnostic only).
        try:
            response = await agent_llm.ainvoke([UserMessage(content="Say 'test'")])
            print(
                f'[DEBUG] LLM test call successful: {response.completion[:50]}',
                file=sys.stderr,
            )
        except Exception as llm_error:
            print(f'[DEBUG] LLM test call failed: {str(llm_error)}', file=sys.stderr)
        # Debug: capture more details about the agent execution
        total_steps = len(history.history) if hasattr(history, 'history') else 0
        last_action = history.history[-1] if hasattr(history, 'history') and history.history else None
        debug_info = f'Steps: {total_steps}, Final result length: {len(agent_output)}'
        if last_action:
            debug_info += f', Last action: {type(last_action).__name__}'
        # Log to stderr so it shows up in GitHub Actions (won't interfere with JSON output to stdout)
        print(f'[DEBUG] Task {os.path.basename(task_file)}: {debug_info}', file=sys.stderr)
        if agent_output:
            print(
                f'[DEBUG] Agent output preview: {agent_output[:200]}...',
                file=sys.stderr,
            )
        else:
            print('[DEBUG] Agent produced no output!', file=sys.stderr)
        criteria = '\n- '.join(judge_context)
        # Prompt the judge LLM for a structured pass/fail verdict on the agent output.
        judge_prompt = f"""
You are a evaluator of a browser agent task inside a ci/cd pipeline. Here was the agent's task:
{task}
Here is the agent's output:
{agent_output if agent_output else '[No output provided]'}
Debug info: {debug_info}
Criteria for success:
- {criteria}
Reply in JSON with keys: success (true/false), explanation (string).
If the agent provided no output, explain what might have gone wrong.
"""
        response = await judge_llm.ainvoke([UserMessage(content=judge_prompt)], output_format=JudgeResponse)
        judge_response = response.completion
        result = {
            'file': os.path.basename(task_file),
            'success': judge_response.success,
            'explanation': judge_response.explanation,
        }
        # Clean up session before returning
        await session.kill()
        return result
    except Exception as e:
        # Ensure session cleanup even on error.
        # NOTE(review): 'session' may be unbound if the failure happened before
        # BrowserSession() was created; the resulting NameError is swallowed here.
        try:
            await session.kill()
        except Exception:
            pass
        return {
            'file': os.path.basename(task_file),
            'success': False,
            'explanation': f'Task failed with error: {str(e)}',
        }
async def run_task_subprocess(task_file, semaphore):
    """Run a task in a separate subprocess.

    Re-executes this script with ``--task <file>``, bounded by *semaphore* to
    cap parallelism. The child prints diagnostics to stderr and a single JSON
    result object to stdout; this parent scans stdout bottom-up for the JSON
    line and falls back to a synthetic failure result when the child crashes
    or emits unparseable output. Always returns a result dict.
    """
    async with semaphore:
        try:
            # Set environment to reduce noise in subprocess
            env = os.environ.copy()
            env['PYTHONPATH'] = os.pathsep.join(sys.path)
            proc = await asyncio.create_subprocess_exec(
                sys.executable,
                __file__,
                '--task',
                task_file,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env=env,
            )
            stdout, stderr = await proc.communicate()
            if proc.returncode == 0:
                try:
                    # Parse JSON result from subprocess
                    stdout_text = stdout.decode().strip()
                    stderr_text = stderr.decode().strip()
                    # Display subprocess debug logs
                    if stderr_text:
                        print(f'[SUBPROCESS {os.path.basename(task_file)}] Debug output:')
                        for line in stderr_text.split('\n'):
                            if line.strip():
                                print(f' {line}')
                    # Find the JSON line (should be the last line that starts with {)
                    lines = stdout_text.split('\n')
                    json_line = None
                    for line in reversed(lines):
                        line = line.strip()
                        if line.startswith('{') and line.endswith('}'):
                            json_line = line
                            break
                    if json_line:
                        result = json.loads(json_line)
                        print(f'[PARENT] Task {os.path.basename(task_file)} completed: {result["success"]}')
                    else:
                        raise ValueError(f'No JSON found in output: {stdout_text}')
                except (json.JSONDecodeError, ValueError) as e:
                    # Child exited cleanly but produced no parseable verdict.
                    result = {
                        'file': os.path.basename(task_file),
                        'success': False,
                        'explanation': f'Failed to parse subprocess result: {str(e)[:100]}',
                    }
                    print(f'[PARENT] Task {os.path.basename(task_file)} failed to parse: {str(e)}')
                    print(f'[PARENT] Full stdout was: {stdout.decode()[:500]}')
            else:
                # Non-zero exit: surface truncated stderr/stdout for debugging.
                stderr_text = stderr.decode().strip()
                result = {
                    'file': os.path.basename(task_file),
                    'success': False,
                    'explanation': f'Subprocess failed (code {proc.returncode}): {stderr_text[:200]}',
                }
                print(f'[PARENT] Task {os.path.basename(task_file)} subprocess failed with code {proc.returncode}')
                if stderr_text:
                    print(f'[PARENT] stderr: {stderr_text[:1000]}')
                stdout_text = stdout.decode().strip()
                if stdout_text:
                    print(f'[PARENT] stdout: {stdout_text[:1000]}')
        except Exception as e:
            # Failure to even spawn the child (bad interpreter path, OS limits, ...).
            result = {
                'file': os.path.basename(task_file),
                'success': False,
                'explanation': f'Failed to start subprocess: {str(e)}',
            }
            print(f'[PARENT] Failed to start subprocess for {os.path.basename(task_file)}: {str(e)}')
        return result
async def main():
    """Run all tasks in parallel using subprocesses.

    Fans out every file in TASK_FILES through run_task_subprocess (bounded by
    MAX_PARALLEL), prints a human-readable results table, then emits the
    machine-readable ``PASSED=``/``TOTAL=``/``DETAILED_RESULTS=`` lines that
    GitHub Actions parses. Returns ``(passed, total)``.
    """
    semaphore = asyncio.Semaphore(MAX_PARALLEL)
    print(f'Found task files: {TASK_FILES}')
    if not TASK_FILES:
        print('No task files found!')
        return 0, 0
    # Run all tasks in parallel subprocesses
    tasks = [run_task_subprocess(task_file, semaphore) for task_file in TASK_FILES]
    results = await asyncio.gather(*tasks)
    passed = sum(1 for r in results if r['success'])
    total = len(results)
    print('\n' + '=' * 60)
    print(f'{"RESULTS":^60}\n')
    # Prepare table data
    headers = ['Task', 'Success', 'Reason']
    rows = []
    for r in results:
        status = '✅' if r['success'] else '❌'
        rows.append([r['file'], status, r['explanation']])
    # Calculate column widths (widest cell per column, header included)
    col_widths = [max(len(str(row[i])) for row in ([headers] + rows)) for i in range(3)]
    # Print header
    header_row = ' | '.join(headers[i].ljust(col_widths[i]) for i in range(3))
    print(header_row)
    print('-+-'.join('-' * w for w in col_widths))
    # Print rows
    for row in rows:
        print(' | '.join(str(row[i]).ljust(col_widths[i]) for i in range(3)))
    print('\n' + '=' * 60)
    print(f'\n{"SCORE":^60}')
    print(f'\n{"=" * 60}\n')
    print(f'\n{"*" * 10} {passed}/{total} PASSED {"*" * 10}\n')
    print('=' * 60 + '\n')
    # Output results for GitHub Actions
    print(f'PASSED={passed}')
    print(f'TOTAL={total}')
    # Output detailed results as JSON for GitHub Actions
    detailed_results = []
    for r in results:
        detailed_results.append(
            {
                'task': r['file'].replace('.yaml', ''),
                'success': r['success'],
                'reason': r['explanation'],
            }
        )
    print('DETAILED_RESULTS=' + json.dumps(detailed_results))
    return passed, total
if __name__ == '__main__':
    # Two modes: with --task the script acts as a single-task child process
    # (stdout is reserved for one JSON object); without it, it is the parent
    # orchestrator that fans tasks out to subprocesses.
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, help='Path to a single task YAML file (for subprocess mode)')
    args = parser.parse_args()
    if args.task:
        # Subprocess mode: run a single task and output ONLY JSON
        try:
            result = asyncio.run(run_single_task(args.task))
            # Output ONLY the JSON result, nothing else
            print(json.dumps(result))
        except Exception as e:
            # Even on critical failure, output valid JSON so the parent can parse it
            error_result = {
                'file': os.path.basename(args.task),
                'success': False,
                'explanation': f'Critical subprocess error: {str(e)}',
            }
            print(json.dumps(error_result))
    else:
        # Parent process mode: run all tasks in parallel subprocesses
        passed, total = asyncio.run(main())
        # Results already printed by main() function
        # Fail if 0% pass rate (all tasks failed)
        if total > 0 and passed == 0:
            print('\n❌ CRITICAL: 0% pass rate - all tasks failed!')
            sys.exit(1)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
"""Tests for AI summary generation during rerun"""
from unittest.mock import AsyncMock
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionResult, AgentHistory, AgentHistoryList, RerunSummaryAction, StepMetadata
from browser_use.browser.views import BrowserStateHistory
from browser_use.dom.views import DOMRect, NodeType
from tests.ci.conftest import create_mock_llm
async def test_generate_rerun_summary_success():
    """_generate_rerun_summary should surface the judge LLM's verdict for a clean rerun."""
    verdict = RerunSummaryAction(
        summary='Form filled successfully',
        success=True,
        completion_status='complete',
    )

    async def fake_ainvoke(*args, **kwargs):
        # The summary call must request RerunSummaryAction as its output format.
        fmt = args[1] if len(args) > 1 else kwargs.get('output_format')
        assert fmt is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=verdict, usage=None)

    summary_llm = AsyncMock()
    summary_llm.ainvoke.side_effect = fake_ainvoke

    agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
    await agent.browser_session.start()
    try:
        successful_steps = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(long_term_memory='Step 2 completed'),
        ]
        summary = await agent._generate_rerun_summary('Test task', successful_steps, summary_llm=summary_llm)
        # The returned ActionResult should carry the mocked verdict verbatim.
        assert summary.is_done is True
        assert summary.success is True
        assert summary.extracted_content == 'Form filled successfully'
        assert 'Rerun completed' in (summary.long_term_memory or '')
    finally:
        await agent.close()
async def test_generate_rerun_summary_with_errors():
    """The AI summary must report failure when the rerun steps all errored."""
    verdict = RerunSummaryAction(
        summary='Rerun had errors',
        success=False,
        completion_status='failed',
    )

    async def fake_ainvoke(*args, **kwargs):
        fmt = args[1] if len(args) > 1 else kwargs.get('output_format')
        assert fmt is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=verdict, usage=None)

    summary_llm = AsyncMock()
    summary_llm.ainvoke.side_effect = fake_ainvoke

    agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
    await agent.browser_session.start()
    try:
        failing_steps = [
            ActionResult(error='Failed to find element'),
            ActionResult(error='Timeout'),
        ]
        summary = await agent._generate_rerun_summary('Test task', failing_steps, summary_llm=summary_llm)
        # The failed verdict from the mocked judge should pass through unchanged.
        assert summary.is_done is True
        assert summary.success is False
        assert summary.extracted_content == 'Rerun had errors'
    finally:
        await agent.close()
async def test_generate_rerun_summary_fallback_on_error():
    """A deterministic fallback summary is produced when the summary LLM raises."""
    # The summary LLM always fails, forcing the fallback path.
    broken_llm = AsyncMock()
    broken_llm.ainvoke.side_effect = Exception('LLM service unavailable')

    agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
    await agent.browser_session.start()
    try:
        step_results = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(long_term_memory='Step 2 completed'),
        ]
        summary = await agent._generate_rerun_summary('Test task', step_results, summary_llm=broken_llm)
        # Fallback: still done, success because no step errored, stats embedded in text.
        assert summary.is_done is True
        assert summary.success is True
        assert 'Rerun completed' in (summary.extracted_content or '')
        assert '2/2' in (summary.extracted_content or '')
    finally:
        await agent.close()
async def test_generate_rerun_summary_statistics():
    """A mixed success/error rerun should yield a partial-completion summary."""
    verdict = RerunSummaryAction(
        summary='3 of 5 steps succeeded',
        success=False,
        completion_status='partial',
    )

    async def fake_ainvoke(*args, **kwargs):
        fmt = args[1] if len(args) > 1 else kwargs.get('output_format')
        assert fmt is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=verdict, usage=None)

    summary_llm = AsyncMock()
    summary_llm.ainvoke.side_effect = fake_ainvoke

    agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
    await agent.browser_session.start()
    try:
        # Three successes interleaved with two failures.
        mixed_steps = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(error='Step 2 failed'),
            ActionResult(long_term_memory='Step 3 completed'),
            ActionResult(error='Step 4 failed'),
            ActionResult(long_term_memory='Step 5 completed'),
        ]
        summary = await agent._generate_rerun_summary('Test task', mixed_steps, summary_llm=summary_llm)
        assert summary.is_done is True
        assert summary.success is False
        assert '3 of 5' in (summary.extracted_content or '')
    finally:
        await agent.close()
async def test_rerun_skips_steps_with_original_errors():
    """Test that rerun_history skips steps that had errors in the original run when skip_failures=True"""
    # Create a mock LLM for summary
    summary_action = RerunSummaryAction(
        summary='Rerun completed with skipped steps',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword argument
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke
    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    # Create mock history with a step that has an error
    mock_state = BrowserStateHistory(
        url='https://example.com',
        title='Test Page',
        tabs=[],
        interacted_element=[None],
    )
    # Get the dynamically created AgentOutput type from the agent
    AgentOutput = agent.AgentOutput
    # Create a step that originally had an error (using navigate action which doesn't require element matching)
    failed_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to navigate',
            next_goal=None,
            action=[{'navigate': {'url': 'https://example.com/page'}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(error='Navigation failed - network error')],
        state=mock_state,
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=1.0,
        ),
    )
    # Create history with the failed step
    history = AgentHistoryList(history=[failed_step])
    try:
        # Run rerun with skip_failures=True - should skip the step with original error
        results = await agent.rerun_history(
            history,
            skip_failures=True,
            summary_llm=mock_summary_llm,
        )
        # The step should have been skipped (not retried) because it originally had an error
        # We should have 2 results: the skipped step result and the AI summary
        assert len(results) == 2
        # First result should indicate the step was skipped
        skipped_result = results[0]
        assert skipped_result.error is not None
        assert 'Skipped - original step had error' in skipped_result.error
        # Second result should be the AI summary
        summary_result = results[1]
        assert summary_result.is_done is True
    finally:
        await agent.close()
async def test_rerun_does_not_skip_originally_failed_when_skip_failures_false():
    """Test that rerun_history does NOT skip steps with original errors when skip_failures=False.
    When skip_failures=False, the step should be attempted (and will succeed since navigate doesn't need element matching)."""
    # Create a mock LLM for summary (will be reached after the step succeeds)
    summary_action = RerunSummaryAction(
        summary='Rerun completed',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword argument
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke
    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    # Create mock history with a step that has an error
    mock_state = BrowserStateHistory(
        url='https://example.com',
        title='Test Page',
        tabs=[],
        interacted_element=[None],
    )
    # Get the dynamically created AgentOutput type from the agent
    AgentOutput = agent.AgentOutput
    # Create a step that originally had an error but uses navigate (which will work on rerun)
    failed_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to navigate',
            next_goal=None,
            action=[{'navigate': {'url': 'https://example.com/page'}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(error='Navigation failed - network error')],
        state=mock_state,
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=1.0,
        ),
    )
    # Create history with the failed step
    history = AgentHistoryList(history=[failed_step])
    try:
        # Run rerun with skip_failures=False - should attempt to replay (and succeed since navigate works)
        results = await agent.rerun_history(
            history,
            skip_failures=False,
            max_retries=1,
            summary_llm=mock_summary_llm,
        )
        # With skip_failures=False, the step should NOT be skipped even if original had error
        # The navigate action should succeed
        assert len(results) == 2
        # First result should be the successful navigation (not skipped)
        nav_result = results[0]
        # It should NOT contain "Skipped" since skip_failures=False
        if nav_result.error:
            assert 'Skipped' not in nav_result.error
    finally:
        await agent.close()
async def test_rerun_cleanup_on_failure(httpserver):
    """Test that rerun_history properly cleans up resources (closes browser/connections) even when it fails.
    This test verifies the try/finally cleanup logic by creating a step that will fail
    (element matching fails) and checking that the browser session is properly closed afterward.
    """
    from browser_use.dom.views import DOMInteractedElement

    # Set up a test page with a button that has DIFFERENT attributes than our historical element
    test_html = """<!DOCTYPE html>
<html>
<body>
<button id="real-button" aria-label="real-button">Click me</button>
</body>
</html>"""
    httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
    test_url = httpserver.url_for('/test')
    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    AgentOutput = agent.AgentOutput
    # Step 1: Navigate to test page
    navigate_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Navigate to test page',
            next_goal=None,
            action=[{'navigate': {'url': test_url}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Navigated')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[None],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=0.1,
        ),
    )
    # Step 2: Click on element that won't be found (different identifiers)
    # The hashes/xpath deliberately match nothing on the served page.
    failing_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to click non-existent button',
            next_goal=None,
            action=[{'click': {'index': 100}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked button')],  # Original succeeded
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[
                DOMInteractedElement(
                    node_id=1,
                    backend_node_id=9999,
                    frame_id=None,
                    node_type=NodeType.ELEMENT_NODE,
                    node_value='',
                    node_name='BUTTON',
                    attributes={'aria-label': 'non-existent-button', 'id': 'fake-id'},
                    x_path='html/body/button[999]',
                    element_hash=123456789,
                    stable_hash=987654321,
                    bounds=DOMRect(x=0, y=0, width=100, height=50),
                    ax_name='non-existent',
                )
            ],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=2,
            step_interval=0.1,
        ),
    )
    history = AgentHistoryList(history=[navigate_step, failing_step])
    # Run rerun with skip_failures=False - should fail and raise RuntimeError
    # but the try/finally should ensure cleanup happens
    try:
        await agent.rerun_history(
            history,
            skip_failures=False,
            max_retries=1,  # Fail quickly
        )
        assert False, 'Expected RuntimeError to be raised'
    except RuntimeError as e:
        # Expected - the step should fail on element matching
        assert 'failed after 1 attempts' in str(e)
    # If we get here without hanging, the cleanup worked
    # The browser session should be closed by the finally block in rerun_history
    # We can verify by checking that calling close again doesn't cause issues
    # (close() is idempotent - calling it multiple times should be safe)
    await agent.close()  # Should not hang or error since already closed
async def test_rerun_records_errors_when_skip_failures_true(httpserver):
    """Test that rerun_history records errors in results even when skip_failures=True.
    This ensures the AI summary correctly counts failures. Previously, when skip_failures=True
    and a step failed after all retries, no error result was appended, causing the AI summary
    to incorrectly report success=True even with multiple failures.
    """
    from browser_use.dom.views import DOMInteractedElement

    # Set up a test page with a button that has DIFFERENT attributes than our historical element
    # This ensures element matching will fail (the historical element won't be found)
    test_html = """<!DOCTYPE html>
<html>
<body>
<button id="real-button" aria-label="real-button">Click me</button>
</body>
</html>"""
    httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
    test_url = httpserver.url_for('/test')
    # Create a mock LLM for summary that returns partial success
    summary_action = RerunSummaryAction(
        summary='Some steps failed',
        success=False,
        completion_status='partial',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword argument
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke
    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    # Create history with:
    # 1. First step navigates to test page (will succeed)
    # 2. Second step tries to click a non-existent element (will fail on element matching)
    AgentOutput = agent.AgentOutput
    # Step 1: Navigate to test page
    navigate_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Navigate to test page',
            next_goal=None,
            action=[{'navigate': {'url': test_url}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Navigated')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[None],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=0.1,
        ),
    )
    # Step 2: Click on element that won't exist on current page (different hash/attributes)
    failing_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to click non-existent button',
            next_goal=None,
            action=[{'click': {'index': 100}}],  # type: ignore[arg-type] # Original index doesn't matter, matching will fail
        ),
        result=[ActionResult(long_term_memory='Clicked button')],  # Original succeeded
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[
                DOMInteractedElement(
                    node_id=1,
                    backend_node_id=9999,
                    frame_id=None,
                    node_type=NodeType.ELEMENT_NODE,
                    node_value='',
                    node_name='BUTTON',
                    # This element has completely different identifiers than the real button
                    attributes={'aria-label': 'non-existent-button', 'id': 'fake-id'},
                    x_path='html/body/button[999]',  # XPath that doesn't exist
                    element_hash=123456789,  # Hash that won't match
                    stable_hash=987654321,  # Stable hash that won't match
                    bounds=DOMRect(x=0, y=0, width=100, height=50),
                    ax_name='non-existent',
                )
            ],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=2,
            step_interval=0.1,
        ),
    )
    history = AgentHistoryList(history=[navigate_step, failing_step])
    try:
        # Run rerun with skip_failures=True - should NOT raise but should record the error
        results = await agent.rerun_history(
            history,
            skip_failures=True,
            max_retries=1,  # Fail quickly
            summary_llm=mock_summary_llm,
        )
        # Should have 3 results: navigation success + error from failed step + AI summary
        assert len(results) == 3
        # First result should be successful navigation
        nav_result = results[0]
        assert nav_result.error is None
        # Second result should be the error (element matching failed)
        error_result = results[1]
        assert error_result.error is not None
        assert 'failed after 1 attempts' in error_result.error
        # Third result should be the AI summary
        summary_result = results[2]
        assert summary_result.is_done is True
    finally:
        await agent.close()
async def test_rerun_skips_redundant_retry_steps(httpserver):
"""Test that rerun_history skips redundant retry steps.
This handles cases where the original run needed to click the same element multiple
times due to slow page response, but during replay the first click already succeeded.
When consecutive steps target the same element with the same action, the second step
should be skipped as a redundant retry.
"""
from browser_use.dom.views import DOMInteractedElement
# Set up a test page with a button
test_html = """<!DOCTYPE html>
<html>
<body>
<button id="login-btn" aria-label="Log In">Log In</button>
</body>
</html>"""
httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
test_url = httpserver.url_for('/test')
# Create a mock LLM for summary
summary_action = RerunSummaryAction(
summary='Rerun completed with skipped redundant step',
success=True,
completion_status='complete',
)
async def custom_ainvoke(*args, **kwargs):
output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
if output_format is RerunSummaryAction:
from browser_use.llm.views import ChatInvokeCompletion
return ChatInvokeCompletion(completion=summary_action, usage=None)
raise ValueError('Unexpected output_format')
mock_summary_llm = AsyncMock()
mock_summary_llm.ainvoke.side_effect = custom_ainvoke
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Create an interacted element that matches the button on the page
login_button_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='BUTTON',
attributes={'aria-label': 'Log In', 'id': 'login-btn'},
x_path='html/body/button',
element_hash=12345, # Same hash for both steps (same element)
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
)
# Step 1: Navigate to test page
navigate_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Navigate to test page',
next_goal=None,
action=[{'navigate': {'url': test_url}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Navigated')],
state=BrowserStateHistory(
url=test_url,
title='Test Page',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(
step_start_time=0,
step_end_time=1,
step_number=1,
step_interval=0.1,
),
)
# Step 2: Click login button (first click)
click_step_1 = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click login button',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked login button')],
state=BrowserStateHistory(
url=test_url,
title='Test Page',
tabs=[],
interacted_element=[login_button_element],
),
metadata=StepMetadata(
step_start_time=1,
step_end_time=2,
step_number=2,
step_interval=0.1,
),
)
# Step 3: Click login button AGAIN (redundant retry - same element, same action)
click_step_2 = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Page did not change, clicking login button again',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type] # Same action type
),
result=[ActionResult(long_term_memory='Clicked login button')],
state=BrowserStateHistory(
url=test_url,
title='Test Page',
tabs=[],
interacted_element=[login_button_element], # Same element!
),
metadata=StepMetadata(
step_start_time=2,
step_end_time=3,
step_number=3,
step_interval=0.1,
),
)
history = AgentHistoryList(history=[navigate_step, click_step_1, click_step_2])
try:
results = await agent.rerun_history(
history,
skip_failures=True,
summary_llm=mock_summary_llm,
)
# Should have 4 results: navigate + click + skipped redundant + AI summary
assert len(results) == 4
# First result: navigation succeeded
nav_result = results[0]
assert nav_result.error is None
# Second result: first click succeeded
click_result = results[1]
assert click_result.error is None
# Third result: redundant retry was SKIPPED (not an error)
skipped_result = results[2]
assert skipped_result.error is None # Not an error - intentionally skipped
assert 'Skipped - redundant retry' in (skipped_result.extracted_content or '')
# Fourth result: AI summary
summary_result = results[3]
assert summary_result.is_done is True
finally:
await agent.close()
async def test_is_redundant_retry_step_detection():
"""Test the _is_redundant_retry_step method directly."""
from browser_use.dom.views import DOMInteractedElement
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Create an interacted element
button_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='BUTTON',
attributes={'aria-label': 'Submit'},
x_path='html/body/button',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
)
different_element = DOMInteractedElement(
node_id=2,
backend_node_id=2,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='INPUT',
attributes={'name': 'email'},
x_path='html/body/input',
element_hash=99999, # Different hash
stable_hash=99999,
bounds=DOMRect(x=0, y=0, width=200, height=30),
)
# Step with click on button
click_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click button',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[button_element],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
# Same click on same button (redundant retry)
retry_click_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click button again',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[button_element], # Same element
),
metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
)
# Different action type on same element (not redundant)
input_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Type in button (weird but valid)',
next_goal=None,
action=[{'input': {'index': 1, 'text': 'hello'}}], # type: ignore[arg-type] # Different action type
),
result=[ActionResult(long_term_memory='Typed')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[button_element],
),
metadata=StepMetadata(step_start_time=2, step_end_time=3, step_number=3, step_interval=0.1),
)
# Same action type but different element (not redundant)
different_element_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click different element',
next_goal=None,
action=[{'click': {'index': 2}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[different_element], # Different element
),
metadata=StepMetadata(step_start_time=3, step_end_time=4, step_number=4, step_interval=0.1),
)
try:
# Test 1: Same element, same action, previous succeeded -> redundant
assert agent._is_redundant_retry_step(retry_click_step, click_step, True) is True
# Test 2: Same element, same action, previous FAILED -> NOT redundant
assert agent._is_redundant_retry_step(retry_click_step, click_step, False) is False
# Test 3: Same element, different action type -> NOT redundant
assert agent._is_redundant_retry_step(input_step, click_step, True) is False
# Test 4: Different element, same action type -> NOT redundant
assert agent._is_redundant_retry_step(different_element_step, click_step, True) is False
# Test 5: No previous step -> NOT redundant
assert agent._is_redundant_retry_step(click_step, None, True) is False
finally:
await agent.close()
async def test_count_expected_elements_from_history():
"""Test that _count_expected_elements_from_history correctly estimates element count based on action indices."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Test 1: Action with low index (5) -> needs at least 6 elements (index + 1)
step_low_index = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[{'input': {'index': 5, 'text': 'test'}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
# Test 2: Action with higher index (25) -> needs at least 26 elements
step_high_index = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[{'click': {'index': 25}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=2, step_interval=0.1),
)
# Test 3: Action with very high index (100) -> capped at 50
step_very_high_index = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[{'click': {'index': 100}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=3, step_interval=0.1),
)
# Test 4: Navigate action (no index) -> returns 0
step_no_index = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[{'navigate': {'url': 'http://test.com'}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=4, step_interval=0.1),
)
# Test 5: Multiple actions - uses max index
step_multiple_actions = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[
{'click': {'index': 3}}, # type: ignore[arg-type]
{'input': {'index': 10, 'text': 'test'}}, # type: ignore[arg-type]
],
),
result=[ActionResult(long_term_memory='Done'), ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None, None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=5, step_interval=0.1),
)
# Test 6: Action with index 0 (edge case) -> needs at least 1 element
# Using input action because it allows index 0 (click requires ge=1)
step_index_zero = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Test',
next_goal=None,
action=[{'input': {'index': 0, 'text': 'test'}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Done')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=6, step_interval=0.1),
)
try:
# Test 1: Action index 5 -> needs 6 elements (index + 1)
assert agent._count_expected_elements_from_history(step_low_index) == 6
# Test 2: Action index 25 -> needs 26 elements
assert agent._count_expected_elements_from_history(step_high_index) == 26
# Test 3: Action index 100 -> capped at 50
assert agent._count_expected_elements_from_history(step_very_high_index) == 50
# Test 4: Navigate has no index -> returns 0
assert agent._count_expected_elements_from_history(step_no_index) == 0
# Test 5: Multiple actions -> uses max index (10) + 1 = 11
assert agent._count_expected_elements_from_history(step_multiple_actions) == 11
# Test 6: Action index 0 (edge case) -> needs 1 element (0 + 1)
assert agent._count_expected_elements_from_history(step_index_zero) == 1
finally:
await agent.close()
async def test_wait_for_minimum_elements(httpserver):
"""Test that _wait_for_minimum_elements waits for elements to appear."""
# Set up a simple test page with a button
test_html = """<!DOCTYPE html>
<html>
<body>
<button id="btn1">Button 1</button>
<button id="btn2">Button 2</button>
<input type="text" id="input1" />
</body>
</html>"""
httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
test_url = httpserver.url_for('/test')
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
try:
await agent.browser_session.start()
# Navigate to the test page first
from browser_use.browser.events import NavigateToUrlEvent
await agent.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=test_url, new_tab=False))
# Wait a bit for navigation
import asyncio
await asyncio.sleep(1.0)
# Test 1: Wait for 1 element (should succeed quickly)
state = await agent._wait_for_minimum_elements(min_elements=1, timeout=5.0, poll_interval=0.5)
assert state is not None
assert state.dom_state.selector_map is not None
assert len(state.dom_state.selector_map) >= 1
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | true |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_file_system_llm_integration.py | tests/ci/test_file_system_llm_integration.py | """Integration tests for DOCX and image file support in LLM messages."""
import base64
import io
from pathlib import Path
import pytest
from PIL import Image
from browser_use.agent.message_manager.service import MessageManager
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.agent.views import ActionResult, AgentStepInfo
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import SerializedDOMState
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.messages import ContentPartImageParam, ContentPartTextParam, SystemMessage
class TestImageInLLMMessages:
"""Test that images flow correctly through to LLM messages."""
def create_test_image(self, width: int = 100, height: int = 100) -> bytes:
"""Create a test image and return bytes."""
img = Image.new('RGB', (width, height), color='red')
buffer = io.BytesIO()
img.save(buffer, format='PNG')
buffer.seek(0)
return buffer.read()
@pytest.mark.asyncio
async def test_image_stored_in_message_manager(self, tmp_path: Path):
"""Test that images are stored in MessageManager state."""
fs = FileSystem(tmp_path)
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
# Create ActionResult with images
images = [{'name': 'test.png', 'data': 'base64_test_data'}]
action_results = [
ActionResult(
extracted_content='Read image file test.png',
long_term_memory='Read image file test.png',
images=images,
include_extracted_content_only_once=True,
)
]
# Update message manager with results
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=action_results, step_info=step_info)
# Verify images are stored
assert mm.state.read_state_images is not None
assert len(mm.state.read_state_images) == 1
assert mm.state.read_state_images[0]['name'] == 'test.png'
assert mm.state.read_state_images[0]['data'] == 'base64_test_data'
@pytest.mark.asyncio
async def test_images_cleared_after_step(self, tmp_path: Path):
"""Test that images are cleared after each step."""
fs = FileSystem(tmp_path)
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
# First step with images
images = [{'name': 'test.png', 'data': 'base64_data'}]
action_results = [ActionResult(images=images, include_extracted_content_only_once=True)]
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=action_results, step_info=step_info)
assert len(mm.state.read_state_images) == 1
# Second step without images - should clear
action_results_2 = [ActionResult(extracted_content='No images')]
step_info_2 = AgentStepInfo(step_number=2, max_steps=10)
mm._update_agent_history_description(model_output=None, result=action_results_2, step_info=step_info_2)
assert len(mm.state.read_state_images) == 0
@pytest.mark.asyncio
async def test_multiple_images_accumulated(self, tmp_path: Path):
"""Test that multiple images in one step are accumulated."""
fs = FileSystem(tmp_path)
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
# Multiple action results with images
action_results = [
ActionResult(images=[{'name': 'img1.png', 'data': 'data1'}], include_extracted_content_only_once=True),
ActionResult(images=[{'name': 'img2.jpg', 'data': 'data2'}], include_extracted_content_only_once=True),
]
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=action_results, step_info=step_info)
assert len(mm.state.read_state_images) == 2
assert mm.state.read_state_images[0]['name'] == 'img1.png'
assert mm.state.read_state_images[1]['name'] == 'img2.jpg'
def test_agent_message_prompt_includes_images(self, tmp_path: Path):
"""Test that AgentMessagePrompt includes images in message content."""
fs = FileSystem(tmp_path)
# Create browser state
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
# Create images
read_state_images = [{'name': 'test.png', 'data': 'base64_image_data_here'}]
# Create message prompt
prompt = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=read_state_images,
)
# Get user message with vision enabled
user_message = prompt.get_user_message(use_vision=True)
# Verify message has content parts (not just string)
assert isinstance(user_message.content, list)
# Find image content parts
image_parts = [part for part in user_message.content if isinstance(part, ContentPartImageParam)]
text_parts = [part for part in user_message.content if isinstance(part, ContentPartTextParam)]
# Should have at least one image
assert len(image_parts) >= 1
# Should have text label
image_labels = [part.text for part in text_parts if 'test.png' in part.text]
assert len(image_labels) >= 1
# Verify image data URL format
img_part = image_parts[0]
assert 'data:image/' in img_part.image_url.url
assert 'base64,base64_image_data_here' in img_part.image_url.url
def test_agent_message_prompt_png_vs_jpg_media_type(self, tmp_path: Path):
"""Test that AgentMessagePrompt correctly detects PNG vs JPG media types."""
fs = FileSystem(tmp_path)
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
# Test PNG
read_state_images_png = [{'name': 'test.png', 'data': 'data'}]
prompt_png = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=read_state_images_png,
)
message_png = prompt_png.get_user_message(use_vision=True)
image_parts_png = [part for part in message_png.content if isinstance(part, ContentPartImageParam)]
assert 'data:image/png;base64' in image_parts_png[0].image_url.url
# Test JPG
read_state_images_jpg = [{'name': 'photo.jpg', 'data': 'data'}]
prompt_jpg = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=read_state_images_jpg,
)
message_jpg = prompt_jpg.get_user_message(use_vision=True)
image_parts_jpg = [part for part in message_jpg.content if isinstance(part, ContentPartImageParam)]
assert 'data:image/jpeg;base64' in image_parts_jpg[0].image_url.url
def test_agent_message_prompt_no_images(self, tmp_path: Path):
"""Test that message works correctly when no images are present."""
fs = FileSystem(tmp_path)
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
# No images
prompt = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=[],
)
# Get user message without vision
user_message = prompt.get_user_message(use_vision=False)
# Should be plain text, not content parts
assert isinstance(user_message.content, str)
def test_agent_message_prompt_empty_base64_skipped(self, tmp_path: Path):
"""Test that images with empty base64 data are skipped."""
fs = FileSystem(tmp_path)
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
# Image with empty data field
read_state_images = [
{'name': 'empty.png', 'data': ''}, # Empty - should be skipped
{'name': 'valid.png', 'data': 'valid_data'}, # Valid
]
prompt = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=read_state_images,
)
user_message = prompt.get_user_message(use_vision=True)
image_parts = [part for part in user_message.content if isinstance(part, ContentPartImageParam)]
# Should only have 1 image (the valid one)
assert len(image_parts) == 1
assert 'valid_data' in image_parts[0].image_url.url
class TestDocxInLLMMessages:
"""Test that DOCX content flows correctly through to LLM messages."""
@pytest.mark.asyncio
async def test_docx_in_extracted_content(self, tmp_path: Path):
"""Test that DOCX text appears in extracted_content."""
fs = FileSystem(tmp_path)
# Create DOCX file
content = """# Title
Some important content here."""
await fs.write_file('test.docx', content)
# Read it
result = await fs.read_file('test.docx')
# Verify content is in the result
assert 'Title' in result
assert 'important content' in result
@pytest.mark.asyncio
async def test_docx_in_message_manager(self, tmp_path: Path):
"""Test that DOCX content appears in message manager state."""
fs = FileSystem(tmp_path)
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
# Simulate read_file action result
docx_content = """Read from file test.docx.
<content>
Title
Some content here.
</content>"""
action_results = [
ActionResult(
extracted_content=docx_content,
long_term_memory='Read file test.docx',
include_extracted_content_only_once=True,
)
]
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=action_results, step_info=step_info)
# Verify it's in read_state_description
assert 'Title' in mm.state.read_state_description
assert 'Some content' in mm.state.read_state_description
class TestEndToEndIntegration:
"""End-to-end tests for file reading and LLM message creation."""
def create_test_image(self) -> bytes:
"""Create a test image."""
img = Image.new('RGB', (50, 50), color='blue')
buffer = io.BytesIO()
img.save(buffer, format='PNG')
buffer.seek(0)
return buffer.read()
@pytest.mark.asyncio
async def test_image_end_to_end(self, tmp_path: Path):
"""Test complete flow: external image → FileSystem → ActionResult → MessageManager → Prompt."""
# Step 1: Create external image
external_file = tmp_path / 'photo.png'
img_bytes = self.create_test_image()
external_file.write_bytes(img_bytes)
# Step 2: Read via FileSystem
fs = FileSystem(tmp_path / 'workspace')
structured_result = await fs.read_file_structured(str(external_file), external_file=True)
assert structured_result['images'] is not None
# Step 3: Create ActionResult (simulating tools/service.py)
action_result = ActionResult(
extracted_content=structured_result['message'],
long_term_memory='Read image file photo.png',
images=structured_result['images'],
include_extracted_content_only_once=True,
)
# Step 4: Process in MessageManager
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=[action_result], step_info=step_info)
# Verify images stored
assert len(mm.state.read_state_images) == 1
assert mm.state.read_state_images[0]['name'] == 'photo.png'
# Step 5: Create message with AgentMessagePrompt
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
prompt = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=mm.state.read_state_images,
)
user_message = prompt.get_user_message(use_vision=True)
# Verify image is in message
assert isinstance(user_message.content, list)
image_parts = [part for part in user_message.content if isinstance(part, ContentPartImageParam)]
assert len(image_parts) >= 1
# Verify image data is correct
base64_str = base64.b64encode(img_bytes).decode('utf-8')
assert base64_str in image_parts[0].image_url.url
@pytest.mark.asyncio
async def test_docx_end_to_end(self, tmp_path: Path):
"""Test complete flow: DOCX file → FileSystem → ActionResult → MessageManager."""
# Step 1: Create DOCX
fs = FileSystem(tmp_path)
docx_content = """# Important Document
This is critical information."""
await fs.write_file('important.docx', docx_content)
# Step 2: Read it
read_result = await fs.read_file('important.docx')
# Step 3: Create ActionResult (simulating tools/service.py)
action_result = ActionResult(
extracted_content=read_result,
long_term_memory=read_result[:100] if len(read_result) > 100 else read_result,
include_extracted_content_only_once=True,
)
# Step 4: Process in MessageManager
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=[action_result], step_info=step_info)
# Verify content is in read_state
assert 'Important Document' in mm.state.read_state_description
assert 'critical information' in mm.state.read_state_description
if __name__ == '__main__':
pytest.main([__file__, '-v'])
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_markdown_extractor.py | tests/ci/test_markdown_extractor.py | """Tests for markdown extractor preprocessing."""
from browser_use.dom.markdown_extractor import _preprocess_markdown_content
class TestPreprocessMarkdownContent:
"""Tests for _preprocess_markdown_content function."""
def test_preserves_short_lines(self):
"""Short lines (1-2 chars) should be preserved, not removed."""
content = '# Items\na\nb\nc\nOK\nNo'
filtered, _ = _preprocess_markdown_content(content)
assert 'a' in filtered.split('\n')
assert 'b' in filtered.split('\n')
assert 'c' in filtered.split('\n')
assert 'OK' in filtered.split('\n')
assert 'No' in filtered.split('\n')
def test_preserves_single_digit_numbers(self):
"""Single digit page numbers should be preserved."""
content = 'Page navigation:\n1\n2\n3\n10'
filtered, _ = _preprocess_markdown_content(content)
lines = filtered.split('\n')
assert '1' in lines
assert '2' in lines
assert '3' in lines
assert '10' in lines
def test_preserves_markdown_list_items(self):
"""Markdown list items with short content should be preserved."""
content = 'Shopping list:\n- a\n- b\n- OK\n- No'
filtered, _ = _preprocess_markdown_content(content)
assert '- a' in filtered
assert '- b' in filtered
assert '- OK' in filtered
assert '- No' in filtered
def test_preserves_state_codes(self):
"""Two-letter state codes should be preserved."""
content = 'States:\nCA\nNY\nTX'
filtered, _ = _preprocess_markdown_content(content)
lines = filtered.split('\n')
assert 'CA' in lines
assert 'NY' in lines
assert 'TX' in lines
def test_removes_empty_lines(self):
"""Empty and whitespace-only lines should be removed."""
content = 'Header\n\n \n\nContent'
filtered, _ = _preprocess_markdown_content(content)
# Should not have empty lines
for line in filtered.split('\n'):
assert line.strip(), f'Found empty line in output: {repr(line)}'
def test_removes_large_json_blobs(self):
"""Large JSON-like lines (>100 chars) should be removed."""
# Create a JSON blob > 100 chars
json_blob = '{"key": "' + 'x' * 100 + '"}'
content = f'Header\n{json_blob}\nFooter'
filtered, _ = _preprocess_markdown_content(content)
assert json_blob not in filtered
assert 'Header' in filtered
assert 'Footer' in filtered
def test_preserves_small_json(self):
"""Small JSON objects (<100 chars) should be preserved."""
small_json = '{"key": "value"}'
content = f'Header\n{small_json}\nFooter'
filtered, _ = _preprocess_markdown_content(content)
assert small_json in filtered
def test_compresses_multiple_newlines(self):
"""4+ consecutive newlines should be compressed to max_newlines."""
content = 'Header\n\n\n\n\nFooter'
filtered, _ = _preprocess_markdown_content(content, max_newlines=2)
# After filtering empty lines, we should have just Header and Footer
lines = [line for line in filtered.split('\n') if line.strip()]
assert lines == ['Header', 'Footer']
def test_returns_chars_filtered_count(self):
"""Should return count of characters removed."""
content = 'Header\n\n\n\n\nFooter'
_, chars_filtered = _preprocess_markdown_content(content)
assert chars_filtered > 0
def test_strips_result(self):
"""Result should be stripped of leading/trailing whitespace."""
content = ' \n\nContent\n\n '
filtered, _ = _preprocess_markdown_content(content)
assert not filtered.startswith(' ')
assert not filtered.startswith('\n')
assert not filtered.endswith(' ')
assert not filtered.endswith('\n')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_tools.py | tests/ci/test_tools.py | import asyncio
import tempfile
import time
import pytest
from pydantic import BaseModel
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.filesystem.file_system import FileSystem
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server that serves static content."""
server = HTTPServer()
server.start()
# Add routes for common test pages
server.expect_request('/').respond_with_data(
'<html><head><title>Test Home Page</title></head><body><h1>Test Home Page</h1><p>Welcome to the test site</p></body></html>',
content_type='text/html',
)
server.expect_request('/page1').respond_with_data(
'<html><head><title>Test Page 1</title></head><body><h1>Test Page 1</h1><p>This is test page 1</p></body></html>',
content_type='text/html',
)
server.expect_request('/page2').respond_with_data(
'<html><head><title>Test Page 2</title></head><body><h1>Test Page 2</h1><p>This is test page 2</p></body></html>',
content_type='text/html',
)
server.expect_request('/search').respond_with_data(
"""
<html>
<head><title>Search Results</title></head>
<body>
<h1>Search Results</h1>
<div class="results">
<div class="result">Result 1</div>
<div class="result">Result 2</div>
<div class="result">Result 3</div>
</div>
</body>
</html>
""",
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
"""Create and provide a Browser instance with security disabled."""
browser_session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
)
)
await browser_session.start()
yield browser_session
await browser_session.kill()
@pytest.fixture(scope='function')
def tools():
"""Create and provide a Tools instance."""
return Tools()
class TestToolsIntegration:
"""Integration tests for Tools using actual browser instances."""
async def test_registry_actions(self, tools, browser_session):
"""Test that the registry contains the expected default actions."""
# Check that common actions are registered
common_actions = [
'navigate',
'search',
'click',
'input',
'scroll',
'go_back',
'switch',
'close',
'wait',
]
for action in common_actions:
assert action in tools.registry.registry.actions
assert tools.registry.registry.actions[action].function is not None
assert tools.registry.registry.actions[action].description is not None
async def test_custom_action_registration(self, tools, browser_session, base_url):
"""Test registering a custom action and executing it."""
# Define a custom action
class CustomParams(BaseModel):
text: str
@tools.action('Test custom action', param_model=CustomParams)
async def custom_action(params: CustomParams, browser_session):
current_url = await browser_session.get_current_page_url()
return ActionResult(extracted_content=f'Custom action executed with: {params.text} on {current_url}')
# Navigate to a page first
await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
# Execute the custom action directly
result = await tools.custom_action(text='test_value', browser_session=browser_session)
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Custom action executed with: test_value on' in result.extracted_content
assert f'{base_url}/page1' in result.extracted_content
async def test_wait_action(self, tools, browser_session):
"""Test that the wait action correctly waits for the specified duration."""
# verify that it's in the default action set
wait_action = None
for action_name, action in tools.registry.registry.actions.items():
if 'wait' in action_name.lower() and 'seconds' in str(action.param_model.model_fields):
wait_action = action
break
assert wait_action is not None, 'Could not find wait action in tools'
# Check that it has seconds parameter with default
assert 'seconds' in wait_action.param_model.model_fields
schema = wait_action.param_model.model_json_schema()
assert schema['properties']['seconds']['default'] == 3
# Record start time
start_time = time.time()
# Execute wait action
result = await tools.wait(seconds=3, browser_session=browser_session)
# Record end time
end_time = time.time()
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Waited for' in result.extracted_content or 'Waiting for' in result.extracted_content
# Verify that approximately 1 second has passed (allowing some margin)
assert end_time - start_time <= 2.5 # We wait 3-1 seconds for LLM call
# longer wait
# Record start time
start_time = time.time()
# Execute wait action
result = await tools.wait(seconds=5, browser_session=browser_session)
# Record end time
end_time = time.time()
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Waited for' in result.extracted_content or 'Waiting for' in result.extracted_content
assert 3.5 <= end_time - start_time <= 4.5 # We wait 5-1 seconds for LLM call
async def test_go_back_action(self, tools, browser_session, base_url):
"""Test that go_back action navigates to the previous page."""
# Navigate to first page
await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
# Store the first page URL
first_url = await browser_session.get_current_page_url()
print(f'First page URL: {first_url}')
# Navigate to second page
await tools.navigate(url=f'{base_url}/page2', new_tab=False, browser_session=browser_session)
# Verify we're on the second page
second_url = await browser_session.get_current_page_url()
print(f'Second page URL: {second_url}')
assert f'{base_url}/page2' in second_url
# Execute go back action
result = await tools.go_back(browser_session=browser_session)
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Navigated back' in result.extracted_content
# Add another delay to allow the navigation to complete
await asyncio.sleep(1)
# Verify we're back on a different page than before
final_url = await browser_session.get_current_page_url()
print(f'Final page URL after going back: {final_url}')
# Try to verify we're back on the first page, but don't fail the test if not
assert f'{base_url}/page1' in final_url, f'Expected to return to page1 but got {final_url}'
async def test_navigation_chain(self, tools, browser_session, base_url):
"""Test navigating through multiple pages and back through history."""
# Set up a chain of navigation: Home -> Page1 -> Page2
urls = [f'{base_url}/', f'{base_url}/page1', f'{base_url}/page2']
# Navigate to each page in sequence
for url in urls:
await tools.navigate(url=url, new_tab=False, browser_session=browser_session)
# Verify current page
current_url = await browser_session.get_current_page_url()
assert url in current_url
# Go back twice and verify each step
for expected_url in reversed(urls[:-1]):
await tools.go_back(browser_session=browser_session)
await asyncio.sleep(1) # Wait for navigation to complete
current_url = await browser_session.get_current_page_url()
assert expected_url in current_url
async def test_excluded_actions(self, browser_session):
"""Test that excluded actions are not registered."""
# Create tools with excluded actions
excluded_tools = Tools(exclude_actions=['search', 'scroll'])
# Verify excluded actions are not in the registry
assert 'search' not in excluded_tools.registry.registry.actions
assert 'scroll' not in excluded_tools.registry.registry.actions
# But other actions are still there
assert 'navigate' in excluded_tools.registry.registry.actions
assert 'click' in excluded_tools.registry.registry.actions
async def test_search_action(self, tools, browser_session, base_url):
"""Test the search action."""
await browser_session.get_current_page_url()
# Execute search action - it will actually navigate to our search results page
result = await tools.search(query='Python web automation', browser_session=browser_session)
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Searched' in result.extracted_content and 'Python web automation' in result.extracted_content
# For our test purposes, we just verify we're on some URL
current_url = await browser_session.get_current_page_url()
assert current_url is not None and 'Python' in current_url
async def test_done_action(self, tools, browser_session, base_url):
"""Test that DoneAction completes a task and reports success or failure."""
# Create a temporary directory for the file system
with tempfile.TemporaryDirectory() as temp_dir:
file_system = FileSystem(temp_dir)
# First navigate to a page
await tools.navigate(url=f'{base_url}/page1', new_tab=False, browser_session=browser_session)
success_done_message = 'Successfully completed task'
# Execute done action with file_system
result = await tools.done(
text=success_done_message, success=True, browser_session=browser_session, file_system=file_system
)
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert success_done_message in result.extracted_content
assert result.success is True
assert result.is_done is True
assert result.error is None
failed_done_message = 'Failed to complete task'
# Execute failed done action with file_system
result = await tools.done(
text=failed_done_message, success=False, browser_session=browser_session, file_system=file_system
)
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert failed_done_message in result.extracted_content
assert result.success is False
assert result.is_done is True
assert result.error is None
async def test_get_dropdown_options(self, tools, browser_session, base_url, http_server):
"""Test that get_dropdown_options correctly retrieves options from a dropdown."""
# Add route for dropdown test page
http_server.expect_request('/dropdown1').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>Dropdown Test</title>
</head>
<body>
<h1>Dropdown Test</h1>
<select id="test-dropdown" name="test-dropdown">
<option value="">Please select</option>
<option value="option1">First Option</option>
<option value="option2">Second Option</option>
<option value="option3">Third Option</option>
</select>
</body>
</html>
""",
content_type='text/html',
)
# Navigate to the dropdown test page
await tools.navigate(url=f'{base_url}/dropdown1', new_tab=False, browser_session=browser_session)
# Wait for the page to load using CDP
cdp_session = await browser_session.get_or_create_cdp_session()
assert cdp_session is not None, 'CDP session not initialized'
# Wait for page load by checking document ready state
await asyncio.sleep(0.5) # Brief wait for navigation to start
ready_state = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': 'document.readyState'}, session_id=cdp_session.session_id
)
# If not complete, wait a bit more
if ready_state.get('result', {}).get('value') != 'complete':
await asyncio.sleep(1.0)
# Initialize the DOM state to populate the selector map
await browser_session.get_browser_state_summary()
# Get the selector map
selector_map = await browser_session.get_selector_map()
# Find the dropdown element in the selector map
dropdown_index = None
for idx, element in selector_map.items():
if element.tag_name.lower() == 'select':
dropdown_index = idx
break
assert dropdown_index is not None, (
f'Could not find select element in selector map. Available elements: {[f"{idx}: {element.tag_name}" for idx, element in selector_map.items()]}'
)
# Execute the action with the dropdown index
result = await tools.dropdown_options(index=dropdown_index, browser_session=browser_session)
expected_options = [
{'index': 0, 'text': 'Please select', 'value': ''},
{'index': 1, 'text': 'First Option', 'value': 'option1'},
{'index': 2, 'text': 'Second Option', 'value': 'option2'},
{'index': 3, 'text': 'Third Option', 'value': 'option3'},
]
# Verify the result structure
assert isinstance(result, ActionResult)
# Core logic validation: Verify all options are returned
assert result.extracted_content is not None
for option in expected_options[1:]: # Skip the placeholder option
assert option['text'] in result.extracted_content, f"Option '{option['text']}' not found in result content"
# Verify the instruction for using the text in select_dropdown is included
assert 'Use the exact text or value string' in result.extracted_content and 'select_dropdown' in result.extracted_content
# Verify the actual dropdown options in the DOM using CDP
dropdown_options_result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': """
JSON.stringify((() => {
const select = document.getElementById('test-dropdown');
return Array.from(select.options).map(opt => ({
text: opt.text,
value: opt.value
}));
})())
""",
'returnByValue': True,
},
session_id=cdp_session.session_id,
)
dropdown_options_json = dropdown_options_result.get('result', {}).get('value', '[]')
import json
dropdown_options = json.loads(dropdown_options_json) if isinstance(dropdown_options_json, str) else dropdown_options_json
# Verify the dropdown has the expected options
assert len(dropdown_options) == len(expected_options), (
f'Expected {len(expected_options)} options, got {len(dropdown_options)}'
)
for i, expected in enumerate(expected_options):
actual = dropdown_options[i]
assert actual['text'] == expected['text'], (
f"Option at index {i} has wrong text: expected '{expected['text']}', got '{actual['text']}'"
)
assert actual['value'] == expected['value'], (
f"Option at index {i} has wrong value: expected '{expected['value']}', got '{actual['value']}'"
)
async def test_select_dropdown_option(self, tools, browser_session, base_url, http_server):
"""Test that select_dropdown_option correctly selects an option from a dropdown."""
# Add route for dropdown test page
http_server.expect_request('/dropdown2').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>Dropdown Test</title>
</head>
<body>
<h1>Dropdown Test</h1>
<select id="test-dropdown" name="test-dropdown">
<option value="">Please select</option>
<option value="option1">First Option</option>
<option value="option2">Second Option</option>
<option value="option3">Third Option</option>
</select>
</body>
</html>
""",
content_type='text/html',
)
# Navigate to the dropdown test page
await tools.navigate(url=f'{base_url}/dropdown2', new_tab=False, browser_session=browser_session)
# Wait for the page to load using CDP
cdp_session = await browser_session.get_or_create_cdp_session()
assert cdp_session is not None, 'CDP session not initialized'
# Wait for page load by checking document ready state
await asyncio.sleep(0.5) # Brief wait for navigation to start
ready_state = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': 'document.readyState'}, session_id=cdp_session.session_id
)
# If not complete, wait a bit more
if ready_state.get('result', {}).get('value') != 'complete':
await asyncio.sleep(1.0)
# populate the selector map with highlight indices
await browser_session.get_browser_state_summary()
# Now get the selector map which should contain our dropdown
selector_map = await browser_session.get_selector_map()
# Find the dropdown element in the selector map
dropdown_index = None
for idx, element in selector_map.items():
if element.tag_name.lower() == 'select':
dropdown_index = idx
break
assert dropdown_index is not None, (
f'Could not find select element in selector map. Available elements: {[f"{idx}: {element.tag_name}" for idx, element in selector_map.items()]}'
)
# Execute the action with the dropdown index
result = await tools.select_dropdown(index=dropdown_index, text='Second Option', browser_session=browser_session)
# Verify the result structure
assert isinstance(result, ActionResult)
# Core logic validation: Verify selection was successful
assert result.extracted_content is not None
assert 'selected option' in result.extracted_content.lower()
assert 'Second Option' in result.extracted_content
# Verify the actual dropdown selection was made by checking the DOM using CDP
selected_value_result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': "document.getElementById('test-dropdown').value"}, session_id=cdp_session.session_id
)
selected_value = selected_value_result.get('result', {}).get('value')
assert selected_value == 'option2' # Second Option has value "option2"
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_extension_config.py | tests/ci/test_extension_config.py | """Tests for extension configuration environment variables."""
import os
import pytest
class TestDisableExtensionsEnvVar:
"""Test BROWSER_USE_DISABLE_EXTENSIONS environment variable."""
def test_default_value_is_true(self):
"""Without env var set, enable_default_extensions should default to True."""
# Clear the env var if it exists
original = os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
try:
# Import fresh to get the default
from browser_use.browser.profile import _get_enable_default_extensions_default
assert _get_enable_default_extensions_default() is True
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
@pytest.mark.parametrize(
'env_value,expected_enabled',
[
# Truthy values for DISABLE = extensions disabled (False)
('true', False),
('True', False),
('TRUE', False),
('1', False),
('yes', False),
('on', False),
# Falsy values for DISABLE = extensions enabled (True)
('false', True),
('False', True),
('FALSE', True),
('0', True),
('no', True),
('off', True),
('', True),
],
)
def test_env_var_values(self, env_value: str, expected_enabled: bool):
"""Test various env var values are parsed correctly."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = env_value
from browser_use.browser.profile import _get_enable_default_extensions_default
result = _get_enable_default_extensions_default()
assert result is expected_enabled, (
f"Expected enable_default_extensions={expected_enabled} for DISABLE_EXTENSIONS='{env_value}', got {result}"
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_browser_profile_uses_env_var(self):
"""Test that BrowserProfile picks up the env var."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
# Test with env var set to true (disable extensions)
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'true'
from browser_use.browser.profile import BrowserProfile
profile = BrowserProfile(headless=True)
assert profile.enable_default_extensions is False, (
'BrowserProfile should disable extensions when BROWSER_USE_DISABLE_EXTENSIONS=true'
)
# Test with env var set to false (enable extensions)
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'false'
profile2 = BrowserProfile(headless=True)
assert profile2.enable_default_extensions is True, (
'BrowserProfile should enable extensions when BROWSER_USE_DISABLE_EXTENSIONS=false'
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_explicit_param_overrides_env_var(self):
"""Test that explicit enable_default_extensions parameter overrides env var."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'true'
from browser_use.browser.profile import BrowserProfile
# Explicitly set to True should override env var
profile = BrowserProfile(headless=True, enable_default_extensions=True)
assert profile.enable_default_extensions is True, 'Explicit param should override env var'
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_browser_session_uses_env_var(self):
"""Test that BrowserSession picks up the env var via BrowserProfile."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = '1'
from browser_use.browser import BrowserSession
session = BrowserSession(headless=True)
assert session.browser_profile.enable_default_extensions is False, (
'BrowserSession should disable extensions when BROWSER_USE_DISABLE_EXTENSIONS=1'
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_file_system_docx.py | tests/ci/test_file_system_docx.py | """Tests for DOCX file support in the FileSystem."""
from pathlib import Path
import pytest
from browser_use.filesystem.file_system import (
DocxFile,
FileSystem,
)
class TestDocxFile:
"""Test DOCX file operations."""
@pytest.mark.asyncio
async def test_create_docx_file(self, tmp_path: Path):
"""Test creating a DOCX file."""
fs = FileSystem(tmp_path)
content = """# Heading 1
## Heading 2
### Heading 3
Regular paragraph text.
Another paragraph."""
result = await fs.write_file('test.docx', content)
assert 'successfully' in result.lower()
assert 'test.docx' in fs.list_files()
@pytest.mark.asyncio
async def test_read_docx_file_internal(self, tmp_path: Path):
"""Test reading internal DOCX file."""
fs = FileSystem(tmp_path)
content = """# Title
Some content here."""
await fs.write_file('test.docx', content)
result = await fs.read_file('test.docx')
assert 'test.docx' in result
assert 'Title' in result or 'content' in result
@pytest.mark.asyncio
async def test_read_docx_file_external(self, tmp_path: Path):
"""Test reading external DOCX file."""
from docx import Document
# Create an external DOCX file
external_file = tmp_path / 'external.docx'
doc = Document()
doc.add_heading('Test Heading', level=1)
doc.add_paragraph('Test paragraph content.')
doc.save(str(external_file))
fs = FileSystem(tmp_path / 'workspace')
structured_result = await fs.read_file_structured(str(external_file), external_file=True)
assert 'message' in structured_result
assert 'Test Heading' in structured_result['message']
assert 'Test paragraph content' in structured_result['message']
def test_docx_file_extension(self):
"""Test DOCX file extension property."""
docx_file = DocxFile(name='test')
assert docx_file.extension == 'docx'
assert docx_file.full_name == 'test.docx'
@pytest.mark.asyncio
async def test_docx_with_unicode_characters(self, tmp_path: Path):
"""Test DOCX with unicode and emoji content."""
fs = FileSystem(tmp_path)
content = """# Unicode Test 🚀
Chinese: 你好世界
Arabic: مرحبا بالعالم
Emoji: 😀 👍 🎉"""
result = await fs.write_file('unicode.docx', content)
assert 'successfully' in result.lower()
read_result = await fs.read_file('unicode.docx')
assert 'Unicode Test' in read_result
# Note: Emoji may not be preserved in all systems
@pytest.mark.asyncio
async def test_empty_docx_file(self, tmp_path: Path):
"""Test creating an empty DOCX file."""
fs = FileSystem(tmp_path)
result = await fs.write_file('empty.docx', '')
assert 'successfully' in result.lower()
@pytest.mark.asyncio
async def test_large_docx_file(self, tmp_path: Path):
"""Test creating a large DOCX file."""
fs = FileSystem(tmp_path)
# Create content with 1000 lines
lines = [f'Line {i}: This is a test line with some content.' for i in range(1000)]
content = '\n'.join(lines)
result = await fs.write_file('large.docx', content)
assert 'successfully' in result.lower()
# Verify it can be read back
read_result = await fs.read_file('large.docx')
assert 'Line 0:' in read_result
assert 'Line 999:' in read_result
@pytest.mark.asyncio
async def test_corrupted_docx_file(self, tmp_path: Path):
"""Test reading a corrupted DOCX file."""
# Create a corrupted DOCX file
external_file = tmp_path / 'corrupted.docx'
external_file.write_bytes(b'This is not a valid DOCX file')
fs = FileSystem(tmp_path / 'workspace')
structured_result = await fs.read_file_structured(str(external_file), external_file=True)
assert 'message' in structured_result
assert 'error' in structured_result['message'].lower() or 'could not' in structured_result['message'].lower()
@pytest.mark.asyncio
async def test_docx_with_multiple_paragraphs(self, tmp_path: Path):
"""Test DOCX with various paragraph styles."""
fs = FileSystem(tmp_path)
content = """# Main Title
## Subtitle
This is a regular paragraph.
This is another paragraph with some text.
### Section 3
Final paragraph here."""
await fs.write_file('multi.docx', content)
result = await fs.read_file('multi.docx')
# Should contain all the text (headings converted to paragraphs)
assert 'Main Title' in result
assert 'Subtitle' in result
assert 'regular paragraph' in result
assert 'Final paragraph' in result
class TestFileSystemDocxIntegration:
"""Integration tests for DOCX file type."""
@pytest.mark.asyncio
async def test_multiple_file_types_with_docx(self, tmp_path: Path):
"""Test working with DOCX alongside other file types."""
fs = FileSystem(tmp_path)
# Create different file types
await fs.write_file('doc.docx', '# Document\nContent here')
await fs.write_file('data.json', '{"key": "value"}')
await fs.write_file('notes.txt', 'Some notes')
# Verify all files exist
files = fs.list_files()
assert 'doc.docx' in files
assert 'data.json' in files
assert 'notes.txt' in files
assert 'todo.md' in files # Default file
@pytest.mark.asyncio
async def test_file_system_state_with_docx(self, tmp_path: Path):
"""Test FileSystem state serialization with DOCX files."""
fs = FileSystem(tmp_path)
# Create files
await fs.write_file('test.docx', '# Title\nContent')
await fs.write_file('data.txt', 'Some text')
# Get state
state = fs.get_state()
assert 'test.docx' in state.files
assert 'data.txt' in state.files
# Restore from state
fs2 = FileSystem.from_state(state)
assert 'test.docx' in fs2.list_files()
assert 'data.txt' in fs2.list_files()
def test_allowed_extensions_include_docx(self, tmp_path: Path):
"""Test that DOCX is in allowed extensions."""
fs = FileSystem(tmp_path)
allowed = fs.get_allowed_extensions()
assert 'docx' in allowed
if __name__ == '__main__':
pytest.main([__file__, '-v'])
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_sandbox_structured_output.py | tests/ci/test_sandbox_structured_output.py | """
Tests for sandbox structured output handling.
Tests that output_model_schema works correctly when using @sandbox decorator,
specifically that the _output_model_schema private attribute is preserved
through serialization/deserialization.
"""
from pydantic import BaseModel
from browser_use.agent.views import ActionResult, AgentHistory, AgentHistoryList, BrowserStateHistory
from browser_use.sandbox.sandbox import _parse_with_type_annotation
class ExtractedData(BaseModel):
"""Example structured output model"""
title: str
price: float
in_stock: bool
class NestedModel(BaseModel):
"""Nested model for testing complex structures"""
items: list[ExtractedData]
total_count: int
class TestGetStructuredOutput:
"""Tests for AgentHistoryList.get_structured_output method"""
def test_get_structured_output_parses_final_result(self):
"""Test that get_structured_output correctly parses final result with provided schema"""
# Create history with structured JSON as final result
json_result = '{"title": "Test Product", "price": 29.99, "in_stock": true}'
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=json_result, is_done=True)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
# Use get_structured_output with explicit schema
result = history.get_structured_output(ExtractedData)
assert result is not None
assert isinstance(result, ExtractedData)
assert result.title == 'Test Product'
assert result.price == 29.99
assert result.in_stock is True
def test_get_structured_output_returns_none_when_no_final_result(self):
"""Test that get_structured_output returns None when there's no final result"""
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=None)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
result = history.get_structured_output(ExtractedData)
assert result is None
def test_get_structured_output_with_nested_model(self):
"""Test get_structured_output works with nested Pydantic models"""
json_result = """
{
"items": [
{"title": "Item 1", "price": 10.0, "in_stock": true},
{"title": "Item 2", "price": 20.0, "in_stock": false}
],
"total_count": 2
}
"""
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=json_result, is_done=True)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
result = history.get_structured_output(NestedModel)
assert result is not None
assert len(result.items) == 2
assert result.items[0].title == 'Item 1'
assert result.total_count == 2
class TestSandboxStructuredOutputParsing:
"""Tests for _parse_with_type_annotation handling of AgentHistoryList[T]"""
def test_parse_agent_history_list_without_generic(self):
"""Test parsing AgentHistoryList without generic parameter"""
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': '{"title": "Test", "price": 9.99, "in_stock": true}', 'is_done': True}],
'state': {'url': 'https://example.com', 'title': 'Test', 'tabs': []},
}
]
}
result = _parse_with_type_annotation(data, AgentHistoryList)
assert isinstance(result, AgentHistoryList)
assert len(result.history) == 1
# Without generic, _output_model_schema should be None
assert result._output_model_schema is None
def test_parse_agent_history_list_with_generic_parameter(self):
"""Test parsing AgentHistoryList[ExtractedData] preserves output model schema"""
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': '{"title": "Test", "price": 9.99, "in_stock": true}', 'is_done': True}],
'state': {'url': 'https://example.com', 'title': 'Test', 'tabs': []},
}
]
}
# Parse with generic type annotation
result = _parse_with_type_annotation(data, AgentHistoryList[ExtractedData])
assert isinstance(result, AgentHistoryList)
assert len(result.history) == 1
# With generic, _output_model_schema should be set
assert result._output_model_schema is ExtractedData
# Now structured_output property should work
structured = result.structured_output
assert structured is not None
assert isinstance(structured, ExtractedData)
assert structured.title == 'Test'
assert structured.price == 9.99
assert structured.in_stock is True
def test_parse_agent_history_list_structured_output_after_sandbox(self):
"""Simulate full sandbox round-trip with AgentHistoryList[T]"""
# This simulates what happens when sandbox returns data
json_content = '{"title": "Product", "price": 49.99, "in_stock": false}'
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': json_content, 'is_done': True}],
'state': {'url': 'https://shop.com', 'title': 'Shop', 'tabs': []},
}
]
}
# Sandbox parses with return type annotation AgentHistoryList[ExtractedData]
result = _parse_with_type_annotation(data, AgentHistoryList[ExtractedData])
# User accesses structured_output property
output = result.structured_output
assert output is not None
assert output.title == 'Product'
assert output.price == 49.99
assert output.in_stock is False
class TestStructuredOutputPropertyFallback:
	"""Behavior of the structured_output property with and without _output_model_schema."""

	@staticmethod
	def _history_with_result(json_result: str) -> AgentHistoryList:
		"""Build a one-step history whose final result carries *json_result*."""
		return AgentHistoryList(
			history=[
				AgentHistory(
					model_output=None,
					result=[ActionResult(extracted_content=json_result, is_done=True)],
					state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
				)
			]
		)

	def test_structured_output_property_works_when_schema_set(self):
		"""structured_output deserializes when _output_model_schema is present."""
		history = self._history_with_result('{"title": "Test", "price": 5.0, "in_stock": true}')

		# Agent.run() normally assigns this; emulate it here.
		history._output_model_schema = ExtractedData

		parsed = history.structured_output
		assert parsed is not None
		assert isinstance(parsed, ExtractedData)
		assert parsed.title == 'Test'

	def test_structured_output_property_returns_none_without_schema(self):
		"""structured_output is None without a schema, but an explicit schema still works."""
		history = self._history_with_result('{"title": "Test", "price": 5.0, "in_stock": true}')

		# No _output_model_schema assigned: the property has nothing to parse with.
		assert history.structured_output is None

		# Passing the schema explicitly bypasses the stored-schema requirement.
		explicit = history.get_structured_output(ExtractedData)
		assert explicit is not None
		assert explicit.title == 'Test'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_ai_step.py | tests/ci/test_ai_step.py | """Tests for AI step private method used during rerun"""
from unittest.mock import AsyncMock
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionResult
from tests.ci.conftest import create_mock_llm
async def test_execute_ai_step_basic():
	"""_execute_ai_step should return the AI-extracted content as an ActionResult."""

	async def fake_ainvoke(*args, **kwargs):
		from browser_use.llm.views import ChatInvokeCompletion

		return ChatInvokeCompletion(completion='Extracted: Test content from page', usage=None)

	# Dedicated mock for the AI step itself (distinct from the agent's main LLM).
	ai_llm = AsyncMock()
	ai_llm.ainvoke.side_effect = fake_ainvoke
	ai_llm.model = 'mock-model'

	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	await agent.browser_session.start()
	try:
		result = await agent._execute_ai_step(
			query='Extract the main heading',
			include_screenshot=False,
			extract_links=False,
			ai_step_llm=ai_llm,
		)

		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Extracted: Test content from page' in result.extracted_content
		assert result.long_term_memory is not None
	finally:
		await agent.close()
async def test_execute_ai_step_with_screenshot():
	"""_execute_ai_step should attach a screenshot to the LLM messages when asked to."""

	async def fake_ainvoke(*args, **kwargs):
		from browser_use.llm.views import ChatInvokeCompletion

		messages = args[0] if args else []
		assert len(messages) >= 1, 'Should have at least one message'

		# The screenshot surfaces as an image_url content part on some message.
		found_image = any(
			getattr(part, 'type', None) == 'image_url'
			for msg in messages
			if hasattr(msg, 'content') and isinstance(msg.content, list)
			for part in msg.content
		)
		assert found_image, 'Should include screenshot in message'
		return ChatInvokeCompletion(completion='Extracted content with screenshot analysis', usage=None)

	ai_llm = AsyncMock()
	ai_llm.ainvoke.side_effect = fake_ainvoke
	ai_llm.model = 'mock-model'

	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	await agent.browser_session.start()
	try:
		result = await agent._execute_ai_step(
			query='Analyze this page',
			include_screenshot=True,
			extract_links=False,
			ai_step_llm=ai_llm,
		)

		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Extracted content with screenshot analysis' in result.extracted_content
	finally:
		await agent.close()
async def test_execute_ai_step_error_handling():
	"""LLM failures inside _execute_ai_step surface as ActionResult.error, not exceptions."""
	failing_llm = AsyncMock()
	failing_llm.ainvoke.side_effect = Exception('LLM service unavailable')
	failing_llm.model = 'mock-model'

	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	await agent.browser_session.start()
	try:
		result = await agent._execute_ai_step(
			query='Extract data',
			include_screenshot=False,
			ai_step_llm=failing_llm,
		)

		# The failure is reported in the result rather than raised to the caller.
		assert isinstance(result, ActionResult)
		assert result.error is not None
		assert 'AI step failed' in result.error
	finally:
		await agent.close()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_history_wait_time.py | tests/ci/test_history_wait_time.py | from browser_use.agent.views import StepMetadata
def test_step_metadata_has_step_interval_field():
	"""StepMetadata exposes a step_interval field that round-trips its value."""
	meta = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=12.5, step_interval=2.5)

	assert hasattr(meta, 'step_interval')
	assert meta.step_interval == 2.5
def test_step_metadata_step_interval_optional():
	"""step_interval may be None explicitly and defaults to None when omitted."""
	explicit_none = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0, step_interval=None)
	assert explicit_none.step_interval is None

	# Omitting the field entirely behaves the same (e.g. the first step of a run).
	omitted = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0)
	assert omitted.step_interval is None
def test_step_interval_calculation():
	"""step_interval equals the previous step's duration (end - start, floored at 0)."""
	# Previous step (Step 1): runs 100.0 -> 102.5, i.e. a 2.5s duration.
	prev_start, prev_end = 100.0, 102.5
	expected_interval = prev_end - prev_start

	# The rerun system uses this value as "wait this long before executing Step 2".
	computed_interval = max(0, prev_end - prev_start)

	assert abs(computed_interval - expected_interval) < 0.001  # Float comparison
	assert computed_interval == 2.5
def test_step_metadata_serialization_with_step_interval():
	"""model_dump() includes step_interval whether it is set or None."""
	with_interval = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=12.5, step_interval=2.5)
	dumped = with_interval.model_dump()
	assert 'step_interval' in dumped
	assert dumped['step_interval'] == 2.5

	# A None value is still serialized under its key, not dropped.
	without_interval = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0, step_interval=None)
	dumped = without_interval.model_dump()
	assert 'step_interval' in dumped
	assert dumped['step_interval'] is None
def test_step_metadata_deserialization_with_step_interval():
	"""model_validate() accepts dicts both with and without step_interval."""
	loaded = StepMetadata.model_validate(
		{'step_number': 1, 'step_start_time': 10.0, 'step_end_time': 12.5, 'step_interval': 2.5}
	)
	assert loaded.step_interval == 2.5

	# Old-format payloads omit the field entirely; it must default to None.
	legacy = StepMetadata.model_validate(
		{
			'step_number': 0,
			'step_start_time': 0.0,
			'step_end_time': 1.0,
		}
	)
	assert legacy.step_interval is None
def test_step_interval_backwards_compatibility():
	"""Metadata saved before step_interval existed still loads cleanly."""
	legacy_payload = {
		'step_number': 0,
		'step_start_time': 1000.0,
		'step_end_time': 1002.5,
		# No step_interval key: simulates the pre-feature JSON format.
	}

	meta = StepMetadata.model_validate(legacy_payload)

	assert meta.step_number == 0
	assert meta.step_start_time == 1000.0
	assert meta.step_end_time == 1002.5
	assert meta.step_interval is None  # Default value
def test_duration_seconds_property_still_works():
	"""duration_seconds stays independent of the new step_interval field."""
	meta = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=13.5, step_interval=2.0)

	# duration_seconds is end - start (13.5 - 10.0)...
	assert meta.duration_seconds == 3.5
	# ...while step_interval is a separate, unrelated value.
	assert meta.step_interval == 2.0
def test_step_metadata_json_round_trip():
	"""step_interval survives model_dump_json() -> model_validate_json()."""
	original = StepMetadata(step_number=1, step_start_time=100.0, step_end_time=102.5, step_interval=1.5)

	restored = StepMetadata.model_validate_json(original.model_dump_json())

	assert restored.step_interval == 1.5
	assert restored.step_number == 1
	assert restored.step_start_time == 100.0
	assert restored.step_end_time == 102.5
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_coordinate_clicking.py | tests/ci/test_coordinate_clicking.py | """Tests for coordinate clicking feature.
This feature allows certain models (Claude Sonnet 4, Claude Opus 4, Gemini 3 Pro, browser-use/* models)
to use coordinate-based clicking, while other models only get index-based clicking.
"""
import pytest
from browser_use.tools.service import Tools
from browser_use.tools.views import ClickElementAction, ClickElementActionIndexOnly
class TestCoordinateClickingTools:
	"""Coordinate-clicking toggling on the Tools class."""

	@staticmethod
	def _click_action(tools: Tools):
		"""Fetch the registered 'click' action, asserting it exists."""
		action = tools.registry.registry.actions.get('click')
		assert action is not None
		return action

	def test_default_coordinate_clicking_disabled(self):
		"""A fresh Tools instance starts with coordinate clicking off."""
		assert Tools()._coordinate_clicking_enabled is False

	def test_default_uses_index_only_action(self):
		"""Without coordinates enabled, 'click' binds to ClickElementActionIndexOnly."""
		assert self._click_action(Tools()).param_model == ClickElementActionIndexOnly

	def test_default_click_schema_has_only_index(self):
		"""Default click schema exposes index but no coordinate properties."""
		props = self._click_action(Tools()).param_model.model_json_schema()['properties']
		assert 'index' in props
		assert 'coordinate_x' not in props
		assert 'coordinate_y' not in props

	def test_enable_coordinate_clicking(self):
		"""set_coordinate_clicking(True) swaps in ClickElementAction."""
		tools = Tools()
		tools.set_coordinate_clicking(True)

		assert tools._coordinate_clicking_enabled is True
		assert self._click_action(tools).param_model == ClickElementAction

	def test_enabled_click_schema_has_coordinates(self):
		"""Enabled click schema exposes index plus both coordinate properties."""
		tools = Tools()
		tools.set_coordinate_clicking(True)

		props = self._click_action(tools).param_model.model_json_schema()['properties']
		assert 'index' in props
		assert 'coordinate_x' in props
		assert 'coordinate_y' in props

	def test_disable_coordinate_clicking(self):
		"""Toggling off restores the index-only click action."""
		tools = Tools()
		tools.set_coordinate_clicking(True)
		tools.set_coordinate_clicking(False)

		assert tools._coordinate_clicking_enabled is False
		assert self._click_action(tools).param_model == ClickElementActionIndexOnly

	def test_set_coordinate_clicking_idempotent(self):
		"""Repeated calls with the same value are harmless."""
		tools = Tools()

		tools.set_coordinate_clicking(True)
		tools.set_coordinate_clicking(True)
		assert tools._coordinate_clicking_enabled is True

		tools.set_coordinate_clicking(False)
		tools.set_coordinate_clicking(False)
		assert tools._coordinate_clicking_enabled is False

	def test_schema_title_consistent(self):
		"""The schema title stays 'ClickElementAction' in both modes."""
		tools = Tools()

		# Disabled (default) mode.
		assert self._click_action(tools).param_model.model_json_schema()['title'] == 'ClickElementAction'

		# Enabled mode.
		tools.set_coordinate_clicking(True)
		assert self._click_action(tools).param_model.model_json_schema()['title'] == 'ClickElementAction'
class TestCoordinateClickingModelDetection:
	"""Model-name pattern matching that gates coordinate clicking."""

	@pytest.mark.parametrize(
		'model_name,expected_coords',
		[
			# Models that SHOULD have coordinate clicking (claude-sonnet-4*, claude-opus-4*, gemini-3-pro*, browser-use/* models)
			('claude-sonnet-4-5', True),
			('claude-sonnet-4-5-20250101', True),
			('claude-sonnet-4-0', True),
			('claude-sonnet-4', True),
			('claude-opus-4-5', True),
			('claude-opus-4-5-latest', True),
			('claude-opus-4-0', True),
			('claude-opus-4', True),
			('gemini-3-pro-preview', True),
			('gemini-3-pro', True),
			('browser-use/fast', True),
			('browser-use/accurate', True),
			('CLAUDE-SONNET-4-5', True),  # Case insensitive
			('CLAUDE-SONNET-4', True),  # Case insensitive
			('GEMINI-3-PRO', True),  # Case insensitive
			# Models that should NOT have coordinate clicking
			('claude-3-5-sonnet', False),
			('claude-sonnet-3-5', False),
			('gpt-4o', False),
			('gpt-4-turbo', False),
			('gemini-2.0-flash', False),
			('gemini-1.5-pro', False),
			('llama-3.1-70b', False),
			('mistral-large', False),
		],
	)
	def test_model_detection_patterns(self, model_name: str, expected_coords: bool):
		"""Each model name maps to the expected coordinate-clicking capability."""
		lowered = model_name.lower()
		patterns = ('claude-sonnet-4', 'claude-opus-4', 'gemini-3-pro', 'browser-use/')

		detected = any(pattern in lowered for pattern in patterns)

		assert detected == expected_coords, f'Model {model_name}: expected {expected_coords}, got {detected}'
class TestCoordinateClickingWithPassedTools:
	"""Coordinate clicking when a Tools instance is supplied externally to Agent."""

	def test_tools_can_be_modified_after_creation(self):
		"""An externally created Tools can later enable coordinate clicking."""
		tools = Tools()
		assert tools._coordinate_clicking_enabled is False

		# This mirrors what Agent does when the model supports coordinates.
		tools.set_coordinate_clicking(True)

		click = tools.registry.registry.actions.get('click')
		assert click is not None
		assert click.param_model == ClickElementAction

	def test_tools_state_preserved_after_modification(self):
		"""Toggling coordinate clicking leaves other registry state intact."""
		tools = Tools(exclude_actions=['search'])
		assert 'search' not in tools.registry.registry.actions

		tools.set_coordinate_clicking(True)

		# The exclusion survives the toggle...
		assert 'search' not in tools.registry.registry.actions
		# ...and click now carries coordinates.
		click = tools.registry.registry.actions.get('click')
		assert click is not None
		assert click.param_model == ClickElementAction
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/conftest.py | tests/ci/conftest.py | """
Pytest configuration for browser-use CI tests.
Sets up environment variables to ensure tests never connect to production services.
"""
import os
import socketserver
import tempfile
from unittest.mock import AsyncMock
import pytest
from dotenv import load_dotenv
from pytest_httpserver import HTTPServer
# Fix for httpserver hanging on shutdown - prevent blocking on socket close
# This prevents tests from hanging when shutting down HTTP servers
socketserver.ThreadingMixIn.block_on_close = False
# Also set daemon threads to prevent hanging
socketserver.ThreadingMixIn.daemon_threads = True
from browser_use.agent.views import AgentOutput
from browser_use.llm import BaseChatModel
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.tools.service import Tools
# Load environment variables before any imports
load_dotenv()
# Skip LLM API key verification for tests
os.environ['SKIP_LLM_API_KEY_VERIFICATION'] = 'true'
from bubus import BaseEvent
from browser_use import Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.sync.service import CloudSync
@pytest.fixture(autouse=True)
def setup_test_environment():
	"""
	Automatically set up test environment for all tests.

	Overrides cloud/telemetry env vars for the duration of each test and
	restores the prior values afterwards.
	"""
	# Temp config dir (extensions still use the default location).
	config_dir = tempfile.mkdtemp(prefix='browseruse_tests_')

	test_env_vars = {
		'SKIP_LLM_API_KEY_VERIFICATION': 'true',
		'ANONYMIZED_TELEMETRY': 'false',
		'BROWSER_USE_CLOUD_SYNC': 'true',
		'BROWSER_USE_CLOUD_API_URL': 'http://placeholder-will-be-replaced-by-specific-test-fixtures',
		'BROWSER_USE_CLOUD_UI_URL': 'http://placeholder-will-be-replaced-by-specific-test-fixtures',
		# Don't set BROWSER_USE_CONFIG_DIR anymore - let it use the default ~/.config/browseruse
		# This way extensions will be cached in ~/.config/browseruse/extensions
	}

	# Snapshot prior values (None = was unset) so they can be restored after the test.
	original_env = {key: os.environ.get(key) for key in test_env_vars}
	os.environ.update(test_env_vars)

	yield

	# Restore original environment
	for key, value in original_env.items():
		if value is None:
			os.environ.pop(key, None)
		else:
			os.environ[key] = value
# not a fixture, mock_llm() provides this in a fixture below, this is a helper so that it can accept args
def create_mock_llm(actions: list[str] | None = None) -> BaseChatModel:
	"""Create a mock LLM that returns specified actions or a default done action.

	Args:
		actions: Optional list of JSON strings representing actions to return in sequence.
			If not provided, returns a single done action.
			After all actions are exhausted, returns a done action.

	Returns:
		Mock LLM that will return the actions in order, or just a done action if no actions provided.
	"""
	# Build the concrete AgentOutput model so scripted JSON can be validated
	# exactly the way the real agent would validate an LLM response.
	tools = Tools()
	ActionModel = tools.registry.create_action_model()
	AgentOutputWithActions = AgentOutput.type_with_custom_actions(ActionModel)
	llm = AsyncMock(spec=BaseChatModel)
	llm.model = 'mock-llm'
	# Skip real API-key verification paths in Agent setup.
	llm._verified_api_keys = True
	# Add missing properties from BaseChatModel protocol
	llm.provider = 'mock'
	llm.name = 'mock-llm'
	llm.model_name = 'mock-llm'  # Ensure this returns a string, not a mock
	# Default done action
	default_done_action = """
	{
		"thinking": "null",
		"evaluation_previous_goal": "Successfully completed the task",
		"memory": "Task completed",
		"next_goal": "Task completed",
		"action": [
			{
				"done": {
					"text": "Task completed successfully",
					"success": true
				}
			}
		]
	}
	"""
	# Unified logic for both cases
	action_index = 0

	# Walk through the scripted actions once; afterwards always fall back
	# to the default done action so the agent terminates.
	def get_next_action() -> str:
		nonlocal action_index
		if actions is not None and action_index < len(actions):
			action = actions[action_index]
			action_index += 1
			return action
		else:
			return default_done_action

	async def mock_ainvoke(*args, **kwargs):
		# Check if output_format is provided (2nd argument or in kwargs)
		output_format = None
		if len(args) >= 2:
			output_format = args[1]
		elif 'output_format' in kwargs:
			output_format = kwargs['output_format']
		action_json = get_next_action()
		if output_format is None:
			# Return string completion
			return ChatInvokeCompletion(completion=action_json, usage=None)
		else:
			# Parse with provided output_format (could be AgentOutputWithActions or another model)
			if output_format == AgentOutputWithActions:
				parsed = AgentOutputWithActions.model_validate_json(action_json)
			else:
				# For other output formats, try to parse the JSON with that model
				parsed = output_format.model_validate_json(action_json)
			return ChatInvokeCompletion(completion=parsed, usage=None)

	llm.ainvoke.side_effect = mock_ainvoke
	return llm
@pytest.fixture(scope='module')
async def browser_session():
	"""Create a real browser session for testing"""
	profile = BrowserProfile(
		headless=True,
		user_data_dir=None,  # Use temporary directory
		keep_alive=True,
		enable_default_extensions=True,  # Enable extensions during tests
	)
	session = BrowserSession(browser_profile=profile)
	await session.start()

	yield session

	await session.kill()
	# Ensure event bus is properly stopped
	await session.event_bus.stop(clear=True, timeout=5)
@pytest.fixture(scope='function')
def cloud_sync(httpserver: HTTPServer):
	"""
	Create a CloudSync instance configured for testing.

	Points the cloud-sync environment variables at the local test HTTP
	server and returns a CloudSync instance talking to it.
	"""
	base_url = httpserver.url_for('')

	# Route all cloud traffic to the local httpserver.
	os.environ['BROWSER_USE_CLOUD_API_URL'] = base_url
	os.environ['BROWSER_USE_CLOUD_UI_URL'] = base_url
	os.environ['BROWSER_USE_CLOUD_SYNC'] = 'true'

	return CloudSync(base_url=base_url)
@pytest.fixture(scope='function')
def mock_llm():
	"""Mock LLM fixture that always answers with the default done action."""
	# No scripted actions: create_mock_llm falls back to a single done action.
	return create_mock_llm(actions=None)
@pytest.fixture(scope='function')
def agent_with_cloud(browser_session, mock_llm, cloud_sync):
	"""Create agent (cloud_sync parameter removed)."""
	# cloud_sync is requested only so the fixture's env-var setup runs first.
	return Agent(
		task='Test task',
		llm=mock_llm,
		browser_session=browser_session,
	)
@pytest.fixture(scope='function')
def event_collector():
	"""Helper to collect all events emitted during tests"""

	class EventCollector:
		"""Accumulates every event plus the order of their event types."""

		def __init__(self):
			self.events: list[BaseEvent] = []
			self.event_order: list[str] = []

		async def collect_event(self, event: BaseEvent):
			self.events.append(event)
			self.event_order.append(event.event_type)
			return 'collected'

		def get_events_by_type(self, event_type: str) -> list[BaseEvent]:
			return [e for e in self.events if e.event_type == event_type]

		def clear(self):
			self.events.clear()
			self.event_order.clear()

	return EventCollector()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_file_system_images.py | tests/ci/test_file_system_images.py | """Tests for image file support in the FileSystem."""
import base64
import io
from pathlib import Path
import pytest
from PIL import Image
from browser_use.filesystem.file_system import FileSystem
class TestImageFiles:
	"""Test image file operations - only external reading supported."""

	def create_test_image(self, width: int = 100, height: int = 100, format: str = 'PNG') -> bytes:
		"""Render a solid-red test image and return its encoded bytes."""
		buffer = io.BytesIO()
		Image.new('RGB', (width, height), color='red').save(buffer, format=format)
		return buffer.getvalue()

	@pytest.mark.asyncio
	async def test_read_external_png_image(self, tmp_path: Path):
		"""Reading an external PNG returns its exact bytes as base64."""
		source = tmp_path / 'test.png'
		png_bytes = self.create_test_image(width=300, height=200, format='PNG')
		source.write_bytes(png_bytes)

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		assert 'message' in result
		assert 'Read image file' in result['message']
		assert 'images' in result
		assert result['images'] is not None
		assert len(result['images']) == 1

		image = result['images'][0]
		assert image['name'] == 'test.png'
		assert 'data' in image
		# The base64 payload must decode back to the exact original bytes.
		assert base64.b64decode(image['data']) == png_bytes

	@pytest.mark.asyncio
	async def test_read_external_jpg_image(self, tmp_path: Path):
		"""Reading an external JPG returns a non-empty base64 payload."""
		source = tmp_path / 'photo.jpg'
		source.write_bytes(self.create_test_image(width=150, height=100, format='JPEG'))

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		assert 'message' in result
		assert 'images' in result
		assert result['images'] is not None

		image = result['images'][0]
		assert image['name'] == 'photo.jpg'
		assert len(base64.b64decode(image['data'])) > 0

	@pytest.mark.asyncio
	async def test_read_jpeg_extension(self, tmp_path: Path):
		"""The .jpeg extension (not just .jpg) is recognized."""
		source = tmp_path / 'test.jpeg'
		source.write_bytes(self.create_test_image(format='JPEG'))

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		assert result['images'] is not None
		assert result['images'][0]['name'] == 'test.jpeg'

	@pytest.mark.asyncio
	async def test_read_nonexistent_image(self, tmp_path: Path):
		"""A missing image path yields a not-found message and no images."""
		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured('/path/to/nonexistent.png', external_file=True)

		assert 'message' in result
		assert 'not found' in result['message'].lower()
		assert result['images'] is None

	@pytest.mark.asyncio
	async def test_corrupted_image_file(self, tmp_path: Path):
		"""Invalid image bytes are still returned - the format is not validated."""
		source = tmp_path / 'corrupted.png'
		source.write_bytes(b'Not a valid PNG file')

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		# Base64 encoding succeeds regardless of whether the bytes are a real PNG.
		assert 'message' in result
		assert 'Read image file' in result['message']
		assert result['images'] is not None

	@pytest.mark.asyncio
	async def test_large_image_file(self, tmp_path: Path):
		"""A 2000x2000 image reads back with a substantial base64 payload."""
		source = tmp_path / 'large.png'
		Image.new('RGB', (2000, 2000), color='blue').save(str(source), format='PNG')

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		assert 'images' in result
		assert result['images'] is not None
		# Verify base64 data is present and substantial
		assert len(result['images'][0]['data']) > 10000

	@pytest.mark.asyncio
	async def test_multiple_images_in_sequence(self, tmp_path: Path):
		"""Several images can be read one after another."""
		fs = FileSystem(tmp_path / 'workspace')

		colors = ['red', 'green', 'blue']
		for i, color in enumerate(colors):
			Image.new('RGB', (100, 100), color=color).save(str(tmp_path / f'image_{i}.png'), format='PNG')

		for i in range(len(colors)):
			result = await fs.read_file_structured(str(tmp_path / f'image_{i}.png'), external_file=True)
			assert result['images'] is not None
			assert result['images'][0]['name'] == f'image_{i}.png'

	@pytest.mark.asyncio
	async def test_different_image_formats(self, tmp_path: Path):
		"""jpg, jpeg, and png variants all read successfully."""
		fs = FileSystem(tmp_path / 'workspace')
		img = Image.new('RGB', (50, 50), color='yellow')

		jpg_file = tmp_path / 'test.jpg'
		img.save(str(jpg_file), format='JPEG')
		assert (await fs.read_file_structured(str(jpg_file), external_file=True))['images'] is not None

		jpeg_file = tmp_path / 'test.jpeg'
		img.save(str(jpeg_file), format='JPEG')
		assert (await fs.read_file_structured(str(jpeg_file), external_file=True))['images'] is not None

		png_file = tmp_path / 'test.png'
		img.save(str(png_file), format='PNG')
		assert (await fs.read_file_structured(str(png_file), external_file=True))['images'] is not None

	@pytest.mark.asyncio
	async def test_image_with_transparency(self, tmp_path: Path):
		"""RGBA PNGs with an alpha channel read back with data."""
		source = tmp_path / 'transparent.png'
		Image.new('RGBA', (100, 100), color=(255, 0, 0, 128)).save(str(source), format='PNG')

		fs = FileSystem(tmp_path / 'workspace')
		result = await fs.read_file_structured(str(source), external_file=True)

		assert result['images'] is not None
		assert len(result['images'][0]['data']) > 0
class TestActionResultImages:
	"""Test ActionResult with images field."""

	def test_action_result_with_images(self):
		"""An ActionResult can carry a single image entry."""
		from browser_use.agent.views import ActionResult

		result = ActionResult(
			extracted_content='Read image file test.png',
			long_term_memory='Read image file test.png',
			images=[{'name': 'test.png', 'data': 'base64_encoded_data_here'}],
			include_extracted_content_only_once=True,
		)

		assert result.images is not None
		assert len(result.images) == 1
		assert result.images[0]['name'] == 'test.png'
		assert result.images[0]['data'] == 'base64_encoded_data_here'

	def test_action_result_without_images(self):
		"""images defaults to None when not supplied."""
		from browser_use.agent.views import ActionResult

		result = ActionResult(extracted_content='Some text', long_term_memory='Memory')
		assert result.images is None

	def test_action_result_with_multiple_images(self):
		"""An ActionResult can carry several image entries, preserving order."""
		from browser_use.agent.views import ActionResult

		result = ActionResult(
			extracted_content='Read multiple images',
			long_term_memory='Read image files',
			images=[
				{'name': 'image1.png', 'data': 'base64_data_1'},
				{'name': 'image2.jpg', 'data': 'base64_data_2'},
			],
			include_extracted_content_only_once=True,
		)

		assert result.images is not None
		assert len(result.images) == 2
		assert result.images[0]['name'] == 'image1.png'
		assert result.images[1]['name'] == 'image2.jpg'

	def test_action_result_with_empty_images_list(self):
		"""An explicitly empty images list is preserved as-is (not coerced to None)."""
		from browser_use.agent.views import ActionResult

		result = ActionResult(
			extracted_content='No images',
			images=[],
		)
		assert result.images == []
# Allow running this module directly (outside the pytest CLI).
if __name__ == '__main__':
	pytest.main([__file__, '-v'])
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_variable_substitution.py | tests/ci/test_variable_substitution.py | """Unit tests for variable substitution in agent history"""
from types import SimpleNamespace
from browser_use.agent.service import Agent
from browser_use.dom.views import DOMInteractedElement, NodeType
def create_test_element(attributes: dict[str, str] | None = None) -> DOMInteractedElement:
	"""Build a minimal DOMInteractedElement (an <input> node) for substitution tests."""
	return DOMInteractedElement(
		node_id=1,
		backend_node_id=1,
		frame_id='frame1',
		node_type=NodeType.ELEMENT_NODE,
		node_value='',
		node_name='input',
		attributes={} if attributes is None else attributes,
		bounds=None,
		x_path='//*[@id="test"]',
		element_hash=12345,
	)
def create_mock_history(actions_with_elements: list[tuple[dict, DOMInteractedElement | None]]):
	"""Wrap (action_dict, element) pairs into a SimpleNamespace tree shaped like agent history.

	Each pair becomes one history step with a single action; the element (when
	given) is exposed as state.interacted_element[0], otherwise state carries None.
	"""
	steps = []
	for action_dict, element in actions_with_elements:
		step = SimpleNamespace(
			model_output=SimpleNamespace(action=[SimpleNamespace(**action_dict)]),
			state=SimpleNamespace(interacted_element=[element] if element else None),
		)
		steps.append(step)
	return SimpleNamespace(history=steps)
def test_substitute_single_variable(mock_llm):
	"""One detected variable gets replaced with its new value."""
	agent = Agent(task='test', llm=mock_llm)
	email_field = create_test_element(attributes={'type': 'email'})
	history = create_mock_history([({'input': {'index': 1, 'text': 'old@example.com'}}, email_field)])

	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	payload = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert payload['input']['text'] == 'new@example.com'


def test_substitute_multiple_variables(mock_llm):
	"""Several variables of different kinds are all substituted in one pass."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
			({'input': {'index': 3, 'text': '1990-01-01'}}, create_test_element(attributes={'type': 'date'})),
		]
	)

	updated = agent._substitute_variables_in_history(
		history,  # type: ignore[arg-type]
		{'email': 'new@example.com', 'first_name': 'Jane', 'date': '1995-05-15'},
	)

	texts = [vars(step.model_output.action[0])['input']['text'] for step in updated.history]  # type: ignore[attr-defined]
	assert texts == ['new@example.com', 'Jane', '1995-05-15']


def test_substitute_partial_variables(mock_llm):
	"""Only the variables provided are substituted; other values stay put."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
		]
	)

	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	texts = [vars(step.model_output.action[0])['input']['text'] for step in updated.history]  # type: ignore[attr-defined]
	assert texts == ['new@example.com', 'John']  # first name left unchanged


def test_substitute_nonexistent_variable(mock_llm):
	"""Unknown variable names are ignored without touching the history."""
	agent = Agent(task='test', llm=mock_llm)
	email_field = create_test_element(attributes={'type': 'email'})
	history = create_mock_history([({'input': {'index': 1, 'text': 'old@example.com'}}, email_field)])

	updated = agent._substitute_variables_in_history(history, {'nonexistent_var': 'some_value'})  # type: ignore[arg-type]

	payload = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert payload['input']['text'] == 'old@example.com'
def test_substitute_in_nested_dict(mock_llm):
	"""Substitution recurses into nested dictionaries inside an action."""
	agent = Agent(task='test', llm=mock_llm)
	nested_action = {
		'search_google': {
			'query': 'test@example.com',
			'metadata': {'user': 'test@example.com'},
		}
	}
	history = create_mock_history([(nested_action, create_test_element(attributes={'type': 'email'}))])

	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	payload = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert payload['search_google']['query'] == 'new@example.com'
	assert payload['search_google']['metadata']['user'] == 'new@example.com'


def test_substitute_in_list(mock_llm):
	"""Substitution also rewrites matching values inside list structures."""
	agent = Agent(task='test', llm=mock_llm)
	# Step 1 makes the email detectable via the element; step 2 repeats the
	# same value inside a plain list with no element context.
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'some_action': {'items': ['test@example.com', 'other_value', 'test@example.com']}}, None),
		]
	)

	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	first = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	second = vars(updated.history[1].model_output.action[0])  # type: ignore[attr-defined]
	assert first['input']['text'] == 'new@example.com'
	assert second['some_action']['items'] == ['new@example.com', 'other_value', 'new@example.com']
def test_substitute_preserves_original_history(mock_llm):
	"""Substitution returns a modified copy and leaves the input history intact."""
	agent = Agent(task='test', llm=mock_llm)
	email_field = create_test_element(attributes={'type': 'email'})
	history = create_mock_history([({'input': {'index': 1, 'text': 'old@example.com'}}, email_field)])

	original_action = history.history[0].model_output.action[0]
	before = vars(original_action)['input']['text']

	agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	after = vars(original_action)['input']['text']
	assert after == before
	assert after == 'old@example.com'


def test_substitute_empty_variables(mock_llm):
	"""An empty variables dict is a no-op."""
	agent = Agent(task='test', llm=mock_llm)
	email_field = create_test_element(attributes={'type': 'email'})
	history = create_mock_history([({'input': {'index': 1, 'text': 'old@example.com'}}, email_field)])

	updated = agent._substitute_variables_in_history(history, {})  # type: ignore[arg-type]

	payload = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert payload['input']['text'] == 'old@example.com'


def test_substitute_same_value_multiple_times(mock_llm):
	"""Every occurrence of a variable's value is replaced, across all steps."""
	agent = Agent(task='test', llm=mock_llm)
	email_field = create_test_element(attributes={'type': 'email'})
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, email_field),
			({'input': {'index': 2, 'text': 'old@example.com'}}, email_field),
		]
	)

	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]

	texts = [vars(step.model_output.action[0])['input']['text'] for step in updated.history]  # type: ignore[attr-defined]
	assert texts == ['new@example.com', 'new@example.com']
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_fallback_llm.py | tests/ci/test_fallback_llm.py | """
Tests for the fallback_llm feature in Agent.
Tests verify that when the primary LLM fails with rate limit (429) or server errors (503, 502, 500, 504),
the agent automatically switches to the fallback LLM and continues execution.
"""
from unittest.mock import AsyncMock
import pytest
from browser_use.agent.views import AgentOutput
from browser_use.llm import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.tools.service import Tools
def create_mock_llm(
	model_name: str = 'mock-llm',
	should_fail: bool = False,
	fail_with: type[Exception] | None = None,
	fail_status_code: int = 429,
	fail_message: str = 'Rate limit exceeded',
) -> BaseChatModel:
	"""Create a mock LLM for testing.

	Args:
		model_name: Name of the mock model
		should_fail: If True, every ainvoke call raises an exception
		fail_with: Exception type to raise (ModelRateLimitError or
			ModelProviderError); any other value raises a plain Exception
		fail_status_code: HTTP status code attached to the raised error
		fail_message: Error message attached to the raised error

	Returns:
		An AsyncMock speced as BaseChatModel whose ainvoke either raises the
		configured error or returns a canned successful `done` completion.
	"""
	# NOTE: an earlier version also built Tools()/ActionModel/AgentOutput fixtures
	# here, but they were never referenced — removed as dead code.
	llm = AsyncMock(spec=BaseChatModel)
	llm.model = model_name
	llm._verified_api_keys = True
	llm.provider = 'mock'
	llm.name = model_name
	llm.model_name = model_name

	# Canned agent response: a single successful `done` action.
	default_done_action = """
	{
		"thinking": "null",
		"evaluation_previous_goal": "Successfully completed the task",
		"memory": "Task completed",
		"next_goal": "Task completed",
		"action": [
			{
				"done": {
					"text": "Task completed successfully",
					"success": true
				}
			}
		]
	}
	"""

	async def mock_ainvoke(*args, **kwargs):
		if should_fail:
			if fail_with == ModelRateLimitError:
				raise ModelRateLimitError(message=fail_message, status_code=fail_status_code, model=model_name)
			elif fail_with == ModelProviderError:
				raise ModelProviderError(message=fail_message, status_code=fail_status_code, model=model_name)
			else:
				raise Exception(fail_message)

		# Mirror the real interface: return raw text without an output_format,
		# or a parsed structured completion when one is requested.
		output_format = kwargs.get('output_format')
		if output_format is None:
			return ChatInvokeCompletion(completion=default_done_action, usage=None)
		parsed = output_format.model_validate_json(default_done_action)
		return ChatInvokeCompletion(completion=parsed, usage=None)

	llm.ainvoke.side_effect = mock_ainvoke
	return llm
class TestFallbackLLMParameter:
	"""Constructor-level behaviour of the fallback_llm parameter."""

	def test_fallback_llm_none_by_default(self):
		"""Without a fallback, the agent keeps only the primary model."""
		from browser_use import Agent

		primary = create_mock_llm('primary-model')
		agent = Agent(task='Test task', llm=primary)

		assert agent._fallback_llm is None
		assert agent._using_fallback_llm is False
		assert agent._original_llm is primary

	def test_fallback_llm_single_model(self):
		"""Passing a fallback LLM stores it without activating it."""
		from browser_use import Agent

		primary = create_mock_llm('primary-model')
		backup = create_mock_llm('fallback-model')
		agent = Agent(task='Test task', llm=primary, fallback_llm=backup)

		assert agent._fallback_llm is backup
		assert agent._using_fallback_llm is False

	def test_public_properties(self):
		"""is_using_fallback_llm / current_llm_model reflect a fallback switch."""
		from browser_use import Agent

		primary = create_mock_llm('primary-model')
		backup = create_mock_llm('fallback-model')
		agent = Agent(task='Test task', llm=primary, fallback_llm=backup)

		# Before the switch the primary model is reported.
		assert agent.is_using_fallback_llm is False
		assert agent.current_llm_model == 'primary-model'

		# A retryable failure triggers the switch.
		agent._try_switch_to_fallback_llm(ModelRateLimitError(message='Rate limit', status_code=429, model='primary'))

		assert agent.is_using_fallback_llm is True
		assert agent.current_llm_model == 'fallback-model'
class TestFallbackLLMSwitching:
	"""Behaviour of _try_switch_to_fallback_llm for each error class."""

	@staticmethod
	def _make_agent_pair():
		"""Build an agent wired with a primary and a fallback mock LLM."""
		from browser_use import Agent

		primary = create_mock_llm('primary-model')
		backup = create_mock_llm('fallback-model')
		agent = Agent(task='Test task', llm=primary, fallback_llm=backup)
		return agent, primary, backup

	def test_switch_on_rate_limit_error(self):
		"""ModelRateLimitError (429) triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelRateLimitError(message='Rate limit exceeded', status_code=429, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup
		assert agent._using_fallback_llm is True

	def test_switch_on_503_error(self):
		"""503 Service Unavailable triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelProviderError(message='Service unavailable', status_code=503, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup
		assert agent._using_fallback_llm is True

	def test_switch_on_500_error(self):
		"""500 Internal Server Error triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelProviderError(message='Internal server error', status_code=500, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup

	def test_switch_on_502_error(self):
		"""502 Bad Gateway triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelProviderError(message='Bad gateway', status_code=502, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup

	def test_no_switch_on_400_error(self):
		"""400 Bad Request is not retryable, so the primary stays active."""
		agent, primary, _backup = self._make_agent_pair()
		err = ModelProviderError(message='Bad request', status_code=400, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is False
		assert agent.llm is primary
		assert agent._using_fallback_llm is False

	def test_switch_on_401_error(self):
		"""401 Unauthorized (API key error) triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelProviderError(message='Invalid API key', status_code=401, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup
		assert agent._using_fallback_llm is True

	def test_switch_on_402_error(self):
		"""402 Payment Required (insufficient credits) triggers the fallback switch."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelProviderError(message='Insufficient credits', status_code=402, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup
		assert agent._using_fallback_llm is True

	def test_no_switch_when_no_fallback_configured(self):
		"""Without a configured fallback the switch request is refused."""
		from browser_use import Agent

		primary = create_mock_llm('primary-model')
		agent = Agent(task='Test task', llm=primary)
		err = ModelRateLimitError(message='Rate limit exceeded', status_code=429, model='primary-model')

		assert agent._try_switch_to_fallback_llm(err) is False
		assert agent.llm is primary

	def test_no_switch_when_already_using_fallback(self):
		"""A second failure while already on the fallback does not switch again."""
		agent, _primary, backup = self._make_agent_pair()
		err = ModelRateLimitError(message='Rate limit', status_code=429, model='primary')

		# First failure switches to the fallback.
		assert agent._try_switch_to_fallback_llm(err) is True
		assert agent.llm is backup

		# Second failure is refused; the fallback stays active.
		assert agent._try_switch_to_fallback_llm(err) is False
		assert agent.llm is backup
class TestFallbackLLMIntegration:
	"""Integration tests for fallback LLM behavior in get_model_output."""

	def _create_failing_mock_llm(
		self,
		model_name: str,
		fail_with: type[Exception],
		fail_status_code: int = 429,
		fail_message: str = 'Rate limit exceeded',
	) -> BaseChatModel:
		"""Create a mock LLM that always fails with the specified error.

		Args:
			model_name: Name reported by the mock model.
			fail_with: ModelRateLimitError or ModelProviderError; any other type
				results in a plain Exception being raised.
			fail_status_code: HTTP status code attached to the raised error.
			fail_message: Error message attached to the raised error.
		"""
		llm = AsyncMock(spec=BaseChatModel)
		llm.model = model_name
		llm._verified_api_keys = True
		llm.provider = 'mock'
		llm.name = model_name
		llm.model_name = model_name

		async def mock_ainvoke(*args, **kwargs):
			# Every invocation fails; the error type is chosen from fail_with.
			if fail_with == ModelRateLimitError:
				raise ModelRateLimitError(message=fail_message, status_code=fail_status_code, model=model_name)
			elif fail_with == ModelProviderError:
				raise ModelProviderError(message=fail_message, status_code=fail_status_code, model=model_name)
			else:
				raise Exception(fail_message)

		llm.ainvoke.side_effect = mock_ainvoke
		return llm

	def _create_succeeding_mock_llm(self, model_name: str, agent) -> BaseChatModel:
		"""Create a mock LLM that succeeds and returns a valid AgentOutput."""
		llm = AsyncMock(spec=BaseChatModel)
		llm.model = model_name
		llm._verified_api_keys = True
		llm.provider = 'mock'
		llm.name = model_name
		llm.model_name = model_name

		# Canned agent response: a single successful `done` action.
		default_done_action = """
		{
			"thinking": "null",
			"evaluation_previous_goal": "Successfully completed the task",
			"memory": "Task completed",
			"next_goal": "Task completed",
			"action": [
				{
					"done": {
						"text": "Task completed successfully",
						"success": true
					}
				}
			]
		}
		"""

		# Capture the agent reference for use in the closure
		captured_agent = agent

		async def mock_ainvoke(*args, **kwargs):
			# Get the output format from kwargs and use it to parse
			output_format = kwargs.get('output_format')
			if output_format is not None:
				parsed = output_format.model_validate_json(default_done_action)
				return ChatInvokeCompletion(completion=parsed, usage=None)
			# Fallback: use the agent's AgentOutput type
			parsed = captured_agent.AgentOutput.model_validate_json(default_done_action)
			return ChatInvokeCompletion(completion=parsed, usage=None)

		llm.ainvoke.side_effect = mock_ainvoke
		return llm

	@pytest.mark.asyncio
	async def test_get_model_output_switches_to_fallback_on_rate_limit(self, browser_session):
		"""Test that get_model_output automatically switches to fallback on rate limit."""
		from browser_use import Agent

		# Create agent first with a working mock LLM
		placeholder = create_mock_llm('placeholder')
		agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)

		# Create a failing primary and succeeding fallback
		primary = self._create_failing_mock_llm(
			'primary-model',
			fail_with=ModelRateLimitError,
			fail_status_code=429,
			fail_message='Rate limit exceeded',
		)
		fallback = self._create_succeeding_mock_llm('fallback-model', agent)

		# Replace the LLM and set up fallback: rewire the already-constructed
		# agent so the failing primary is active and the fallback is registered.
		agent.llm = primary
		agent._original_llm = primary
		agent._fallback_llm = fallback

		from browser_use.llm.messages import BaseMessage, UserMessage

		messages: list[BaseMessage] = [UserMessage(content='Test message')]

		# This should switch to fallback and succeed
		result = await agent.get_model_output(messages)

		assert result is not None
		assert agent.llm is fallback
		assert agent._using_fallback_llm is True

	@pytest.mark.asyncio
	async def test_get_model_output_raises_when_no_fallback(self, browser_session):
		"""Test that get_model_output raises error when no fallback is configured."""
		from browser_use import Agent

		# Create agent first with a working mock LLM
		placeholder = create_mock_llm('placeholder')
		agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)

		# Replace with failing LLM
		primary = self._create_failing_mock_llm(
			'primary-model',
			fail_with=ModelRateLimitError,
			fail_status_code=429,
			fail_message='Rate limit exceeded',
		)
		agent.llm = primary
		agent._original_llm = primary
		agent._fallback_llm = None  # No fallback

		from browser_use.llm.messages import BaseMessage, UserMessage

		messages: list[BaseMessage] = [UserMessage(content='Test message')]

		# This should raise since no fallback is configured
		with pytest.raises(ModelRateLimitError):
			await agent.get_model_output(messages)

	@pytest.mark.asyncio
	async def test_get_model_output_raises_when_fallback_also_fails(self, browser_session):
		"""Test that error is raised when fallback also fails."""
		from browser_use import Agent

		# Create agent first with a working mock LLM
		placeholder = create_mock_llm('placeholder')
		agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)

		# Both models fail
		primary = self._create_failing_mock_llm('primary', fail_with=ModelRateLimitError, fail_status_code=429)
		fallback = self._create_failing_mock_llm('fallback', fail_with=ModelProviderError, fail_status_code=503)

		agent.llm = primary
		agent._original_llm = primary
		agent._fallback_llm = fallback

		from browser_use.llm.messages import BaseMessage, UserMessage

		messages: list[BaseMessage] = [UserMessage(content='Test message')]

		# Should fail after fallback also fails
		with pytest.raises((ModelRateLimitError, ModelProviderError)):
			await agent.get_model_output(messages)
# Allow running this test module directly (python <file> -v) instead of only
# through the pytest CLI; pytest is imported at the top of this module.
if __name__ == '__main__':
	pytest.main([__file__, '-v'])
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_variable_detection.py | tests/ci/test_variable_detection.py | """Unit tests for variable detection in agent history"""
from browser_use.agent.variable_detector import (
_detect_from_attributes,
_detect_from_value_pattern,
_detect_variable_type,
_ensure_unique_name,
detect_variables_in_history,
)
from browser_use.agent.views import DetectedVariable
from browser_use.dom.views import DOMInteractedElement, NodeType
def create_test_element(attributes: dict[str, str] | None = None) -> DOMInteractedElement:
	"""Build a minimal <input> DOMInteractedElement fixture for detection tests."""
	return DOMInteractedElement(
		node_id=1,
		backend_node_id=1,
		frame_id='frame1',
		node_type=NodeType.ELEMENT_NODE,
		node_value='',
		node_name='input',
		attributes={} if attributes is None else attributes,
		bounds=None,
		x_path='//*[@id="test"]',
		element_hash=12345,
	)
def create_mock_history(actions_with_elements: list[tuple[dict, DOMInteractedElement | None]]):
	"""Wrap (action_dict, element) pairs into a SimpleNamespace tree shaped like agent history."""
	from types import SimpleNamespace

	steps = []
	for action_dict, element in actions_with_elements:
		step = SimpleNamespace(
			model_output=SimpleNamespace(action=[SimpleNamespace(**action_dict)]),
			state=SimpleNamespace(interacted_element=[element] if element else None),
		)
		steps.append(step)
	return SimpleNamespace(history=steps)
def test_detect_email_from_attributes():
	"""type='email' attribute maps to the 'email' variable with email format."""
	result = _detect_from_attributes({'type': 'email', 'id': 'email-input'})
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('email', 'email')


def test_detect_email_from_pattern():
	"""An email-shaped value is detected by pattern matching alone."""
	result = _detect_from_value_pattern('test@example.com')
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('email', 'email')


def test_detect_phone_from_attributes():
	"""type='tel' attribute maps to the 'phone' variable."""
	result = _detect_from_attributes({'type': 'tel', 'name': 'phone'})
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('phone', 'phone')


def test_detect_phone_from_pattern():
	"""A formatted phone number is detected by pattern matching."""
	result = _detect_from_value_pattern('+1 (555) 123-4567')
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('phone', 'phone')


def test_detect_date_from_attributes():
	"""type='date' attribute maps to the 'date' variable."""
	result = _detect_from_attributes({'type': 'date', 'id': 'dob'})
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('date', 'date')


def test_detect_date_from_pattern():
	"""An ISO YYYY-MM-DD value is detected as a date."""
	result = _detect_from_value_pattern('1990-01-01')
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('date', 'date')
def test_detect_first_name_from_attributes():
	"""name='first_name' maps to the 'first_name' variable (no format)."""
	result = _detect_from_attributes({'name': 'first_name', 'placeholder': 'Enter your first name'})
	assert result is not None
	name, fmt = result
	assert name == 'first_name'
	assert fmt is None


def test_detect_first_name_from_pattern():
	"""A single capitalized word is detected as a first name."""
	result = _detect_from_value_pattern('John')
	assert result is not None
	name, fmt = result
	assert name == 'first_name'
	assert fmt is None


def test_detect_full_name_from_pattern():
	"""Two capitalized words are detected as a full name."""
	result = _detect_from_value_pattern('John Doe')
	assert result is not None
	name, fmt = result
	assert name == 'full_name'
	assert fmt is None


def test_detect_address_from_attributes():
	"""name='street_address' maps to the 'address' variable."""
	result = _detect_from_attributes({'name': 'street_address', 'id': 'address-input'})
	assert result is not None
	name, fmt = result
	assert name == 'address'
	assert fmt is None


def test_detect_billing_address_from_attributes():
	"""name='billing_address' maps to the 'billing_address' variable."""
	result = _detect_from_attributes({'name': 'billing_address', 'placeholder': 'Billing street address'})
	assert result is not None
	name, fmt = result
	assert name == 'billing_address'
	assert fmt is None


def test_detect_comment_from_attributes():
	"""name='comment' maps to the 'comment' variable."""
	result = _detect_from_attributes({'name': 'comment', 'placeholder': 'Enter your comment'})
	assert result is not None
	name, fmt = result
	assert name == 'comment'
	assert fmt is None


def test_detect_city_from_attributes():
	"""name='city' maps to the 'city' variable."""
	result = _detect_from_attributes({'name': 'city', 'id': 'city-input'})
	assert result is not None
	name, fmt = result
	assert name == 'city'
	assert fmt is None


def test_detect_state_from_attributes():
	"""name='state' maps to the 'state' variable."""
	result = _detect_from_attributes({'name': 'state', 'aria-label': 'State or Province'})
	assert result is not None
	name, fmt = result
	assert name == 'state'
	assert fmt is None


def test_detect_country_from_attributes():
	"""name='country' maps to the 'country' variable."""
	result = _detect_from_attributes({'name': 'country', 'id': 'country-select'})
	assert result is not None
	name, fmt = result
	assert name == 'country'
	assert fmt is None


def test_detect_zip_code_from_attributes():
	"""name='zip_code' maps to 'zip_code' with the postal_code format."""
	result = _detect_from_attributes({'name': 'zip_code', 'placeholder': 'Zip or postal code'})
	assert result is not None
	name, fmt = result
	assert name == 'zip_code'
	assert fmt == 'postal_code'


def test_detect_company_from_attributes():
	"""name='company' maps to the 'company' variable."""
	result = _detect_from_attributes({'name': 'company', 'id': 'company-input'})
	assert result is not None
	name, fmt = result
	assert name == 'company'
	assert fmt is None
def test_detect_number_from_pattern():
	"""A purely numeric string is detected as a generic number."""
	result = _detect_from_value_pattern('12345')
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('number', 'number')


def test_no_detection_for_random_text():
	"""Free-form text is not treated as a variable."""
	assert _detect_from_value_pattern('some random text that is not a variable') is None


def test_no_detection_for_short_text():
	"""A single character is too short to be a variable."""
	assert _detect_from_value_pattern('a') is None


def test_element_attributes_take_priority_over_pattern():
	"""Attribute-based detection wins over value-pattern detection."""
	# 'Test' alone would pattern-match as first_name, but the element declares
	# itself an email input — the attribute signal must win.
	email_input = create_test_element(attributes={'type': 'email', 'id': 'email-input'})

	result = _detect_variable_type('Test', email_input)

	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('email', 'email')


def test_pattern_matching_used_when_no_element():
	"""Without element context, detection falls back to value patterns."""
	result = _detect_variable_type('test@example.com', element=None)
	assert result is not None
	name, fmt = result
	assert (name, fmt) == ('email', 'email')
def test_ensure_unique_name_no_conflict():
	"""A fresh name passes through unchanged."""
	assert _ensure_unique_name('email', {}) == 'email'


def test_ensure_unique_name_with_conflict():
	"""A single conflict appends the _2 suffix."""
	taken = {'email': DetectedVariable(name='email', original_value='test1@example.com')}
	assert _ensure_unique_name('email', taken) == 'email_2'


def test_ensure_unique_name_with_multiple_conflicts():
	"""The suffix counter keeps incrementing past existing numbered names."""
	taken = {
		'email': DetectedVariable(name='email', original_value='test1@example.com'),
		'email_2': DetectedVariable(name='email_2', original_value='test2@example.com'),
	}
	assert _ensure_unique_name('email', taken) == 'email_3'
def test_detect_variables_in_empty_history():
	"""An empty history yields no detected variables."""
	from types import SimpleNamespace

	empty_history = SimpleNamespace(history=[])
	assert detect_variables_in_history(empty_history) == {}  # type: ignore[arg-type]


def test_detect_variables_in_history_with_input_action():
	"""An input action on an email field is detected as the 'email' variable."""
	# Consistency fix: reuse this module's create_mock_history helper instead of
	# duplicating the SimpleNamespace scaffolding inline (same structure, one source).
	element = create_test_element(attributes={'type': 'email', 'id': 'email-input'})
	history = create_mock_history([({'input': {'index': 1, 'text': 'test@example.com'}}, element)])

	result = detect_variables_in_history(history)  # type: ignore[arg-type]

	assert len(result) == 1
	assert 'email' in result
	assert result['email'].original_value == 'test@example.com'
	assert result['email'].format == 'email'


def test_detect_variables_skips_duplicate_values():
	"""The same value entered twice produces only one variable."""
	element = create_test_element(attributes={'type': 'email'})
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, element),
			({'input': {'index': 2, 'text': 'test@example.com'}}, element),
		]
	)

	result = detect_variables_in_history(history)  # type: ignore[arg-type]

	assert len(result) == 1
	assert 'email' in result


def test_detect_variables_handles_missing_state():
	"""Detection still works via pattern matching when step state is None."""
	from types import SimpleNamespace

	# The helper always builds a state namespace, so a None state must be built by hand.
	action = SimpleNamespace(**{'input': {'index': 1, 'text': 'test@example.com'}})
	step = SimpleNamespace(model_output=SimpleNamespace(action=[action]), state=None)
	history = SimpleNamespace(history=[step])

	result = detect_variables_in_history(history)  # type: ignore[arg-type]

	assert len(result) == 1
	assert 'email' in result
	assert result['email'].original_value == 'test@example.com'
def test_detect_variables_handles_missing_interacted_element():
"""Test that detection works when interacted_element is missing"""
# Use None as element to test when interacted_element is None
history = create_mock_history(
[
({'input': {'index': 1, 'text': 'test@example.com'}}, None),
]
)
result = detect_variables_in_history(history) # type: ignore[arg-type]
# Should still detect via pattern matching
assert len(result) == 1
assert 'email' in result
def test_detect_variables_multiple_types():
"""Test detection of multiple variable types in one history"""
history = create_mock_history(
[
({'input': {'index': 1, 'text': 'test@example.com'}}, create_test_element(attributes={'type': 'email'})),
({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
({'input': {'index': 3, 'text': '1990-01-01'}}, create_test_element(attributes={'type': 'date'})),
]
)
result = detect_variables_in_history(history) # type: ignore[arg-type]
assert len(result) == 3
assert 'email' in result
assert 'first_name' in result
assert 'date' in result
assert result['email'].original_value == 'test@example.com'
assert result['first_name'].original_value == 'John'
assert result['date'].original_value == '1990-01-01'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/test_llm_retries.py | tests/ci/test_llm_retries.py | """
Test retry logic with exponential backoff for LLM clients.
"""
import time
from unittest.mock import AsyncMock, MagicMock, patch
import httpx
import pytest
class TestChatBrowserUseRetries:
"""Test retry logic for ChatBrowserUse."""
@pytest.fixture
def mock_env(self, monkeypatch):
"""Set up environment for ChatBrowserUse."""
monkeypatch.setenv('BROWSER_USE_API_KEY', 'test-api-key')
@pytest.mark.asyncio
async def test_retries_on_503_with_exponential_backoff(self, mock_env):
"""Test that 503 errors trigger retries with exponential backoff."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.messages import UserMessage
# Track timing of each attempt
attempt_times: list[float] = []
attempt_count = 0
async def mock_post(*args, **kwargs):
nonlocal attempt_count
attempt_times.append(time.monotonic())
attempt_count += 1
if attempt_count < 3:
# First 2 attempts fail with 503
response = MagicMock()
response.status_code = 503
response.json.return_value = {'detail': 'Service temporarily unavailable'}
raise httpx.HTTPStatusError('503', request=MagicMock(), response=response)
else:
# Third attempt succeeds
response = MagicMock()
response.json.return_value = {
'completion': 'Success!',
'usage': {
'prompt_tokens': 10,
'completion_tokens': 5,
'total_tokens': 15,
'prompt_cached_tokens': None,
'prompt_cache_creation_tokens': None,
'prompt_image_tokens': None,
},
}
response.raise_for_status = MagicMock()
return response
with patch('httpx.AsyncClient') as mock_client_class:
mock_client = AsyncMock()
mock_client.post = mock_post
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
mock_client.__aexit__ = AsyncMock(return_value=None)
mock_client_class.return_value = mock_client
# Use short delays for testing
client = ChatBrowserUse(retry_base_delay=0.1, retry_max_delay=1.0)
result = await client.ainvoke([UserMessage(content='test')])
# Should have made 3 attempts
assert attempt_count == 3
assert result.completion == 'Success!'
# Verify exponential backoff timing (with some tolerance for test execution)
# First retry: ~0.1s, Second retry: ~0.2s
delay_1 = attempt_times[1] - attempt_times[0]
delay_2 = attempt_times[2] - attempt_times[1]
# Allow 50% tolerance for timing
assert 0.05 <= delay_1 <= 0.2, f'First delay {delay_1:.3f}s not in expected range'
assert 0.1 <= delay_2 <= 0.4, f'Second delay {delay_2:.3f}s not in expected range'
# Second delay should be roughly 2x the first (exponential)
assert delay_2 > delay_1, 'Second delay should be longer than first (exponential backoff)'
@pytest.mark.asyncio
async def test_no_retry_on_401(self, mock_env):
"""Test that 401 errors do NOT trigger retries."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import UserMessage
attempt_count = 0
async def mock_post(*args, **kwargs):
nonlocal attempt_count
attempt_count += 1
response = MagicMock()
response.status_code = 401
response.json.return_value = {'detail': 'Invalid API key'}
raise httpx.HTTPStatusError('401', request=MagicMock(), response=response)
with patch('httpx.AsyncClient') as mock_client_class:
mock_client = AsyncMock()
mock_client.post = mock_post
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
mock_client.__aexit__ = AsyncMock(return_value=None)
mock_client_class.return_value = mock_client
client = ChatBrowserUse(retry_base_delay=0.01)
with pytest.raises(ModelProviderError, match='Invalid API key'):
await client.ainvoke([UserMessage(content='test')])
# Should only attempt once (no retries for 401)
assert attempt_count == 1
@pytest.mark.asyncio
async def test_retries_on_timeout(self, mock_env):
"""Test that timeouts trigger retries."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.messages import UserMessage
attempt_count = 0
async def mock_post(*args, **kwargs):
nonlocal attempt_count
attempt_count += 1
if attempt_count < 2:
raise httpx.TimeoutException('Request timed out')
# Second attempt succeeds (with no usage data to test None handling)
response = MagicMock()
response.json.return_value = {'completion': 'Success after timeout!', 'usage': None}
response.raise_for_status = MagicMock()
return response
with patch('httpx.AsyncClient') as mock_client_class:
mock_client = AsyncMock()
mock_client.post = mock_post
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
mock_client.__aexit__ = AsyncMock(return_value=None)
mock_client_class.return_value = mock_client
client = ChatBrowserUse(retry_base_delay=0.01)
result = await client.ainvoke([UserMessage(content='test')])
assert attempt_count == 2
assert result.completion == 'Success after timeout!'
@pytest.mark.asyncio
async def test_max_retries_exhausted(self, mock_env):
"""Test that error is raised after max retries exhausted."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import UserMessage
attempt_count = 0
async def mock_post(*args, **kwargs):
nonlocal attempt_count
attempt_count += 1
response = MagicMock()
response.status_code = 503
response.json.return_value = {'detail': 'Service unavailable'}
raise httpx.HTTPStatusError('503', request=MagicMock(), response=response)
with patch('httpx.AsyncClient') as mock_client_class:
mock_client = AsyncMock()
mock_client.post = mock_post
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
mock_client.__aexit__ = AsyncMock(return_value=None)
mock_client_class.return_value = mock_client
client = ChatBrowserUse(max_retries=3, retry_base_delay=0.01)
with pytest.raises(ModelProviderError, match='Server error'):
await client.ainvoke([UserMessage(content='test')])
# Should have attempted max_retries times
assert attempt_count == 3
class TestChatGoogleRetries:
"""Test retry logic for ChatGoogle."""
@pytest.fixture
def mock_env(self, monkeypatch):
"""Set up environment for ChatGoogle."""
monkeypatch.setenv('GOOGLE_API_KEY', 'test-api-key')
@pytest.mark.asyncio
async def test_retries_on_503_with_exponential_backoff(self, mock_env):
"""Test that 503 errors trigger retries with exponential backoff."""
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.messages import UserMessage
attempt_times: list[float] = []
attempt_count = 0
# Mock the genai client
with patch('browser_use.llm.google.chat.genai') as mock_genai:
mock_client = MagicMock()
mock_genai.Client.return_value = mock_client
async def mock_generate(*args, **kwargs):
nonlocal attempt_count
attempt_times.append(time.monotonic())
attempt_count += 1
if attempt_count < 3:
raise ModelProviderError(message='Service unavailable', status_code=503, model='gemini-2.0-flash')
else:
# Success on third attempt
mock_response = MagicMock()
mock_response.text = 'Success!'
mock_response.usage_metadata = MagicMock(
prompt_token_count=10, candidates_token_count=5, total_token_count=15, cached_content_token_count=0
)
mock_response.candidates = [MagicMock(content=MagicMock(parts=[MagicMock(text='Success!')]))]
return mock_response
# Mock the aio.models.generate_content path
mock_client.aio.models.generate_content = mock_generate
client = ChatGoogle(model='gemini-2.0-flash', api_key='test', retry_base_delay=0.1, retry_max_delay=1.0)
result = await client.ainvoke([UserMessage(content='test')])
assert attempt_count == 3
assert result.completion == 'Success!'
# Verify exponential backoff
delay_1 = attempt_times[1] - attempt_times[0]
delay_2 = attempt_times[2] - attempt_times[1]
assert 0.05 <= delay_1 <= 0.2, f'First delay {delay_1:.3f}s not in expected range'
assert 0.1 <= delay_2 <= 0.4, f'Second delay {delay_2:.3f}s not in expected range'
assert delay_2 > delay_1, 'Second delay should be longer than first'
@pytest.mark.asyncio
async def test_no_retry_on_400(self, mock_env):
"""Test that 400 errors do NOT trigger retries."""
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.messages import UserMessage
attempt_count = 0
with patch('browser_use.llm.google.chat.genai') as mock_genai:
mock_client = MagicMock()
mock_genai.Client.return_value = mock_client
async def mock_generate(*args, **kwargs):
nonlocal attempt_count
attempt_count += 1
raise ModelProviderError(message='Bad request', status_code=400, model='gemini-2.0-flash')
mock_client.aio.models.generate_content = mock_generate
client = ChatGoogle(model='gemini-2.0-flash', api_key='test', retry_base_delay=0.01)
with pytest.raises(ModelProviderError):
await client.ainvoke([UserMessage(content='test')])
# Should only attempt once (400 is not retryable)
assert attempt_count == 1
@pytest.mark.asyncio
async def test_retries_on_429_rate_limit(self, mock_env):
"""Test that 429 rate limit errors trigger retries."""
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.messages import UserMessage
attempt_count = 0
with patch('browser_use.llm.google.chat.genai') as mock_genai:
mock_client = MagicMock()
mock_genai.Client.return_value = mock_client
async def mock_generate(*args, **kwargs):
nonlocal attempt_count
attempt_count += 1
if attempt_count < 2:
raise ModelProviderError(message='Rate limit exceeded', status_code=429, model='gemini-2.0-flash')
else:
mock_response = MagicMock()
mock_response.text = 'Success after rate limit!'
mock_response.usage_metadata = MagicMock(
prompt_token_count=10, candidates_token_count=5, total_token_count=15, cached_content_token_count=0
)
mock_response.candidates = [MagicMock(content=MagicMock(parts=[MagicMock(text='Success after rate limit!')]))]
return mock_response
mock_client.aio.models.generate_content = mock_generate
client = ChatGoogle(model='gemini-2.0-flash', api_key='test', retry_base_delay=0.01)
result = await client.ainvoke([UserMessage(content='test')])
assert attempt_count == 2
assert result.completion == 'Success after rate limit!'
if __name__ == '__main__':
pytest.main([__file__, '-v'])
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/infrastructure/test_config.py | tests/ci/infrastructure/test_config.py | """Tests for lazy loading configuration system."""
import os
from browser_use.config import CONFIG
class TestLazyConfig:
"""Test lazy loading of environment variables through CONFIG object."""
def test_config_reads_env_vars_lazily(self):
"""Test that CONFIG reads environment variables each time they're accessed."""
# Set an env var
original_value = os.environ.get('BROWSER_USE_LOGGING_LEVEL', '')
try:
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'debug'
assert CONFIG.BROWSER_USE_LOGGING_LEVEL == 'debug'
# Change the env var
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info'
assert CONFIG.BROWSER_USE_LOGGING_LEVEL == 'info'
# Delete the env var to test default
del os.environ['BROWSER_USE_LOGGING_LEVEL']
assert CONFIG.BROWSER_USE_LOGGING_LEVEL == 'info' # default value
finally:
# Restore original value
if original_value:
os.environ['BROWSER_USE_LOGGING_LEVEL'] = original_value
else:
os.environ.pop('BROWSER_USE_LOGGING_LEVEL', None)
def test_boolean_env_vars(self):
"""Test boolean environment variables are parsed correctly."""
original_value = os.environ.get('ANONYMIZED_TELEMETRY', '')
try:
# Test true values
for true_val in ['true', 'True', 'TRUE', 'yes', 'Yes', '1']:
os.environ['ANONYMIZED_TELEMETRY'] = true_val
assert CONFIG.ANONYMIZED_TELEMETRY is True, f'Failed for value: {true_val}'
# Test false values
for false_val in ['false', 'False', 'FALSE', 'no', 'No', '0']:
os.environ['ANONYMIZED_TELEMETRY'] = false_val
assert CONFIG.ANONYMIZED_TELEMETRY is False, f'Failed for value: {false_val}'
finally:
if original_value:
os.environ['ANONYMIZED_TELEMETRY'] = original_value
else:
os.environ.pop('ANONYMIZED_TELEMETRY', None)
def test_api_keys_lazy_loading(self):
"""Test API keys are loaded lazily."""
original_value = os.environ.get('OPENAI_API_KEY', '')
try:
# Test empty default
os.environ.pop('OPENAI_API_KEY', None)
assert CONFIG.OPENAI_API_KEY == ''
# Set a value
os.environ['OPENAI_API_KEY'] = 'test-key-123'
assert CONFIG.OPENAI_API_KEY == 'test-key-123'
# Change the value
os.environ['OPENAI_API_KEY'] = 'new-key-456'
assert CONFIG.OPENAI_API_KEY == 'new-key-456'
finally:
if original_value:
os.environ['OPENAI_API_KEY'] = original_value
else:
os.environ.pop('OPENAI_API_KEY', None)
def test_path_configuration(self):
"""Test path configuration variables."""
original_value = os.environ.get('XDG_CACHE_HOME', '')
try:
# Test custom path
test_path = '/tmp/test-cache'
os.environ['XDG_CACHE_HOME'] = test_path
# Use Path().resolve() to handle symlinks (e.g., /tmp -> /private/tmp on macOS)
from pathlib import Path
assert CONFIG.XDG_CACHE_HOME == Path(test_path).resolve()
# Test default path expansion
os.environ.pop('XDG_CACHE_HOME', None)
assert '/.cache' in str(CONFIG.XDG_CACHE_HOME)
finally:
if original_value:
os.environ['XDG_CACHE_HOME'] = original_value
else:
os.environ.pop('XDG_CACHE_HOME', None)
def test_cloud_sync_inherits_telemetry(self):
"""Test BROWSER_USE_CLOUD_SYNC inherits from ANONYMIZED_TELEMETRY when not set."""
telemetry_original = os.environ.get('ANONYMIZED_TELEMETRY', '')
sync_original = os.environ.get('BROWSER_USE_CLOUD_SYNC', '')
try:
# When BROWSER_USE_CLOUD_SYNC is not set, it should inherit from ANONYMIZED_TELEMETRY
os.environ['ANONYMIZED_TELEMETRY'] = 'true'
os.environ.pop('BROWSER_USE_CLOUD_SYNC', None)
assert CONFIG.BROWSER_USE_CLOUD_SYNC is True
os.environ['ANONYMIZED_TELEMETRY'] = 'false'
os.environ.pop('BROWSER_USE_CLOUD_SYNC', None)
assert CONFIG.BROWSER_USE_CLOUD_SYNC is False
# When explicitly set, it should use its own value
os.environ['ANONYMIZED_TELEMETRY'] = 'false'
os.environ['BROWSER_USE_CLOUD_SYNC'] = 'true'
assert CONFIG.BROWSER_USE_CLOUD_SYNC is True
finally:
if telemetry_original:
os.environ['ANONYMIZED_TELEMETRY'] = telemetry_original
else:
os.environ.pop('ANONYMIZED_TELEMETRY', None)
if sync_original:
os.environ['BROWSER_USE_CLOUD_SYNC'] = sync_original
else:
os.environ.pop('BROWSER_USE_CLOUD_SYNC', None)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/infrastructure/test_filesystem.py | tests/ci/infrastructure/test_filesystem.py | """Tests for the FileSystem class and related file operations."""
import asyncio
import tempfile
from pathlib import Path
import pytest
from browser_use.filesystem.file_system import (
DEFAULT_FILE_SYSTEM_PATH,
INVALID_FILENAME_ERROR_MESSAGE,
CsvFile,
FileSystem,
FileSystemState,
JsonFile,
JsonlFile,
MarkdownFile,
TxtFile,
)
class TestBaseFile:
"""Test the BaseFile abstract base class and its implementations."""
def test_markdown_file_creation(self):
"""Test MarkdownFile creation and basic properties."""
md_file = MarkdownFile(name='test', content='# Hello World')
assert md_file.name == 'test'
assert md_file.content == '# Hello World'
assert md_file.extension == 'md'
assert md_file.full_name == 'test.md'
assert md_file.get_size == 13
assert md_file.get_line_count == 1
def test_txt_file_creation(self):
"""Test TxtFile creation and basic properties."""
txt_file = TxtFile(name='notes', content='Hello\nWorld')
assert txt_file.name == 'notes'
assert txt_file.content == 'Hello\nWorld'
assert txt_file.extension == 'txt'
assert txt_file.full_name == 'notes.txt'
assert txt_file.get_size == 11
assert txt_file.get_line_count == 2
def test_json_file_creation(self):
"""Test JsonFile creation and basic properties."""
json_content = '{"name": "John", "age": 30, "city": "New York"}'
json_file = JsonFile(name='data', content=json_content)
assert json_file.name == 'data'
assert json_file.content == json_content
assert json_file.extension == 'json'
assert json_file.full_name == 'data.json'
assert json_file.get_size == len(json_content)
assert json_file.get_line_count == 1
def test_csv_file_creation(self):
"""Test CsvFile creation and basic properties."""
csv_content = 'name,age,city\nJohn,30,New York\nJane,25,London'
csv_file = CsvFile(name='users', content=csv_content)
assert csv_file.name == 'users'
assert csv_file.content == csv_content
assert csv_file.extension == 'csv'
assert csv_file.full_name == 'users.csv'
assert csv_file.get_size == len(csv_content)
assert csv_file.get_line_count == 3
def test_jsonl_file_creation(self):
"""Test JsonlFile creation and basic properties."""
jsonl_content = '{"id": 1, "name": "John"}\n{"id": 2, "name": "Jane"}'
jsonl_file = JsonlFile(name='data', content=jsonl_content)
assert jsonl_file.name == 'data'
assert jsonl_file.content == jsonl_content
assert jsonl_file.extension == 'jsonl'
assert jsonl_file.full_name == 'data.jsonl'
assert jsonl_file.get_size == len(jsonl_content)
assert jsonl_file.get_line_count == 2
def test_file_content_operations(self):
"""Test content update and append operations."""
file_obj = TxtFile(name='test')
# Initial content
assert file_obj.content == ''
assert file_obj.get_size == 0
# Write content
file_obj.write_file_content('First line')
assert file_obj.content == 'First line'
assert file_obj.get_size == 10
# Append content
file_obj.append_file_content('\nSecond line')
assert file_obj.content == 'First line\nSecond line'
assert file_obj.get_line_count == 2
# Update content
file_obj.update_content('New content')
assert file_obj.content == 'New content'
async def test_file_disk_operations(self):
"""Test file sync to disk operations."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
file_obj = MarkdownFile(name='test', content='# Test Content')
# Test sync to disk
await file_obj.sync_to_disk(tmp_path)
# Verify file was created on disk
file_path = tmp_path / 'test.md'
assert file_path.exists()
assert file_path.read_text() == '# Test Content'
# Test write operation
await file_obj.write('# New Content', tmp_path)
assert file_path.read_text() == '# New Content'
assert file_obj.content == '# New Content'
# Test append operation
await file_obj.append('\n## Section 2', tmp_path)
expected_content = '# New Content\n## Section 2'
assert file_path.read_text() == expected_content
assert file_obj.content == expected_content
async def test_json_file_disk_operations(self):
"""Test JSON file sync to disk operations."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
json_content = '{"users": [{"name": "John", "age": 30}]}'
json_file = JsonFile(name='data', content=json_content)
# Test sync to disk
await json_file.sync_to_disk(tmp_path)
# Verify file was created on disk
file_path = tmp_path / 'data.json'
assert file_path.exists()
assert file_path.read_text() == json_content
# Test write operation
new_content = '{"users": [{"name": "Jane", "age": 25}]}'
await json_file.write(new_content, tmp_path)
assert file_path.read_text() == new_content
assert json_file.content == new_content
# Test append operation
await json_file.append(', {"name": "Bob", "age": 35}', tmp_path)
expected_content = new_content + ', {"name": "Bob", "age": 35}'
assert file_path.read_text() == expected_content
assert json_file.content == expected_content
async def test_csv_file_disk_operations(self):
"""Test CSV file sync to disk operations."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
csv_content = 'name,age,city\nJohn,30,New York'
csv_file = CsvFile(name='users', content=csv_content)
# Test sync to disk
await csv_file.sync_to_disk(tmp_path)
# Verify file was created on disk
file_path = tmp_path / 'users.csv'
assert file_path.exists()
assert file_path.read_text() == csv_content
# Test write operation
new_content = 'name,age,city\nJane,25,London'
await csv_file.write(new_content, tmp_path)
assert file_path.read_text() == new_content
assert csv_file.content == new_content
# Test append operation
await csv_file.append('\nBob,35,Paris', tmp_path)
expected_content = new_content + '\nBob,35,Paris'
assert file_path.read_text() == expected_content
assert csv_file.content == expected_content
def test_file_sync_to_disk_sync(self):
"""Test synchronous disk sync operation."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
file_obj = TxtFile(name='sync_test', content='Sync content')
# Test synchronous sync
file_obj.sync_to_disk_sync(tmp_path)
# Verify file was created
file_path = tmp_path / 'sync_test.txt'
assert file_path.exists()
assert file_path.read_text() == 'Sync content'
class TestFileSystem:
"""Test the FileSystem class functionality."""
@pytest.fixture
def temp_filesystem(self):
"""Create a temporary FileSystem for testing."""
with tempfile.TemporaryDirectory() as tmp_dir:
fs = FileSystem(base_dir=tmp_dir, create_default_files=True)
yield fs
try:
fs.nuke()
except Exception:
pass # Directory might already be cleaned up
@pytest.fixture
def empty_filesystem(self):
"""Create a temporary FileSystem without default files."""
with tempfile.TemporaryDirectory() as tmp_dir:
fs = FileSystem(base_dir=tmp_dir, create_default_files=False)
yield fs
try:
fs.nuke()
except Exception:
pass
def test_filesystem_initialization(self, temp_filesystem):
"""Test FileSystem initialization with default files."""
fs = temp_filesystem
# Check that base directory and data directory exist
assert fs.base_dir.exists()
assert fs.data_dir.exists()
assert fs.data_dir.name == DEFAULT_FILE_SYSTEM_PATH
# Check default files are created
assert 'todo.md' in fs.files
assert len(fs.files) == 1
# Check files exist on disk
todo_path = fs.data_dir / 'todo.md'
assert todo_path.exists()
def test_filesystem_without_default_files(self, empty_filesystem):
"""Test FileSystem initialization without default files."""
fs = empty_filesystem
assert fs.base_dir.exists()
assert fs.data_dir.exists()
assert len(fs.files) == 0
def test_get_allowed_extensions(self, temp_filesystem):
"""Test getting allowed file extensions."""
fs = temp_filesystem
extensions = fs.get_allowed_extensions()
assert 'md' in extensions
assert 'txt' in extensions
assert 'json' in extensions
assert 'jsonl' in extensions
assert 'csv' in extensions
def test_filename_validation(self, temp_filesystem):
"""Test filename validation."""
fs = temp_filesystem
# Valid filenames
assert fs._is_valid_filename('test.md') is True
assert fs._is_valid_filename('my_file.txt') is True
assert fs._is_valid_filename('file-name.md') is True
assert fs._is_valid_filename('file123.txt') is True
assert fs._is_valid_filename('data.json') is True
assert fs._is_valid_filename('data.jsonl') is True
assert fs._is_valid_filename('users.csv') is True
assert fs._is_valid_filename('WebVoyager_data.jsonl') is True # with underscores
# Invalid filenames
assert fs._is_valid_filename('test.doc') is False # wrong extension
assert fs._is_valid_filename('test') is False # no extension
assert fs._is_valid_filename('test.md.txt') is False # multiple extensions
assert fs._is_valid_filename('test with spaces.md') is False # spaces
assert fs._is_valid_filename('test@file.md') is False # special chars
assert fs._is_valid_filename('.md') is False # no name
assert fs._is_valid_filename('.json') is False # no name
assert fs._is_valid_filename('.jsonl') is False # no name
assert fs._is_valid_filename('.csv') is False # no name
def test_filename_parsing(self, temp_filesystem):
"""Test filename parsing into name and extension."""
fs = temp_filesystem
name, ext = fs._parse_filename('test.md')
assert name == 'test'
assert ext == 'md'
name, ext = fs._parse_filename('my_file.TXT')
assert name == 'my_file'
assert ext == 'txt' # Should be lowercased
name, ext = fs._parse_filename('data.json')
assert name == 'data'
assert ext == 'json'
name, ext = fs._parse_filename('users.CSV')
assert name == 'users'
assert ext == 'csv' # Should be lowercased
def test_get_file(self, temp_filesystem):
"""Test getting files from the filesystem."""
fs = temp_filesystem
# Get non-existent file
non_existent = fs.get_file('nonexistent.md')
assert non_existent is None
# Get file with invalid name
invalid = fs.get_file('invalid@name.md')
assert invalid is None
def test_list_files(self, temp_filesystem):
"""Test listing files in the filesystem."""
fs = temp_filesystem
files = fs.list_files()
assert 'todo.md' in files
assert len(files) == 1
def test_display_file(self, temp_filesystem):
"""Test displaying file content."""
fs = temp_filesystem
# Display existing file
content = fs.display_file('todo.md')
assert content == '' # Default files are empty
# Display non-existent file
content = fs.display_file('nonexistent.md')
assert content is None
# Display file with invalid name
content = fs.display_file('invalid@name.md')
assert content is None
async def test_read_file(self, temp_filesystem: FileSystem):
"""Test reading file content with proper formatting."""
fs: FileSystem = temp_filesystem
# Read existing empty file
result = await fs.read_file('todo.md')
expected = 'Read from file todo.md.\n<content>\n\n</content>'
assert result == expected
# Read non-existent file
result = await fs.read_file('nonexistent.md')
assert result == "File 'nonexistent.md' not found."
# Read file with invalid name
result = await fs.read_file('invalid@name.md')
assert result == INVALID_FILENAME_ERROR_MESSAGE
async def test_write_file(self, temp_filesystem):
"""Test writing content to files."""
fs = temp_filesystem
# Write to existing file
result = await fs.write_file('results.md', '# Test Results\nThis is a test.')
assert result == 'Data written to file results.md successfully.'
# Verify content was written
content = await fs.read_file('results.md')
assert '# Test Results\nThis is a test.' in content
# Write to new file
result = await fs.write_file('new_file.txt', 'New file content')
assert result == 'Data written to file new_file.txt successfully.'
assert 'new_file.txt' in fs.files
assert fs.get_file('new_file.txt').content == 'New file content'
# Write with invalid filename
result = await fs.write_file('invalid@name.md', 'content')
assert result == INVALID_FILENAME_ERROR_MESSAGE
# Write with invalid extension
result = await fs.write_file('test.doc', 'content')
assert result == INVALID_FILENAME_ERROR_MESSAGE
async def test_write_json_file(self, temp_filesystem):
"""Test writing JSON files."""
fs = temp_filesystem
# Write valid JSON content
json_content = '{"users": [{"name": "John", "age": 30}, {"name": "Jane", "age": 25}]}'
result = await fs.write_file('data.json', json_content)
assert result == 'Data written to file data.json successfully.'
# Verify content was written
content = await fs.read_file('data.json')
assert json_content in content
# Verify file object was created
assert 'data.json' in fs.files
file_obj = fs.get_file('data.json')
assert file_obj is not None
assert isinstance(file_obj, JsonFile)
assert file_obj.content == json_content
# Write to new JSON file
result = await fs.write_file('config.json', '{"debug": true, "port": 8080}')
assert result == 'Data written to file config.json successfully.'
assert 'config.json' in fs.files
async def test_write_csv_file(self, temp_filesystem):
"""Test writing CSV files."""
fs = temp_filesystem
# Write valid CSV content
csv_content = 'name,age,city\nJohn,30,New York\nJane,25,London\nBob,35,Paris'
result = await fs.write_file('users.csv', csv_content)
assert result == 'Data written to file users.csv successfully.'
# Verify content was written
content = await fs.read_file('users.csv')
assert csv_content in content
# Verify file object was created
assert 'users.csv' in fs.files
file_obj = fs.get_file('users.csv')
assert file_obj is not None
assert isinstance(file_obj, CsvFile)
assert file_obj.content == csv_content
# Write to new CSV file
result = await fs.write_file('products.csv', 'id,name,price\n1,Laptop,999.99\n2,Mouse,29.99')
assert result == 'Data written to file products.csv successfully.'
assert 'products.csv' in fs.files
async def test_append_file(self, temp_filesystem):
"""Test appending content to files."""
fs = temp_filesystem
# First write some content
await fs.write_file('test.md', '# Title')
# Append content
result = await fs.append_file('test.md', '\n## Section 1')
assert result == 'Data appended to file test.md successfully.'
# Verify content was appended
content = fs.get_file('test.md').content
assert content == '# Title\n## Section 1'
# Append to non-existent file
result = await fs.append_file('nonexistent.md', 'content')
assert result == "File 'nonexistent.md' not found."
# Append with invalid filename
result = await fs.append_file('invalid@name.md', 'content')
assert result == INVALID_FILENAME_ERROR_MESSAGE
async def test_append_json_file(self, temp_filesystem):
	"""Test appending content to JSON files."""
	fs = temp_filesystem
	# First write some JSON content
	await fs.write_file('data.json', '{"users": [{"name": "John", "age": 30}]}')
	# Append additional JSON content (note: this creates invalid JSON, but tests the append functionality)
	result = await fs.append_file('data.json', ', {"name": "Jane", "age": 25}')
	assert result == 'Data appended to file data.json successfully.'
	# Verify content was appended — append is a raw string concatenation,
	# no JSON validation is performed on the combined result
	file_obj = fs.get_file('data.json')
	assert file_obj is not None
	expected_content = '{"users": [{"name": "John", "age": 30}]}, {"name": "Jane", "age": 25}'
	assert file_obj.content == expected_content
async def test_append_csv_file(self, temp_filesystem):
	"""Test appending content to CSV files."""
	fs = temp_filesystem
	# First write some CSV content
	await fs.write_file('users.csv', 'name,age,city\nJohn,30,New York')
	# Append additional CSV row
	result = await fs.append_file('users.csv', '\nJane,25,London')
	assert result == 'Data appended to file users.csv successfully.'
	# Verify content was appended
	file_obj = fs.get_file('users.csv')
	assert file_obj is not None
	expected_content = 'name,age,city\nJohn,30,New York\nJane,25,London'
	assert file_obj.content == expected_content
	# Append another row — file_obj is the live object, so its .content
	# reflects the second append without re-fetching
	await fs.append_file('users.csv', '\nBob,35,Paris')
	expected_content = 'name,age,city\nJohn,30,New York\nJane,25,London\nBob,35,Paris'
	assert file_obj.content == expected_content
async def test_write_jsonl_file(self, temp_filesystem):
	"""Test writing JSONL (JSON Lines) files.

	Checks the success message, read-back content, and that a JsonlFile
	object is registered for each .jsonl path written.
	"""
	fs = temp_filesystem
	# Write valid JSONL content
	jsonl_content = '{"id": 1, "name": "John", "age": 30}\n{"id": 2, "name": "Jane", "age": 25}'
	result = await fs.write_file('data.jsonl', jsonl_content)
	assert result == 'Data written to file data.jsonl successfully.'
	# Verify content was written
	content = await fs.read_file('data.jsonl')
	assert jsonl_content in content
	# Verify file object was created
	assert 'data.jsonl' in fs.files
	file_obj = fs.get_file('data.jsonl')
	assert file_obj is not None
	assert isinstance(file_obj, JsonlFile)
	assert file_obj.content == jsonl_content
	# Write to new JSONL file (filename with underscores and mixed case is valid)
	result = await fs.write_file('WebVoyager_data.jsonl', '{"task": "test", "url": "https://example.com"}')
	assert result == 'Data written to file WebVoyager_data.jsonl successfully.'
	assert 'WebVoyager_data.jsonl' in fs.files
async def test_append_jsonl_file(self, temp_filesystem):
	"""Test appending content to JSONL files."""
	fs = temp_filesystem
	# First write some JSONL content
	await fs.write_file('data.jsonl', '{"id": 1, "name": "John", "age": 30}')
	# Append additional JSONL record (caller supplies the newline separator)
	result = await fs.append_file('data.jsonl', '\n{"id": 2, "name": "Jane", "age": 25}')
	assert result == 'Data appended to file data.jsonl successfully.'
	# Verify content was appended
	file_obj = fs.get_file('data.jsonl')
	assert file_obj is not None
	expected_content = '{"id": 1, "name": "John", "age": 30}\n{"id": 2, "name": "Jane", "age": 25}'
	assert file_obj.content == expected_content
	# Append another record — file_obj is live, so .content reflects it
	await fs.append_file('data.jsonl', '\n{"id": 3, "name": "Bob", "age": 35}')
	expected_content = (
		'{"id": 1, "name": "John", "age": 30}\n{"id": 2, "name": "Jane", "age": 25}\n{"id": 3, "name": "Bob", "age": 35}'
	)
	assert file_obj.content == expected_content
async def test_save_extracted_content(self, temp_filesystem):
	"""Test saving extracted content with auto-numbering.

	Each call creates extracted_content_<N>.md (N starting at 0) and
	increments fs.extracted_content_count.
	"""
	fs = temp_filesystem
	# Save first extracted content
	result = await fs.save_extracted_content('First extracted content')
	assert result == 'extracted_content_0.md'
	assert 'extracted_content_0.md' in fs.files
	assert fs.extracted_content_count == 1
	# Save second extracted content
	result = await fs.save_extracted_content('Second extracted content')
	assert result == 'extracted_content_1.md'
	assert 'extracted_content_1.md' in fs.files
	assert fs.extracted_content_count == 2
	# Verify content
	content1 = fs.get_file('extracted_content_0.md').content
	content2 = fs.get_file('extracted_content_1.md').content
	assert content1 == 'First extracted content'
	assert content2 == 'Second extracted content'
async def test_describe_with_content(self, temp_filesystem):
	"""Test describing filesystem with files containing content."""
	fs = temp_filesystem
	# Add content to files
	await fs.write_file('results.md', '# Results\nTest results here.')
	await fs.write_file('notes.txt', 'These are my notes.')
	description = fs.describe()
	# Should contain file information: names, inline content, and a line count
	assert 'results.md' in description
	assert 'notes.txt' in description
	assert '# Results' in description
	assert 'These are my notes.' in description
	assert 'lines' in description
async def test_describe_large_files(self, temp_filesystem):
	"""Test describing filesystem with large files (truncated content)."""
	fs = temp_filesystem
	# Create a large file (100 lines — big enough to trigger truncation)
	large_content = '\n'.join([f'Line {i}' for i in range(100)])
	await fs.write_file('large.md', large_content)
	description = fs.describe()
	# Should be truncated with "more lines" indicator while keeping head and tail
	assert 'large.md' in description
	assert 'more lines' in description
	assert 'Line 0' in description  # Start should be shown
	assert 'Line 99' in description  # End should be shown
def test_get_todo_contents(self, temp_filesystem):
	"""The todo helper starts empty and reflects updates made to todo.md."""
	filesystem = temp_filesystem
	# A freshly created filesystem has an empty todo file.
	assert filesystem.get_todo_contents() == ''
	# After writing tasks into todo.md, the helper exposes them.
	todo_file = filesystem.get_file('todo.md')
	todo_file.update_content('- [ ] Task 1\n- [ ] Task 2')
	assert '- [ ] Task 1' in filesystem.get_todo_contents()
def test_get_state(self, temp_filesystem):
	"""Test getting filesystem state."""
	fs = temp_filesystem
	state = fs.get_state()
	# The state snapshot carries the base directory, the extraction counter,
	# and an entry for every managed file (todo.md exists by default)
	assert isinstance(state, FileSystemState)
	assert state.base_dir == str(fs.base_dir)
	assert state.extracted_content_count == 0
	assert 'todo.md' in state.files
async def test_from_state(self, temp_filesystem):
	"""Test restoring filesystem from state.

	Round-trips the filesystem through get_state()/from_state() and checks
	that file objects, counters, and on-disk files are all reconstructed.
	"""
	fs = temp_filesystem
	# Add some content
	await fs.write_file('results.md', '# Original Results')
	await fs.write_file('custom.txt', 'Custom content')
	await fs.save_extracted_content('Extracted data')
	# Get state
	state = fs.get_state()
	# Create new filesystem from state
	fs2 = FileSystem.from_state(state)
	# Verify restoration
	assert fs2.base_dir == fs.base_dir
	assert fs2.extracted_content_count == fs.extracted_content_count
	assert len(fs2.files) == len(fs.files)
	# Verify file contents
	file_obj = fs2.get_file('results.md')
	assert file_obj is not None
	assert file_obj.content == '# Original Results'
	file_obj = fs2.get_file('custom.txt')
	assert file_obj is not None
	assert file_obj.content == 'Custom content'
	file_obj = fs2.get_file('extracted_content_0.md')
	assert file_obj is not None
	assert file_obj.content == 'Extracted data'
	# Verify files exist on disk (restoration rewrites the data dir)
	assert (fs2.data_dir / 'results.md').exists()
	assert (fs2.data_dir / 'custom.txt').exists()
	assert (fs2.data_dir / 'extracted_content_0.md').exists()
	# Clean up second filesystem
	fs2.nuke()
async def test_complete_workflow_with_json_csv(self):
	"""Test a complete filesystem workflow with JSON and CSV files.

	Exercises write, append, overwrite, listing, typed file objects, and
	full state round-trip (get_state -> nuke -> from_state) for JSON/CSV.
	"""
	with tempfile.TemporaryDirectory() as tmp_dir:
		# Create filesystem
		fs = FileSystem(base_dir=tmp_dir, create_default_files=True)
		# Write JSON configuration file
		config_json = '{"app": {"name": "TestApp", "version": "1.0"}, "database": {"host": "localhost", "port": 5432}}'
		await fs.write_file('config.json', config_json)
		# Write CSV data file
		users_csv = 'id,name,email,age\n1,John Doe,john@example.com,30\n2,Jane Smith,jane@example.com,25'
		await fs.write_file('users.csv', users_csv)
		# Append more data to CSV
		await fs.append_file('users.csv', '\n3,Bob Johnson,bob@example.com,35')
		# Update JSON configuration (write_file on an existing path overwrites)
		updated_config = '{"app": {"name": "TestApp", "version": "1.1"}, "database": {"host": "localhost", "port": 5432}, "features": {"logging": true}}'
		await fs.write_file('config.json', updated_config)
		# Create another JSON file for API responses
		api_response = '{"status": "success", "data": [{"id": 1, "name": "Item 1"}, {"id": 2, "name": "Item 2"}]}'
		await fs.write_file('api_response.json', api_response)
		# Create a products CSV file
		products_csv = (
			'sku,name,price,category\nLAP001,Gaming Laptop,1299.99,Electronics\nMOU001,Wireless Mouse,29.99,Accessories'
		)
		await fs.write_file('products.csv', products_csv)
		# Verify file listing (todo.md is created by default)
		files = fs.list_files()
		expected_files = ['todo.md', 'config.json', 'users.csv', 'api_response.json', 'products.csv']
		assert len(files) == len(expected_files)
		for expected_file in expected_files:
			assert expected_file in files
		# Verify JSON file contents
		config_file = fs.get_file('config.json')
		assert config_file is not None
		assert isinstance(config_file, JsonFile)
		assert config_file.content == updated_config
		api_file = fs.get_file('api_response.json')
		assert api_file is not None
		assert isinstance(api_file, JsonFile)
		assert api_file.content == api_response
		# Verify CSV file contents
		users_file = fs.get_file('users.csv')
		assert users_file is not None
		assert isinstance(users_file, CsvFile)
		expected_users_content = 'id,name,email,age\n1,John Doe,john@example.com,30\n2,Jane Smith,jane@example.com,25\n3,Bob Johnson,bob@example.com,35'
		assert users_file.content == expected_users_content
		products_file = fs.get_file('products.csv')
		assert products_file is not None
		assert isinstance(products_file, CsvFile)
		assert products_file.content == products_csv
		# Test state persistence with JSON and CSV files:
		# snapshot first, destroy the live filesystem, then restore
		state = fs.get_state()
		fs.nuke()
		# Restore from state
		fs2 = FileSystem.from_state(state)
		# Verify restoration
		assert len(fs2.files) == len(expected_files)
		# Verify JSON files were restored correctly (typed objects preserved)
		restored_config = fs2.get_file('config.json')
		assert restored_config is not None
		assert isinstance(restored_config, JsonFile)
		assert restored_config.content == updated_config
		restored_api = fs2.get_file('api_response.json')
		assert restored_api is not None
		assert isinstance(restored_api, JsonFile)
		assert restored_api.content == api_response
		# Verify CSV files were restored correctly
		restored_users = fs2.get_file('users.csv')
		assert restored_users is not None
		assert isinstance(restored_users, CsvFile)
		assert restored_users.content == expected_users_content
		restored_products = fs2.get_file('products.csv')
		assert restored_products is not None
		assert isinstance(restored_products, CsvFile)
		assert restored_products.content == products_csv
		# Verify files exist on disk
		for filename in expected_files:
			if filename != 'todo.md':  # Skip todo.md as it's already tested
				assert (fs2.data_dir / filename).exists()
		fs2.nuke()
async def test_from_state_with_json_csv_files(self, temp_filesystem):
	"""Test restoring filesystem from state with JSON and CSV files.

	Verifies that typed file objects (JsonFile/CsvFile), their contents,
	and the on-disk copies are all reconstructed by from_state().
	"""
	fs = temp_filesystem
	# Add JSON and CSV content
	await fs.write_file('data.json', '{"version": "1.0", "users": [{"name": "John", "age": 30}]}')
	await fs.write_file('users.csv', 'name,age,city\nJohn,30,New York\nJane,25,London')
	await fs.write_file('config.json', '{"debug": true, "port": 8080}')
	await fs.write_file('products.csv', 'id,name,price\n1,Laptop,999.99\n2,Mouse,29.99')
	# Get state
	state = fs.get_state()
	# Create new filesystem from state
	fs2 = FileSystem.from_state(state)
	# Verify restoration
	assert fs2.base_dir == fs.base_dir
	assert len(fs2.files) == len(fs.files)
	# Verify JSON file contents
	json_file = fs2.get_file('data.json')
	assert json_file is not None
	assert isinstance(json_file, JsonFile)
	assert json_file.content == '{"version": "1.0", "users": [{"name": "John", "age": 30}]}'
	config_file = fs2.get_file('config.json')
	assert config_file is not None
	assert isinstance(config_file, JsonFile)
	assert config_file.content == '{"debug": true, "port": 8080}'
	# Verify CSV file contents
	csv_file = fs2.get_file('users.csv')
	assert csv_file is not None
	assert isinstance(csv_file, CsvFile)
	assert csv_file.content == 'name,age,city\nJohn,30,New York\nJane,25,London'
	products_file = fs2.get_file('products.csv')
	assert products_file is not None
	assert isinstance(products_file, CsvFile)
	assert products_file.content == 'id,name,price\n1,Laptop,999.99\n2,Mouse,29.99'
	# Verify files exist on disk
	assert (fs2.data_dir / 'data.json').exists()
	assert (fs2.data_dir / 'users.csv').exists()
	assert (fs2.data_dir / 'config.json').exists()
	assert (fs2.data_dir / 'products.csv').exists()
	# Verify disk contents match the in-memory file objects
	assert (fs2.data_dir / 'data.json').read_text() == '{"version": "1.0", "users": [{"name": "John", "age": 30}]}'
	assert (fs2.data_dir / 'users.csv').read_text() == 'name,age,city\nJohn,30,New York\nJane,25,London'
	# Clean up second filesystem
	fs2.nuke()
def test_nuke(self, empty_filesystem):
	"""nuke() must remove the data directory even when it holds files."""
	filesystem = empty_filesystem
	data_dir = filesystem.data_dir
	# Seed the directory with one file so the removal is non-trivial.
	data_dir.mkdir(exist_ok=True)
	seeded_file = data_dir / 'test.txt'
	seeded_file.write_text('test')
	assert seeded_file.exists()
	# Destroying the filesystem wipes the directory and everything inside it.
	filesystem.nuke()
	assert not data_dir.exists()
def test_get_dir(self, temp_filesystem):
	"""get_dir() returns the existing data directory with the default name."""
	filesystem = temp_filesystem
	returned_dir = filesystem.get_dir()
	# The accessor must hand back the same path the filesystem manages on disk.
	assert returned_dir == filesystem.data_dir
	assert returned_dir.exists()
	assert returned_dir.name == DEFAULT_FILE_SYSTEM_PATH
class TestFileSystemEdgeCases:
	"""Test edge cases and error handling."""

	def test_filesystem_with_string_path(self):
		"""Test FileSystem creation with string path."""
		with tempfile.TemporaryDirectory() as tmp_dir:
			fs = FileSystem(base_dir=tmp_dir, create_default_files=False)
			# base_dir is normalized to a Path regardless of input type
			assert isinstance(fs.base_dir, Path)
			assert fs.base_dir.exists()
			fs.nuke()

	def test_filesystem_with_path_object(self):
		"""Test FileSystem creation with Path object."""
		with tempfile.TemporaryDirectory() as tmp_dir:
			path_obj = Path(tmp_dir)
			fs = FileSystem(base_dir=path_obj, create_default_files=False)
			assert isinstance(fs.base_dir, Path)
			assert fs.base_dir == path_obj
			fs.nuke()

	def test_filesystem_recreates_data_dir(self):
		"""Test that FileSystem recreates data directory if it exists."""
		with tempfile.TemporaryDirectory() as tmp_dir:
			# Create filesystem
			fs1 = FileSystem(base_dir=tmp_dir, create_default_files=True)
			data_dir = fs1.data_dir
			# Add a custom file
			custom_file = data_dir / 'custom.txt'
			custom_file.write_text('custom content')
			assert custom_file.exists()
			# Create another filesystem with same base_dir (should clean data_dir)
			fs2 = FileSystem(base_dir=tmp_dir, create_default_files=True)
			# Custom file should be gone, default files should exist
			assert not custom_file.exists()
			assert (fs2.data_dir / 'todo.md').exists()
			fs2.nuke()

	async def test_write_file_exception_handling(self):
		"""Test exception handling in write_file."""
		with tempfile.TemporaryDirectory() as tmp_dir:
			fs = FileSystem(base_dir=tmp_dir, create_default_files=False)
			# Test with invalid extension — write_file returns an error message instead of raising
			result = await fs.write_file('test.invalid', 'content')
			assert result == INVALID_FILENAME_ERROR_MESSAGE
			fs.nuke()

	def test_from_state_with_unknown_file_type(self):
		"""Test restoring state with unknown file types (should skip them)."""
		with tempfile.TemporaryDirectory() as tmp_dir:
			# Create a state with unknown file type
			state = FileSystemState(
				files={
					'test.md': {'type': 'MarkdownFile', 'data': {'name': 'test', 'content': 'test content'}},
					'unknown.txt': {'type': 'UnknownFileType', 'data': {'name': 'unknown', 'content': 'unknown content'}},
				},
				base_dir=tmp_dir,
				extracted_content_count=0,
			)
			# Restore from state
			fs = FileSystem.from_state(state)
			# Should only have the known file type; unknown types are silently dropped
			assert 'test.md' in fs.files
			assert 'unknown.txt' not in fs.files
			assert len(fs.files) == 1
			fs.nuke()
class TestFileSystemIntegration:
"""Integration tests for FileSystem with real file operations."""
async def test_complete_workflow(self):
	"""Test a complete filesystem workflow.

	Covers write, append, extracted-content saving, and the resulting
	file listing on a freshly created filesystem.
	"""
	with tempfile.TemporaryDirectory() as tmp_dir:
		# Create filesystem
		fs = FileSystem(base_dir=tmp_dir, create_default_files=True)
		# Write to results file
		await fs.write_file('results.md', '# Test Results\n## Section 1\nInitial results.')
		# Append more content
		await fs.append_file('results.md', '\n## Section 2\nAdditional findings.')
		# Create a notes file
		await fs.write_file('notes.txt', 'Important notes:\n- Note 1\n- Note 2')
		# Save extracted content
		await fs.save_extracted_content('Extracted data from web page')
		await fs.save_extracted_content('Second extraction')
		# Verify file listing
		files = fs.list_files()
		assert len(files) == 5  # results.md, todo.md, notes.txt, 2 extracted files
		# Verify content
		file_obj = fs.get_file('results.md')
		assert file_obj is not None
		results_content = file_obj.content
		assert '# Test Results' in results_content
		assert '## Section 1' in results_content
		assert '## Section 2' in results_content
		assert 'Additional findings.' in results_content
"""
Comprehensive tests for the action registry system - Core functionality.
Tests cover:
1. Existing parameter patterns (individual params, pydantic models)
2. Special parameter injection (browser_session, page_extraction_llm, etc.)
3. Action-to-action calling scenarios
4. Mixed parameter patterns
5. Registry execution edge cases
"""
import asyncio
import logging
import pytest
from pydantic import Field
from pytest_httpserver import HTTPServer
from pytest_httpserver.httpserver import HandlerType
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.llm.messages import UserMessage
from browser_use.tools.registry.service import Registry
from browser_use.tools.registry.views import ActionModel as BaseActionModel
from browser_use.tools.views import (
ClickElementAction,
InputTextAction,
NoParamsAction,
SearchAction,
)
from tests.ci.conftest import create_mock_llm
# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestContext:
	"""Minimal placeholder used as the Registry's generic context type in these tests."""

	pass
# Test parameter models
class SimpleParams(BaseActionModel):
	"""Simple parameter model with a single required field."""

	value: str = Field(description='Test value')
class ComplexParams(BaseActionModel):
	"""Complex parameter model with required, defaulted, and boolean fields."""

	text: str = Field(description='Text input')
	number: int = Field(description='Number input', default=42)
	optional_flag: bool = Field(description='Optional boolean', default=False)
# Test fixtures
@pytest.fixture(scope='session')
def http_server():
	"""Create and provide a test HTTP server that serves static content.

	Session-scoped: one server is shared by all tests in the run.
	"""
	server = HTTPServer()
	server.start()
	# Add a simple test page that can handle multiple requests
	# (HandlerType.PERMANENT keeps the handler registered across requests)
	server.expect_request('/test', handler_type=HandlerType.PERMANENT).respond_with_data(
		'<html><head><title>Test Page</title></head><body><h1>Test Page</h1><p>Hello from test page</p></body></html>',
		content_type='text/html',
	)
	yield server
	server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
	"""Return the base URL (scheme://host:port) for the test HTTP server."""
	return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
def mock_llm():
	"""Create a mock LLM (canned responses, no network calls)."""
	return create_mock_llm()
@pytest.fixture(scope='function')
def registry():
	"""Create a fresh registry for each test so registered actions don't leak between tests."""
	return Registry[TestContext]()
@pytest.fixture(scope='function')
async def browser_session(base_url):
	"""Create a real BrowserSession for testing.

	Starts a headless browser, navigates it to the test server's /test
	page, and kills the browser after the test finishes.
	"""
	browser_session = BrowserSession(
		browser_profile=BrowserProfile(
			headless=True,
			user_data_dir=None,
			keep_alive=True,
		)
	)
	await browser_session.start()
	from browser_use.browser.events import NavigateToUrlEvent

	browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/test'))
	await asyncio.sleep(0.5)  # Wait for navigation
	yield browser_session
	await browser_session.kill()
class TestActionRegistryParameterPatterns:
	"""Test different parameter patterns that should all continue to work."""

	async def test_individual_parameters_no_browser(self, registry):
		"""Test action with individual parameters, no special injection."""

		@registry.action('Simple action with individual params')
		async def simple_action(text: str, number: int = 10):
			return ActionResult(extracted_content=f'Text: {text}, Number: {number}')

		# Test execution
		result = await registry.execute_action('simple_action', {'text': 'hello', 'number': 42})
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Text: hello, Number: 42' in result.extracted_content

	async def test_individual_parameters_with_browser(self, registry, browser_session, base_url):
		"""Test action with individual parameters plus browser_session injection."""

		@registry.action('Action with individual params and browser')
		async def action_with_browser(text: str, browser_session: BrowserSession):
			url = await browser_session.get_current_page_url()
			return ActionResult(extracted_content=f'Text: {text}, URL: {url}')

		# Navigate to test page first
		from browser_use.browser.events import NavigateToUrlEvent

		event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/test', new_tab=True))
		await event
		# Test execution — browser_session is injected by the registry, not passed in params
		result = await registry.execute_action('action_with_browser', {'text': 'hello'}, browser_session=browser_session)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Text: hello, URL:' in result.extracted_content
		assert base_url in result.extracted_content

	async def test_pydantic_model_parameters(self, registry, browser_session, base_url):
		"""Test action that takes a pydantic model as first parameter."""

		@registry.action('Action with pydantic model', param_model=ComplexParams)
		async def pydantic_action(params: ComplexParams, browser_session: BrowserSession):
			url = await browser_session.get_current_page_url()
			return ActionResult(
				extracted_content=f'Text: {params.text}, Number: {params.number}, Flag: {params.optional_flag}, URL: {url}'
			)

		# Navigate to test page first
		from browser_use.browser.events import NavigateToUrlEvent

		event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/test', new_tab=True))
		await event
		# Test execution — the flat dict is validated into a ComplexParams instance
		result = await registry.execute_action(
			'pydantic_action', {'text': 'test', 'number': 100, 'optional_flag': True}, browser_session=browser_session
		)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Text: test, Number: 100, Flag: True' in result.extracted_content
		assert base_url in result.extracted_content

	async def test_mixed_special_parameters(self, registry, browser_session, base_url, mock_llm):
		"""Test action with multiple special injected parameters."""
		from browser_use.llm.base import BaseChatModel

		@registry.action('Action with multiple special params')
		async def multi_special_action(
			text: str,
			browser_session: BrowserSession,
			page_extraction_llm: BaseChatModel,
			available_file_paths: list,
		):
			llm_response = await page_extraction_llm.ainvoke([UserMessage(content='test')])
			files = available_file_paths or []
			url = await browser_session.get_current_page_url()
			return ActionResult(
				extracted_content=f'Text: {text}, URL: {url}, LLM: {llm_response.completion}, Files: {len(files)}'
			)

		# Navigate to test page first
		from browser_use.browser.events import NavigateToUrlEvent

		event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/test', new_tab=True))
		await event
		# Test execution — all special kwargs are injected alongside the params dict
		result = await registry.execute_action(
			'multi_special_action',
			{'text': 'hello'},
			browser_session=browser_session,
			page_extraction_llm=mock_llm,
			available_file_paths=['file1.txt', 'file2.txt'],
		)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Text: hello' in result.extracted_content
		assert base_url in result.extracted_content
		# The mock LLM returns a JSON response
		assert '"Task completed successfully"' in result.extracted_content
		assert 'Files: 2' in result.extracted_content

	async def test_no_params_action(self, registry, browser_session):
		"""Test action with NoParamsAction model."""

		@registry.action('No params action', param_model=NoParamsAction)
		async def no_params_action(params: NoParamsAction, browser_session: BrowserSession):
			url = await browser_session.get_current_page_url()
			return ActionResult(extracted_content=f'No params action executed on {url}')

		# Test execution with any parameters (should be ignored)
		result = await registry.execute_action(
			'no_params_action', {'random': 'data', 'should': 'be', 'ignored': True}, browser_session=browser_session
		)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'No params action executed on' in result.extracted_content
		assert '/test' in result.extracted_content
class TestActionToActionCalling:
	"""Test scenarios where actions call other actions."""

	async def test_action_calling_action_with_kwargs(self, registry, browser_session):
		"""Test action calling another action using kwargs (current problematic pattern)."""

		# Helper function that actions can call
		async def helper_function(browser_session: BrowserSession, data: str):
			url = await browser_session.get_current_page_url()
			return f'Helper processed: {data} on {url}'

		@registry.action('First action')
		async def first_action(text: str, browser_session: BrowserSession):
			# This should work without parameter conflicts
			result = await helper_function(browser_session=browser_session, data=text)
			return ActionResult(extracted_content=f'First: {result}')

		@registry.action('Calling action')
		async def calling_action(message: str, browser_session: BrowserSession):
			# Call the first action through the registry (simulates action-to-action calling)
			intermediate_result = await registry.execute_action(
				'first_action', {'text': message}, browser_session=browser_session
			)
			return ActionResult(extracted_content=f'Called result: {intermediate_result.extracted_content}')

		# Test the calling chain
		result = await registry.execute_action('calling_action', {'message': 'test'}, browser_session=browser_session)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Called result: First: Helper processed: test on' in result.extracted_content
		assert '/test' in result.extracted_content

	async def test_google_sheets_style_calling_pattern(self, registry, browser_session):
		"""Test the specific pattern from Google Sheets actions that causes the error."""

		# Simulate the _select_cell_or_range helper function
		async def _select_cell_or_range(browser_session: BrowserSession, cell_or_range: str):
			url = await browser_session.get_current_page_url()
			return ActionResult(extracted_content=f'Selected cell {cell_or_range} on {url}')

		@registry.action('Select cell or range')
		async def select_cell_or_range(cell_or_range: str, browser_session: BrowserSession):
			# This pattern now works with kwargs
			return await _select_cell_or_range(browser_session=browser_session, cell_or_range=cell_or_range)

		@registry.action('Select cell or range (fixed)')
		async def select_cell_or_range_fixed(cell_or_range: str, browser_session: BrowserSession):
			# This pattern also works
			return await _select_cell_or_range(browser_session, cell_or_range)

		@registry.action('Update range contents')
		async def update_range_contents(range_name: str, new_contents: str, browser_session: BrowserSession):
			# This action calls select_cell_or_range_fixed directly, simulating the real
			# Google Sheets pattern. (A previous version also looked up the action's
			# param_model and built an unused instance; that dead code was removed.)
			await select_cell_or_range_fixed(cell_or_range=range_name, browser_session=browser_session)
			return ActionResult(extracted_content=f'Updated range {range_name} with {new_contents}')

		# Test the fixed version (should work)
		result_fixed = await registry.execute_action(
			'select_cell_or_range_fixed', {'cell_or_range': 'A1:F100'}, browser_session=browser_session
		)
		assert result_fixed.extracted_content is not None
		assert 'Selected cell A1:F100 on' in result_fixed.extracted_content
		assert '/test' in result_fixed.extracted_content
		# Test the chained calling pattern
		result_chain = await registry.execute_action(
			'update_range_contents', {'range_name': 'B2:D4', 'new_contents': 'test data'}, browser_session=browser_session
		)
		assert result_chain.extracted_content is not None
		assert 'Updated range B2:D4 with test data' in result_chain.extracted_content
		# Test the problematic version (should work with enhanced registry)
		result_problematic = await registry.execute_action(
			'select_cell_or_range', {'cell_or_range': 'A1:F100'}, browser_session=browser_session
		)
		# With the enhanced registry, this should succeed
		assert result_problematic.extracted_content is not None
		assert 'Selected cell A1:F100 on' in result_problematic.extracted_content
		assert '/test' in result_problematic.extracted_content

	async def test_complex_action_chain(self, registry, browser_session):
		"""Test a complex chain of actions calling other actions."""

		@registry.action('Base action')
		async def base_action(value: str, browser_session: BrowserSession):
			url = await browser_session.get_current_page_url()
			return ActionResult(extracted_content=f'Base: {value} on {url}')

		@registry.action('Middle action')
		async def middle_action(input_val: str, browser_session: BrowserSession):
			# Call base action
			base_result = await registry.execute_action(
				'base_action', {'value': f'processed-{input_val}'}, browser_session=browser_session
			)
			return ActionResult(extracted_content=f'Middle: {base_result.extracted_content}')

		@registry.action('Top action')
		async def top_action(original: str, browser_session: BrowserSession):
			# Call middle action
			middle_result = await registry.execute_action(
				'middle_action', {'input_val': f'enhanced-{original}'}, browser_session=browser_session
			)
			return ActionResult(extracted_content=f'Top: {middle_result.extracted_content}')

		# Test the full chain
		result = await registry.execute_action('top_action', {'original': 'test'}, browser_session=browser_session)
		assert isinstance(result, ActionResult)
		assert result.extracted_content is not None
		assert 'Top: Middle: Base: processed-enhanced-test on' in result.extracted_content
		assert '/test' in result.extracted_content
class TestRegistryEdgeCases:
"""Test edge cases and error conditions"""
async def test_decorated_action_rejects_positional_args(self, registry, browser_session):
	"""Test that decorated actions reject positional arguments.

	The registry wraps each action so direct calls must use keyword
	arguments only; positional calls raise a TypeError.
	"""

	@registry.action('Action that should reject positional args')
	async def test_action(cell_or_range: str, browser_session: BrowserSession):
		url = await browser_session.get_current_page_url()
		return ActionResult(extracted_content=f'Selected cell {cell_or_range} on {url}')

	# Test that calling with positional arguments raises TypeError
	with pytest.raises(
		TypeError, match='test_action\\(\\) does not accept positional arguments, only keyword arguments are allowed'
	):
		await test_action('A1:B2', browser_session)
	# Test that calling with keyword arguments works
	result = await test_action(browser_session=browser_session, cell_or_range='A1:B2')
	assert isinstance(result, ActionResult)
	assert result.extracted_content is not None
	assert 'Selected cell A1:B2 on' in result.extracted_content
async def test_missing_required_browser_session(self, registry):
	"""Test that actions requiring browser_session fail appropriately when not provided."""

	@registry.action('Requires browser')
	async def requires_browser(text: str, browser_session: BrowserSession):
		url = await browser_session.get_current_page_url()
		return ActionResult(extracted_content=f'Text: {text}, URL: {url}')

	# Should raise RuntimeError when browser_session is required but not provided
	with pytest.raises(RuntimeError, match='requires browser_session but none provided'):
		await registry.execute_action(
			'requires_browser',
			{'text': 'test'},
			# No browser_session provided
		)
async def test_missing_required_llm(self, registry, browser_session):
"""Test that actions requiring page_extraction_llm fail appropriately when not provided"""
from browser_use.llm.base import BaseChatModel
@registry.action('Requires LLM')
async def requires_llm(text: str, browser_session: BrowserSession, page_extraction_llm: BaseChatModel):
url = await browser_session.get_current_page_url()
llm_response = await page_extraction_llm.ainvoke([UserMessage(content='test')])
return ActionResult(extracted_content=f'Text: {text}, LLM: {llm_response.completion}')
# Should raise RuntimeError when page_extraction_llm is required but not provided
with pytest.raises(RuntimeError, match='requires page_extraction_llm but none provided'):
await registry.execute_action(
'requires_llm',
{'text': 'test'},
browser_session=browser_session,
# No page_extraction_llm provided
)
async def test_invalid_parameters(self, registry, browser_session):
"""Test handling of invalid parameters"""
@registry.action('Typed action')
async def typed_action(number: int, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Number: {number}')
# Should raise RuntimeError when parameter validation fails
with pytest.raises(RuntimeError, match='Invalid parameters'):
await registry.execute_action(
'typed_action',
{'number': 'not a number'}, # Invalid type
browser_session=browser_session,
)
async def test_nonexistent_action(self, registry, browser_session):
"""Test calling a non-existent action"""
with pytest.raises(ValueError, match='Action nonexistent_action not found'):
await registry.execute_action('nonexistent_action', {'param': 'value'}, browser_session=browser_session)
async def test_sync_action_wrapper(self, registry, browser_session):
"""Test that sync functions are properly wrapped to be async"""
@registry.action('Sync action')
def sync_action(text: str, browser_session: BrowserSession):
# This is a sync function that should be wrapped
return ActionResult(extracted_content=f'Sync: {text}')
# Should work even though the original function is sync
result = await registry.execute_action('sync_action', {'text': 'test'}, browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Sync: test' in result.extracted_content
async def test_excluded_actions(self, browser_session):
"""Test that excluded actions are not registered"""
registry_with_exclusions = Registry[TestContext](exclude_actions=['excluded_action'])
@registry_with_exclusions.action('Excluded action')
async def excluded_action(text: str):
return ActionResult(extracted_content=f'Should not execute: {text}')
@registry_with_exclusions.action('Included action')
async def included_action(text: str):
return ActionResult(extracted_content=f'Should execute: {text}')
# Excluded action should not be in registry
assert 'excluded_action' not in registry_with_exclusions.registry.actions
assert 'included_action' in registry_with_exclusions.registry.actions
# Should raise error when trying to execute excluded action
with pytest.raises(ValueError, match='Action excluded_action not found'):
await registry_with_exclusions.execute_action('excluded_action', {'text': 'test'})
# Included action should work
result = await registry_with_exclusions.execute_action('included_action', {'text': 'test'})
assert result.extracted_content is not None
assert 'Should execute: test' in result.extracted_content
class TestExistingToolsActions:
"""Test that existing tools actions continue to work"""
async def test_existing_action_models(self, registry, browser_session):
"""Test that existing action parameter models work correctly"""
@registry.action('Test search', param_model=SearchAction)
async def test_search(params: SearchAction, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Searched for: {params.query}')
@registry.action('Test click', param_model=ClickElementAction)
async def test_click(params: ClickElementAction, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Clicked element: {params.index}')
@registry.action('Test input', param_model=InputTextAction)
async def test_input(params: InputTextAction, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Input text: {params.text} at index: {params.index}')
# Test SearchGoogleAction
result1 = await registry.execute_action('test_search', {'query': 'python testing'}, browser_session=browser_session)
assert result1.extracted_content is not None
assert 'Searched for: python testing' in result1.extracted_content
# Test ClickElementAction
result2 = await registry.execute_action('test_click', {'index': 42}, browser_session=browser_session)
assert result2.extracted_content is not None
assert 'Clicked element: 42' in result2.extracted_content
# Test InputTextAction
result3 = await registry.execute_action('test_input', {'index': 5, 'text': 'test input'}, browser_session=browser_session)
assert result3.extracted_content is not None
assert 'Input text: test input at index: 5' in result3.extracted_content
async def test_pydantic_vs_individual_params_consistency(self, registry, browser_session):
"""Test that pydantic and individual parameter patterns produce consistent results"""
# Action using individual parameters
@registry.action('Individual params')
async def individual_params_action(text: str, number: int, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Individual: {text}-{number}')
# Action using pydantic model
class TestParams(BaseActionModel):
text: str
number: int
@registry.action('Pydantic params', param_model=TestParams)
async def pydantic_params_action(params: TestParams, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Pydantic: {params.text}-{params.number}')
# Both should produce similar results
test_data = {'text': 'hello', 'number': 42}
result1 = await registry.execute_action('individual_params_action', test_data, browser_session=browser_session)
result2 = await registry.execute_action('pydantic_params_action', test_data, browser_session=browser_session)
# Both should extract the same content (just different prefixes)
assert result1.extracted_content is not None
assert 'hello-42' in result1.extracted_content
assert result2.extracted_content is not None
assert 'hello-42' in result2.extracted_content
assert 'Individual:' in result1.extracted_content
assert 'Pydantic:' in result2.extracted_content
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/infrastructure/test_registry_action_parameter_injection.py | tests/ci/infrastructure/test_registry_action_parameter_injection.py | import asyncio
import base64
import socketserver
import pytest
from pytest_httpserver import HTTPServer
from browser_use.browser import BrowserProfile, BrowserSession
# Fix for httpserver hanging on shutdown - prevent blocking on socket close
socketserver.ThreadingMixIn.block_on_close = False
socketserver.ThreadingMixIn.daemon_threads = True
class TestBrowserContext:
"""Tests for browser context functionality using real browser instances."""
@pytest.fixture(scope='session')
def http_server(self):
"""Create and provide a test HTTP server that serves static content."""
server = HTTPServer()
server.start()
# Add routes for test pages
server.expect_request('/').respond_with_data(
'<html><head><title>Test Home Page</title></head><body><h1>Test Home Page</h1><p>Welcome to the test site</p></body></html>',
content_type='text/html',
)
server.expect_request('/scroll_test').respond_with_data(
"""
<html>
<head>
<title>Scroll Test</title>
<style>
body { height: 3000px; }
.marker { position: absolute; }
#top { top: 0; }
#middle { top: 1000px; }
#bottom { top: 2000px; }
</style>
</head>
<body>
<div id="top" class="marker">Top of the page</div>
<div id="middle" class="marker">Middle of the page</div>
<div id="bottom" class="marker">Bottom of the page</div>
</body>
</html>
""",
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(self, http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session(self):
"""Create and provide a BrowserSession instance with security disabled."""
browser_session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
)
)
await browser_session.start()
yield browser_session
await browser_session.kill()
# Ensure event bus is properly stopped
await browser_session.event_bus.stop(clear=True, timeout=5)
@pytest.mark.skip(reason='TODO: fix')
def test_is_url_allowed(self):
"""
Test the _is_url_allowed method to verify that it correctly checks URLs against
the allowed domains configuration.
"""
# Scenario 1: allowed_domains is None, any URL should be allowed.
from bubus import EventBus
from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog
config1 = BrowserProfile(allowed_domains=None, headless=True, user_data_dir=None)
context1 = BrowserSession(browser_profile=config1)
event_bus1 = EventBus()
watchdog1 = SecurityWatchdog(browser_session=context1, event_bus=event_bus1)
assert watchdog1._is_url_allowed('http://anydomain.com') is True
assert watchdog1._is_url_allowed('https://anotherdomain.org/path') is True
# Scenario 2: allowed_domains is provided.
# Note: match_url_with_domain_pattern defaults to https:// scheme when none is specified
allowed = ['https://example.com', 'http://example.com', 'http://*.mysite.org', 'https://*.mysite.org']
config2 = BrowserProfile(allowed_domains=allowed, headless=True, user_data_dir=None)
context2 = BrowserSession(browser_profile=config2)
event_bus2 = EventBus()
watchdog2 = SecurityWatchdog(browser_session=context2, event_bus=event_bus2)
# URL exactly matching
assert watchdog2._is_url_allowed('http://example.com') is True
# URL with subdomain (should not be allowed)
assert watchdog2._is_url_allowed('http://sub.example.com/path') is False
# URL with subdomain for wildcard pattern (should be allowed)
assert watchdog2._is_url_allowed('http://sub.mysite.org') is True
# URL that matches second allowed domain
assert watchdog2._is_url_allowed('https://mysite.org/page') is True
# URL with port number, still allowed (port is stripped)
assert watchdog2._is_url_allowed('http://example.com:8080') is True
assert watchdog2._is_url_allowed('https://example.com:443') is True
# Scenario 3: Malformed URL or empty domain
# urlparse will return an empty netloc for some malformed URLs.
assert watchdog2._is_url_allowed('notaurl') is False
# Method was removed from BrowserSession
def test_enhanced_css_selector_for_element(self):
"""
Test removed: _enhanced_css_selector_for_element method no longer exists.
"""
pass # Method was removed from BrowserSession
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_navigate_and_get_current_page(self, browser_session, base_url):
"""Test that navigate method changes the URL and get_current_page returns the proper page."""
# Navigate to the test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Get the current page
url = await browser_session.get_current_page_url()
# Verify the page URL matches what we navigated to
assert f'{base_url}/' in url
# Verify the page title
title = await browser_session.get_current_page_title()
assert title == 'Test Home Page'
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_refresh_page(self, browser_session, base_url):
"""Test that refresh_page correctly reloads the current page."""
# Navigate to the test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Get the current page info before refresh
url_before = await browser_session.get_current_page_url()
title_before = await browser_session.get_current_page_title()
# Refresh the page
await browser_session.refresh()
# Get the current page info after refresh
url_after = await browser_session.get_current_page_url()
title_after = await browser_session.get_current_page_title()
# Verify it's still on the same URL
assert url_after == url_before
# Verify the page title is still correct
assert title_after == 'Test Home Page'
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_execute_javascript(self, browser_session, base_url):
"""Test that execute_javascript correctly executes JavaScript in the current page."""
# Navigate to a test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Execute a simple JavaScript snippet that returns a value
result = await browser_session.execute_javascript('document.title')
# Verify the result
assert result == 'Test Home Page'
# Execute JavaScript that modifies the page
await browser_session.execute_javascript("document.body.style.backgroundColor = 'red'")
# Verify the change by reading back the value
bg_color = await browser_session.execute_javascript('document.body.style.backgroundColor')
assert bg_color == 'red'
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
@pytest.mark.skip(reason='get_scroll_info API changed - depends on page object that no longer exists')
async def test_get_scroll_info(self, browser_session, base_url):
"""Test that get_scroll_info returns the correct scroll position information."""
# Navigate to the scroll test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/scroll_test'))
await event
page = await browser_session.get_current_page()
# Get initial scroll info
pixels_above_initial, pixels_below_initial = await browser_session.get_scroll_info(page)
# Verify initial scroll position
assert pixels_above_initial == 0, 'Initial scroll position should be at the top'
assert pixels_below_initial > 0, 'There should be content below the viewport'
# Scroll down the page
await browser_session.execute_javascript('window.scrollBy(0, 500)')
await asyncio.sleep(0.2) # Brief delay for scroll to complete
# Get new scroll info
pixels_above_after_scroll, pixels_below_after_scroll = await browser_session.get_scroll_info(page)
# Verify new scroll position
assert pixels_above_after_scroll > 0, 'Page should be scrolled down'
assert pixels_above_after_scroll >= 400, 'Page should be scrolled down at least 400px'
assert pixels_below_after_scroll < pixels_below_initial, 'Less content should be below viewport after scrolling'
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_take_screenshot(self, browser_session, base_url):
"""Test that take_screenshot returns a valid base64 encoded image."""
# Navigate to the test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Take a screenshot
screenshot_base64 = await browser_session.take_screenshot()
# Verify the screenshot is a valid base64 string
assert isinstance(screenshot_base64, str)
assert len(screenshot_base64) > 0
# Verify it can be decoded as base64
try:
image_data = base64.b64decode(screenshot_base64)
# Verify the data starts with a valid image signature (PNG file header)
assert image_data[:8] == b'\x89PNG\r\n\x1a\n', 'Screenshot is not a valid PNG image'
except Exception as e:
pytest.fail(f'Failed to decode screenshot as base64: {e}')
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_switch_tab_operations(self, browser_session, base_url):
"""Test tab creation, switching, and closing operations."""
# Navigate to home page in first tab
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Create a new tab
await browser_session.create_new_tab(f'{base_url}/scroll_test')
# Verify we have two tabs now
tabs_info = await browser_session.get_tabs()
assert len(tabs_info) == 2, 'Should have two tabs open'
# Verify current tab is the scroll test page
current_url = await browser_session.get_current_page_url()
assert f'{base_url}/scroll_test' in current_url
# Switch back to the first tab
await browser_session.switch_to_tab(0)
# Verify we're back on the home page
current_url = await browser_session.get_current_page_url()
assert f'{base_url}/' in current_url
# Close the second tab
await browser_session.close_tab(1)
# Verify we have the expected number of tabs
# The first tab remains plus any about:blank tabs created by AboutBlankWatchdog
tabs_info = await browser_session.get_tabs_info()
# Filter out about:blank tabs created by the watchdog
non_blank_tabs = [tab for tab in tabs_info if 'about:blank' not in tab.url]
assert len(non_blank_tabs) == 1, (
f'Should have one non-blank tab open after closing the second, but got {len(non_blank_tabs)}: {non_blank_tabs}'
)
assert base_url in non_blank_tabs[0].url, 'The remaining tab should be the home page'
# TODO: highlighting doesn't exist anymore
# @pytest.mark.asyncio
# async def test_remove_highlights(self, browser_session, base_url):
# """Test that remove_highlights successfully removes highlight elements."""
# # Navigate to a test page
# from browser_use.browser.events import NavigateToUrlEvent; event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/')
# # Add a highlight via JavaScript
# await browser_session.execute_javascript("""
# const container = document.createElement('div');
# container.id = 'playwright-highlight-container';
# document.body.appendChild(container);
# const highlight = document.createElement('div');
# highlight.id = 'playwright-highlight-1';
# container.appendChild(highlight);
# const element = document.querySelector('h1');
# element.setAttribute('browser-user-highlight-id', 'playwright-highlight-1');
# """)
# # Verify the highlight container exists
# container_exists = await browser_session.execute_javascript(
# "document.getElementById('playwright-highlight-container') !== null"
# )
# assert container_exists, 'Highlight container should exist before removal'
# # Call remove_highlights
# await browser_session.remove_highlights()
# # Verify the highlight container was removed
# container_exists_after = await browser_session.execute_javascript(
# "document.getElementById('playwright-highlight-container') !== null"
# )
# assert not container_exists_after, 'Highlight container should be removed'
# # Verify the highlight attribute was removed from the element
# attribute_exists = await browser_session.execute_javascript(
# "document.querySelector('h1').hasAttribute('browser-user-highlight-id')"
# )
# assert not attribute_exists, 'browser-user-highlight-id attribute should be removed'
@pytest.mark.asyncio
@pytest.mark.skip(reason='TODO: fix')
async def test_custom_action_with_no_arguments(self, browser_session, base_url):
"""Test that custom actions with no arguments are handled correctly"""
from browser_use.agent.views import ActionResult
from browser_use.tools.registry.service import Registry
# Create a registry
registry = Registry()
# Register a custom action with no arguments
@registry.action('Some custom action with no args')
def simple_action():
return ActionResult(extracted_content='return some result')
# Navigate to a test page
from browser_use.browser.events import NavigateToUrlEvent
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=f'{base_url}/'))
await event
# Execute the action
result = await registry.execute_action('simple_action', {})
# Verify the result
assert isinstance(result, ActionResult)
assert result.extracted_content == 'return some result'
# Test that the action model is created correctly
action_model = registry.create_action_model()
# The action should be in the model fields
assert 'simple_action' in action_model.model_fields
# Create an instance with the simple_action
action_instance = action_model(simple_action={}) # type: ignore[call-arg]
# Test that model_dump works correctly
dumped = action_instance.model_dump(exclude_unset=True)
assert 'simple_action' in dumped
assert dumped['simple_action'] == {}
# Test async version as well
@registry.action('Async custom action with no args')
async def async_simple_action():
return ActionResult(extracted_content='async result')
result = await registry.execute_action('async_simple_action', {})
assert result.extracted_content == 'async result'
# Test with special parameters but no regular arguments
@registry.action('Action with only special params')
async def special_params_only(browser_session):
current_url = await browser_session.get_current_page_url()
return ActionResult(extracted_content=f'Page URL: {current_url}')
result = await registry.execute_action('special_params_only', {}, browser_session=browser_session)
assert 'Page URL:' in result.extracted_content
assert base_url in result.extracted_content
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/infrastructure/test_url_shortening.py | tests/ci/infrastructure/test_url_shortening.py | """
Simplified tests for URL shortening functionality in Agent service.
Three focused tests:
1. Input message processing with URL shortening
2. Output processing with custom actions and URL restoration
3. End-to-end pipeline test
"""
import json
import pytest
from browser_use.agent.service import Agent
from browser_use.agent.views import AgentOutput
from browser_use.llm.messages import AssistantMessage, BaseMessage, UserMessage
# Super long URL to reuse across tests - much longer than the 25 character limit
# Includes both query params (?...) and fragment params (#...)
SUPER_LONG_URL = 'https://documentation.example-company.com/api/v3/enterprise/user-management/endpoints/administration/create-new-user-account-with-permissions/advanced-settings?format=detailed-json&version=3.2.1×tamp=1699123456789&session_id=abc123def456ghi789&authentication_token=very_long_authentication_token_string_here&include_metadata=true&expand_relationships=user_groups,permissions,roles&sort_by=created_at&order=desc&page_size=100&include_deprecated_fields=false&api_key=super_long_api_key_that_exceeds_normal_limits#section=user_management&tab=advanced&view=detailed&scroll_to=permissions_table&highlight=admin_settings&filter=active_users&expand_all=true&debug_mode=enabled'
@pytest.fixture
def agent():
"""Create an agent instance for testing URL shortening functionality."""
from tests.ci.conftest import create_mock_llm
return Agent(task='Test URL shortening', llm=create_mock_llm(), url_shortening_limit=25)
class TestUrlShorteningInputProcessing:
"""Test URL shortening for input messages."""
def test_process_input_messages_with_url_shortening(self, agent: Agent):
"""Test that long URLs in input messages are shortened and mappings stored."""
original_content = f'Please visit {SUPER_LONG_URL} and extract information'
messages: list[BaseMessage] = [UserMessage(content=original_content)]
# Process messages (modifies messages in-place and returns URL mappings)
url_mappings = agent._process_messsages_and_replace_long_urls_shorter_ones(messages)
# Verify URL was shortened in the message (modified in-place)
processed_content = messages[0].content or ''
assert processed_content != original_content
assert 'https://documentation.example-company.com' in processed_content
assert len(processed_content) < len(original_content)
# Verify URL mapping was returned
assert len(url_mappings) == 1
shortened_url = next(iter(url_mappings.keys()))
assert url_mappings[shortened_url] == SUPER_LONG_URL
def test_process_user_and_assistant_messages_with_url_shortening(self, agent: Agent):
"""Test URL shortening in both UserMessage and AssistantMessage."""
user_content = f'I need to access {SUPER_LONG_URL} for the API documentation'
assistant_content = f'I will help you navigate to {SUPER_LONG_URL} to retrieve the documentation'
messages: list[BaseMessage] = [UserMessage(content=user_content), AssistantMessage(content=assistant_content)]
# Process messages (modifies messages in-place and returns URL mappings)
url_mappings = agent._process_messsages_and_replace_long_urls_shorter_ones(messages)
# Verify URL was shortened in both messages
user_processed_content = messages[0].content or ''
assistant_processed_content = messages[1].content or ''
assert user_processed_content != user_content
assert assistant_processed_content != assistant_content
assert 'https://documentation.example-company.com' in user_processed_content
assert 'https://documentation.example-company.com' in assistant_processed_content
assert len(user_processed_content) < len(user_content)
assert len(assistant_processed_content) < len(assistant_content)
# Verify URL mapping was returned (should be same shortened URL for both occurrences)
assert len(url_mappings) == 1
shortened_url = next(iter(url_mappings.keys()))
assert url_mappings[shortened_url] == SUPER_LONG_URL
class TestUrlShorteningOutputProcessing:
"""Test URL restoration for output processing with custom actions."""
def test_process_output_with_custom_actions_and_url_restoration(self, agent: Agent):
"""Test that shortened URLs in AgentOutput with custom actions are restored."""
# Set up URL mapping (simulating previous shortening)
shortened_url: str = agent._replace_urls_in_text(SUPER_LONG_URL)[0]
url_mappings = {shortened_url: SUPER_LONG_URL}
# Create AgentOutput with shortened URLs using JSON parsing
output_json = {
'thinking': f'I need to navigate to {shortened_url} for documentation',
'evaluation_previous_goal': 'Successfully processed the request',
'memory': f'Found useful info at {shortened_url}',
'next_goal': 'Complete the documentation review',
'action': [{'navigate': {'url': shortened_url, 'new_tab': False}}],
}
# Create properly typed AgentOutput with custom actions
tools = agent.tools
ActionModel = tools.registry.create_action_model()
AgentOutputWithActions = AgentOutput.type_with_custom_actions(ActionModel)
agent_output = AgentOutputWithActions.model_validate_json(json.dumps(output_json))
# Process the output to restore URLs (modifies agent_output in-place)
agent._recursive_process_all_strings_inside_pydantic_model(agent_output, url_mappings)
# Verify URLs were restored in all locations
assert SUPER_LONG_URL in (agent_output.thinking or '')
assert SUPER_LONG_URL in (agent_output.memory or '')
action_data = agent_output.action[0].model_dump()
assert action_data['navigate']['url'] == SUPER_LONG_URL
class TestUrlShorteningEndToEnd:
"""Test complete URL shortening pipeline end-to-end."""
def test_complete_url_shortening_pipeline(self, agent: Agent):
"""Test the complete pipeline: input shortening -> processing -> output restoration."""
# Step 1: Input processing with URL shortening
original_content = f'Navigate to {SUPER_LONG_URL} and extract the API documentation'
messages: list[BaseMessage] = [UserMessage(content=original_content)]
url_mappings = agent._process_messsages_and_replace_long_urls_shorter_ones(messages)
# Verify URL was shortened in input
assert len(url_mappings) == 1
shortened_url = next(iter(url_mappings.keys()))
assert url_mappings[shortened_url] == SUPER_LONG_URL
assert shortened_url in (messages[0].content or '')
# Step 2: Simulate agent output with shortened URL
output_json = {
'thinking': f'I will navigate to {shortened_url} to get the documentation',
'evaluation_previous_goal': 'Starting documentation extraction',
'memory': f'Target URL: {shortened_url}',
'next_goal': 'Extract API documentation',
'action': [{'navigate': {'url': shortened_url, 'new_tab': True}}],
}
# Create AgentOutput with custom actions
tools = agent.tools
ActionModel = tools.registry.create_action_model()
AgentOutputWithActions = AgentOutput.type_with_custom_actions(ActionModel)
agent_output = AgentOutputWithActions.model_validate_json(json.dumps(output_json))
# Step 3: Output processing with URL restoration (modifies agent_output in-place)
agent._recursive_process_all_strings_inside_pydantic_model(agent_output, url_mappings)
# Verify complete pipeline worked correctly
assert SUPER_LONG_URL in (agent_output.thinking or '')
assert SUPER_LONG_URL in (agent_output.memory or '')
action_data = agent_output.action[0].model_dump()
assert action_data['navigate']['url'] == SUPER_LONG_URL
assert action_data['navigate']['new_tab'] is True
# Verify original shortened content is no longer present
assert shortened_url not in (agent_output.thinking or '')
assert shortened_url not in (agent_output.memory or '')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/infrastructure/test_registry_validation.py | tests/ci/infrastructure/test_registry_validation.py | """
Comprehensive tests for the action registry system - Validation and patterns.
Tests cover:
1. Type 1 and Type 2 patterns
2. Validation rules
3. Decorated function behavior
4. Parameter model generation
5. Parameter ordering
"""
import asyncio
import logging
import pytest
from pydantic import Field
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.tools.registry.service import Registry
from browser_use.tools.registry.views import ActionModel as BaseActionModel
from tests.ci.conftest import create_mock_llm
# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestType1Pattern:
"""Test Type 1 Pattern: Pydantic model first (from normalization tests)"""
def test_type1_with_param_model(self):
"""Type 1: action(params: Model, special_args...) should work"""
registry = Registry()
class ClickAction(BaseActionModel):
index: int
delay: float = 0.0
@registry.action('Click element', param_model=ClickAction)
async def click_element(params: ClickAction, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Clicked {params.index}')
# Verify registration
assert 'click_element' in registry.registry.actions
action = registry.registry.actions['click_element']
assert action.param_model == ClickAction
# Verify decorated function signature (should be kwargs-only)
import inspect
sig = inspect.signature(click_element)
params = list(sig.parameters.values())
# Should have no positional-only or positional-or-keyword params
for param in params:
assert param.kind in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.VAR_KEYWORD)
def test_type1_with_multiple_special_params(self):
"""Type 1 with multiple special params should work"""
registry = Registry()
class ExtractAction(BaseActionModel):
goal: str
include_links: bool = False
from browser_use.llm.base import BaseChatModel
@registry.action('Extract content', param_model=ExtractAction)
async def extract_content(params: ExtractAction, browser_session: BrowserSession, page_extraction_llm: BaseChatModel):
return ActionResult(extracted_content=params.goal)
assert 'extract_content' in registry.registry.actions
class TestType2Pattern:
"""Test Type 2 Pattern: loose parameters (from normalization tests)"""
def test_type2_simple_action(self):
"""Type 2: action(arg1, arg2, special_args...) should work"""
registry = Registry()
@registry.action('Fill field')
async def fill_field(index: int, text: str, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Filled {index} with {text}')
# Verify registration
assert 'fill_field' in registry.registry.actions
action = registry.registry.actions['fill_field']
# Should auto-generate param model
assert action.param_model is not None
assert 'index' in action.param_model.model_fields
assert 'text' in action.param_model.model_fields
def test_type2_with_defaults(self):
"""Type 2 with default values should preserve defaults"""
registry = Registry()
@registry.action('Scroll page')
async def scroll_page(direction: str = 'down', amount: int = 100, browser_session: BrowserSession = None): # type: ignore
return ActionResult(extracted_content=f'Scrolled {direction} by {amount}')
action = registry.registry.actions['scroll_page']
# Check that defaults are preserved in generated model
schema = action.param_model.model_json_schema()
assert schema['properties']['direction']['default'] == 'down'
assert schema['properties']['amount']['default'] == 100
def test_type2_no_action_params(self):
"""Type 2 with only special params should work"""
registry = Registry()
@registry.action('Save PDF')
async def save_pdf(browser_session: BrowserSession):
return ActionResult(extracted_content='Saved PDF')
action = registry.registry.actions['save_pdf']
# Should have empty or minimal param model
fields = action.param_model.model_fields
assert len(fields) == 0 or all(f in ['title'] for f in fields)
def test_no_special_params_action(self):
"""Test action with no special params (like wait action in Tools)"""
registry = Registry()
@registry.action('Wait for x seconds default 3')
async def wait(seconds: int = 3):
await asyncio.sleep(seconds)
return ActionResult(extracted_content=f'Waited {seconds} seconds')
# Should register successfully
assert 'wait' in registry.registry.actions
action = registry.registry.actions['wait']
# Should have seconds in param model
assert 'seconds' in action.param_model.model_fields
# Should preserve default value
schema = action.param_model.model_json_schema()
assert schema['properties']['seconds']['default'] == 3
class TestValidationRules:
"""Test validation rules for action registration (from normalization tests)"""
def test_error_on_kwargs_in_original_function(self):
"""Should error if original function has kwargs"""
registry = Registry()
with pytest.raises(ValueError, match='kwargs.*not allowed'):
@registry.action('Bad action')
async def bad_action(index: int, browser_session: BrowserSession, **kwargs):
pass
def test_error_on_special_param_name_with_wrong_type(self):
"""Should error if special param name used with wrong type"""
registry = Registry()
# Using 'browser_session' with wrong type should error
with pytest.raises(ValueError, match='conflicts with special argument.*browser_session: BrowserSession'):
@registry.action('Bad session')
async def bad_session(browser_session: str):
pass
def test_special_params_must_match_type(self):
"""Special params with correct types should work"""
registry = Registry()
@registry.action('Good action')
async def good_action(
index: int,
browser_session: BrowserSession, # Correct type
):
return ActionResult()
assert 'good_action' in registry.registry.actions
class TestDecoratedFunctionBehavior:
"""Test behavior of decorated action functions (from normalization tests)"""
async def test_decorated_function_only_accepts_kwargs(self):
"""Decorated functions should only accept kwargs, no positional args"""
registry = Registry()
class MockBrowserSession:
async def get_current_page(self):
return None
@registry.action('Click')
async def click(index: int, browser_session: BrowserSession):
return ActionResult()
# Should raise error when called with positional args
with pytest.raises(TypeError, match='positional arguments'):
await click(5, MockBrowserSession())
async def test_decorated_function_accepts_params_model(self):
"""Decorated function should accept params as model"""
registry = Registry()
class MockBrowserSession:
async def get_current_page(self):
return None
@registry.action('Input text')
async def input_text(index: int, text: str, browser_session: BrowserSession):
return ActionResult(extracted_content=f'{index}:{text}')
# Get the generated param model class
action = registry.registry.actions['input_text']
ParamsModel = action.param_model
# Should work with params model
result = await input_text(params=ParamsModel(index=5, text='hello'), browser_session=MockBrowserSession())
assert result.extracted_content == '5:hello'
async def test_decorated_function_ignores_extra_kwargs(self):
"""Decorated function should ignore extra kwargs for easy unpacking"""
registry = Registry()
@registry.action('Simple action')
async def simple_action(value: int):
return ActionResult(extracted_content=str(value))
# Should work even with extra kwargs
special_context = {
'browser_session': None,
'page_extraction_llm': create_mock_llm(),
'context': {'extra': 'data'},
'unknown_param': 'ignored',
}
action = registry.registry.actions['simple_action']
ParamsModel = action.param_model
result = await simple_action(params=ParamsModel(value=42), **special_context)
assert result.extracted_content == '42'
class TestParamsModelGeneration:
"""Test automatic parameter model generation (from normalization tests)"""
def test_generates_model_from_non_special_args(self):
"""Should generate param model from non-special positional args"""
registry = Registry()
@registry.action('Complex action')
async def complex_action(
query: str,
max_results: int,
include_images: bool = True,
browser_session: BrowserSession = None, # type: ignore
):
return ActionResult()
action = registry.registry.actions['complex_action']
model_fields = action.param_model.model_fields
# Should include only non-special params
assert 'query' in model_fields
assert 'max_results' in model_fields
assert 'include_images' in model_fields
# Should NOT include special params
assert 'browser_session' not in model_fields
def test_preserves_type_annotations(self):
"""Generated model should preserve type annotations"""
registry = Registry()
@registry.action('Typed action')
async def typed_action(
count: int,
rate: float,
enabled: bool,
name: str | None = None,
browser_session: BrowserSession = None, # type: ignore
):
return ActionResult()
action = registry.registry.actions['typed_action']
schema = action.param_model.model_json_schema()
# Check types are preserved
assert schema['properties']['count']['type'] == 'integer'
assert schema['properties']['rate']['type'] == 'number'
assert schema['properties']['enabled']['type'] == 'boolean'
# Optional should allow null
assert 'null' in schema['properties']['name']['anyOf'][1]['type']
class TestParameterOrdering:
"""Test mixed ordering of parameters (from normalization tests)"""
def test_mixed_param_ordering(self):
"""Should handle any ordering of action params and special params"""
registry = Registry()
from browser_use.llm.base import BaseChatModel
# Special params mixed throughout
@registry.action('Mixed params')
async def mixed_action(
first: str,
browser_session: BrowserSession,
second: int,
third: bool = True,
page_extraction_llm: BaseChatModel = None, # type: ignore
):
return ActionResult()
action = registry.registry.actions['mixed_action']
model_fields = action.param_model.model_fields
# Only action params in model
assert set(model_fields.keys()) == {'first', 'second', 'third'}
assert model_fields['third'].default is True
def test_extract_content_pattern_registration(self):
"""Test that the extract_content pattern with mixed params registers correctly"""
registry = Registry()
# This is the problematic pattern: positional arg, then special args, then kwargs with defaults
@registry.action('Extract content from page')
async def extract_content(
goal: str,
page_extraction_llm,
include_links: bool = False,
):
return ActionResult(extracted_content=f'Goal: {goal}, include_links: {include_links}')
# Verify registration
assert 'extract_content' in registry.registry.actions
action = registry.registry.actions['extract_content']
# Check that the param model only includes user-facing params
model_fields = action.param_model.model_fields
assert 'goal' in model_fields
assert 'include_links' in model_fields
assert model_fields['include_links'].default is False
# Special params should NOT be in the model
assert 'page' not in model_fields
assert 'page_extraction_llm' not in model_fields
# Verify the action was properly registered
assert action.name == 'extract_content'
assert action.description == 'Extract content from page'
class TestParamsModelArgsAndKwargs:
async def test_browser_session_double_kwarg(self):
"""Run the test to diagnose browser_session parameter issue
This test demonstrates the problem and our fix. The issue happens because:
1. In tools/service.py, we have:
```python
@registry.action('Google Sheets: Select a specific cell or range of cells')
async def select_cell_or_range(browser_session: BrowserSession, cell_or_range: str):
return await _select_cell_or_range(browser_session=browser_session, cell_or_range=cell_or_range)
```
2. When registry.execute_action calls this function, it adds browser_session to extra_args:
```python
# In registry/service.py
if 'browser_session' in parameter_names:
extra_args['browser_session'] = browser_session
```
3. Then later, when calling action.function:
```python
return await action.function(**params_dict, **extra_args)
```
4. This effectively means browser_session is passed twice:
- Once through extra_args['browser_session']
- And again through params_dict['browser_session'] (from the original function)
The fix is to pass browser_session positionally in select_cell_or_range:
```python
return await _select_cell_or_range(browser_session, cell_or_range)
```
This test confirms that this approach works.
"""
from browser_use.tools.registry.service import Registry
from browser_use.tools.registry.views import ActionModel
# Simple context for testing
class TestContext:
pass
class MockBrowserSession:
async def get_current_page(self):
return None
browser_session = MockBrowserSession()
# Create registry
registry = Registry[TestContext]()
# Model that doesn't include browser_session (renamed to avoid pytest collecting it)
class CellActionParams(ActionModel):
value: str = Field(description='Test value')
# Model that includes browser_session
class ModelWithBrowser(ActionModel):
value: str = Field(description='Test value')
browser_session: BrowserSession = None # type: ignore
# Create a custom param model for select_cell_or_range
class CellRangeParams(ActionModel):
cell_or_range: str = Field(description='Cell or range to select')
# Use the provided real browser session
# Test with the real issue: select_cell_or_range
# logger.info('\n\n=== Test: Simulating select_cell_or_range issue with correct model ===')
# Define the function without using our registry - this will be a helper function
async def _select_cell_or_range(browser_session, cell_or_range):
"""Helper function for select_cell_or_range"""
return f'Selected cell {cell_or_range}'
# This simulates the actual issue we're seeing in the real code
# The browser_session parameter is in both the function signature and passed as a named arg
@registry.action('Google Sheets: Select a cell or range', param_model=CellRangeParams)
async def select_cell_or_range(browser_session: BrowserSession, cell_or_range: str):
# logger.info(f'select_cell_or_range called with browser_session={browser_session}, cell_or_range={cell_or_range}')
# PROBLEMATIC LINE: browser_session is passed by name, matching the parameter name
# This is what causes the "got multiple values" error in the real code
return await _select_cell_or_range(browser_session=browser_session, cell_or_range=cell_or_range)
# Fix attempt: Register a version that uses positional args instead
@registry.action('Google Sheets: Select a cell or range (fixed)', param_model=CellRangeParams)
async def select_cell_or_range_fixed(browser_session: BrowserSession, cell_or_range: str):
# logger.info(f'select_cell_or_range_fixed called with browser_session={browser_session}, cell_or_range={cell_or_range}')
# FIXED LINE: browser_session is passed positionally, avoiding the parameter name conflict
return await _select_cell_or_range(browser_session, cell_or_range)
# Another attempt: explicitly call using **kwargs to simulate what the registry does
@registry.action('Google Sheets: Select with kwargs', param_model=CellRangeParams)
async def select_with_kwargs(browser_session: BrowserSession, cell_or_range: str):
# logger.info(f'select_with_kwargs called with browser_session={browser_session}, cell_or_range={cell_or_range}')
# Get params and extra_args, like in Registry.execute_action
params = {'cell_or_range': cell_or_range, 'browser_session': browser_session}
extra_args = {'browser_session': browser_session}
# Try to call _select_cell_or_range with both params and extra_args
# This will fail with "got multiple values for keyword argument 'browser_session'"
try:
# logger.info('Attempting to call with both params and extra_args (should fail):')
await _select_cell_or_range(**params, **extra_args)
except TypeError as e:
# logger.info(f'Expected error: {e}')
# Remove browser_session from params to avoid the conflict
params_fixed = dict(params)
del params_fixed['browser_session']
# logger.info(f'Fixed params: {params_fixed}')
# This should work
result = await _select_cell_or_range(**params_fixed, **extra_args)
# logger.info(f'Success after fix: {result}')
return result
# Test the original problematic version
# logger.info('\n--- Testing original problematic version ---')
try:
result1 = await registry.execute_action(
'select_cell_or_range',
{'cell_or_range': 'A1:F100'},
browser_session=browser_session, # type: ignore
)
# logger.info(f'Success! Result: {result1}')
except Exception as e:
logger.error(f'Error: {str(e)}')
# Test the fixed version (using positional args)
# logger.info('\n--- Testing fixed version (positional args) ---')
try:
result2 = await registry.execute_action(
'select_cell_or_range_fixed',
{'cell_or_range': 'A1:F100'},
browser_session=browser_session, # type: ignore
)
# logger.info(f'Success! Result: {result2}')
except Exception as e:
logger.error(f'Error: {str(e)}')
# Test with kwargs version that simulates what Registry.execute_action does
# logger.info('\n--- Testing kwargs simulation version ---')
try:
result3 = await registry.execute_action(
'select_with_kwargs',
{'cell_or_range': 'A1:F100'},
browser_session=browser_session, # type: ignore
)
# logger.info(f'Success! Result: {result3}')
except Exception as e:
logger.error(f'Error: {str(e)}')
# Manual test of our theory: browser_session is passed twice
# logger.info('\n--- Direct test of our theory ---')
try:
# Create the model instance
params = CellRangeParams(cell_or_range='A1:F100')
# First check if the extra_args approach works
# logger.info('Checking if extra_args approach works:')
extra_args = {'browser_session': browser_session}
# If we were to modify Registry.execute_action:
# 1. Check if the function parameter needs browser_session
parameter_names = ['browser_session', 'cell_or_range']
browser_keys = ['browser_session', 'browser', 'browser_context']
# Create params dict
param_dict = params.model_dump()
# logger.info(f'params dict before: {param_dict}')
# Apply our fix: remove browser_session from params dict
for key in browser_keys:
if key in param_dict and key in extra_args:
# logger.info(f'Removing {key} from params dict')
del param_dict[key]
# logger.info(f'params dict after: {param_dict}')
# logger.info(f'extra_args: {extra_args}')
# This would be the fixed code:
# return await action.function(**param_dict, **extra_args)
# Call directly to test
result3 = await select_cell_or_range(**param_dict, **extra_args)
# logger.info(f'Success with our fix! Result: {result3}')
except Exception as e:
logger.error(f'Error with our manual test: {str(e)}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_openai.py | tests/ci/models/test_llm_openai.py | """Test OpenAI model button click."""
from browser_use.llm.openai.chat import ChatOpenAI
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_openai_gpt_4_1_mini(httpserver):
"""Test OpenAI gpt-4.1-mini can click a button."""
await run_model_button_click_test(
model_class=ChatOpenAI,
model_name='gpt-4.1-mini',
api_key_env='OPENAI_API_KEY',
extra_kwargs={},
httpserver=httpserver,
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_anthropic.py | tests/ci/models/test_llm_anthropic.py | """Test Anthropic model button click."""
from browser_use.llm.anthropic.chat import ChatAnthropic
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_anthropic_claude_sonnet_4_0(httpserver):
"""Test Anthropic claude-sonnet-4-0 can click a button."""
await run_model_button_click_test(
model_class=ChatAnthropic,
model_name='claude-sonnet-4-0',
api_key_env='ANTHROPIC_API_KEY',
extra_kwargs={},
httpserver=httpserver,
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_google.py | tests/ci/models/test_llm_google.py | """Test Google model button click."""
from browser_use.llm.google.chat import ChatGoogle
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_google_gemini_flash_latest(httpserver):
"""Test Google gemini-flash-latest can click a button."""
await run_model_button_click_test(
model_class=ChatGoogle,
model_name='gemini-flash-latest',
api_key_env='GOOGLE_API_KEY',
extra_kwargs={},
httpserver=httpserver,
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_azure_responses_api.py | tests/ci/models/test_azure_responses_api.py | """Tests for Azure OpenAI Responses API support."""
import os
import pytest
from browser_use.llm.azure.chat import RESPONSES_API_ONLY_MODELS, ChatAzureOpenAI
from browser_use.llm.messages import (
AssistantMessage,
ContentPartImageParam,
ContentPartTextParam,
Function,
ImageURL,
SystemMessage,
ToolCall,
UserMessage,
)
from browser_use.llm.openai.responses_serializer import ResponsesAPIMessageSerializer
class TestResponsesAPIMessageSerializer:
"""Tests for the ResponsesAPIMessageSerializer class."""
def test_serialize_user_message_string_content(self):
"""Test serializing a user message with string content."""
message = UserMessage(content='Hello, world!')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert result['content'] == 'Hello, world!'
def test_serialize_user_message_text_parts(self):
"""Test serializing a user message with text content parts."""
message = UserMessage(
content=[
ContentPartTextParam(type='text', text='First part'),
ContentPartTextParam(type='text', text='Second part'),
]
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert isinstance(result['content'], list)
assert len(result['content']) == 2
assert result['content'][0]['type'] == 'input_text'
assert result['content'][0]['text'] == 'First part'
assert result['content'][1]['type'] == 'input_text'
assert result['content'][1]['text'] == 'Second part'
def test_serialize_user_message_with_image(self):
"""Test serializing a user message with image content."""
message = UserMessage(
content=[
ContentPartTextParam(type='text', text='What is in this image?'),
ContentPartImageParam(
type='image_url',
image_url=ImageURL(url='https://example.com/image.png', detail='auto'),
),
]
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert isinstance(result['content'], list)
assert len(result['content']) == 2
assert result['content'][0]['type'] == 'input_text'
assert result['content'][1]['type'] == 'input_image'
assert result['content'][1].get('image_url') == 'https://example.com/image.png'
assert result['content'][1].get('detail') == 'auto'
def test_serialize_system_message_string_content(self):
"""Test serializing a system message with string content."""
message = SystemMessage(content='You are a helpful assistant.')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'system'
assert result['content'] == 'You are a helpful assistant.'
def test_serialize_system_message_text_parts(self):
"""Test serializing a system message with text content parts."""
message = SystemMessage(content=[ContentPartTextParam(type='text', text='System instruction')])
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'system'
assert isinstance(result['content'], list)
assert len(result['content']) == 1
assert result['content'][0]['type'] == 'input_text'
def test_serialize_assistant_message_string_content(self):
"""Test serializing an assistant message with string content."""
message = AssistantMessage(content='Here is my response.')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert result['content'] == 'Here is my response.'
def test_serialize_assistant_message_none_content_with_tool_calls(self):
"""Test serializing an assistant message with None content and tool calls."""
message = AssistantMessage(
content=None,
tool_calls=[
ToolCall(
id='call_123',
type='function',
function=Function(name='search', arguments='{"query": "test"}'),
)
],
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert '[Tool call: search({"query": "test"})]' in result['content']
def test_serialize_assistant_message_none_content_no_tool_calls(self):
"""Test serializing an assistant message with None content and no tool calls."""
message = AssistantMessage(content=None)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert result['content'] == ''
def test_serialize_messages_list(self):
"""Test serializing a list of messages."""
messages = [
SystemMessage(content='You are helpful.'),
UserMessage(content='Hello!'),
AssistantMessage(content='Hi there!'),
]
results = ResponsesAPIMessageSerializer.serialize_messages(messages)
assert len(results) == 3
assert results[0]['role'] == 'system'
assert results[1]['role'] == 'user'
assert results[2]['role'] == 'assistant'
class TestChatAzureOpenAIShouldUseResponsesAPI:
"""Tests for the _should_use_responses_api method."""
def test_use_responses_api_true(self):
"""Test that use_responses_api=True forces Responses API."""
llm = ChatAzureOpenAI(
model='gpt-4o',
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api=True,
)
assert llm._should_use_responses_api() is True
def test_use_responses_api_false(self):
"""Test that use_responses_api=False forces Chat Completions API."""
llm = ChatAzureOpenAI(
model='gpt-5.1-codex-mini', # Even with a Responses-only model
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api=False,
)
assert llm._should_use_responses_api() is False
def test_use_responses_api_auto_with_responses_only_model(self):
"""Test that auto mode detects Responses-only models."""
for model_name in RESPONSES_API_ONLY_MODELS:
llm = ChatAzureOpenAI(
model=model_name,
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api='auto',
)
assert llm._should_use_responses_api() is True, f'Expected Responses API for {model_name}'
def test_use_responses_api_auto_with_regular_model(self):
"""Test that auto mode uses Chat Completions for regular models."""
regular_models = ['gpt-4o', 'gpt-4.1-mini', 'gpt-3.5-turbo', 'gpt-4']
for model_name in regular_models:
llm = ChatAzureOpenAI(
model=model_name,
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api='auto',
)
assert llm._should_use_responses_api() is False, f'Expected Chat Completions for {model_name}'
def test_use_responses_api_auto_is_default(self):
"""Test that 'auto' is the default value for use_responses_api."""
llm = ChatAzureOpenAI(
model='gpt-4o',
api_key='test',
azure_endpoint='https://test.openai.azure.com',
)
assert llm.use_responses_api == 'auto'
def test_responses_api_only_models_list(self):
"""Test that the RESPONSES_API_ONLY_MODELS list contains expected models."""
expected_models = [
'gpt-5.1-codex',
'gpt-5.1-codex-mini',
'gpt-5.1-codex-max',
'gpt-5-codex',
'codex-mini-latest',
'computer-use-preview',
]
for model in expected_models:
assert model in RESPONSES_API_ONLY_MODELS, f'{model} should be in RESPONSES_API_ONLY_MODELS'
class TestChatAzureOpenAIIntegration:
"""Integration tests for Azure OpenAI with Responses API.
These tests require valid Azure OpenAI credentials and are skipped if not available.
"""
@pytest.fixture
def azure_credentials(self):
"""Get Azure OpenAI credentials from environment."""
api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
if not api_key or not endpoint:
pytest.skip('Azure OpenAI credentials not available')
return {'api_key': api_key, 'azure_endpoint': endpoint}
async def test_chat_completions_api_basic_call(self, azure_credentials):
"""Test basic call using Chat Completions API."""
llm = ChatAzureOpenAI(
model='gpt-4.1-mini',
api_key=azure_credentials['api_key'],
azure_endpoint=azure_credentials['azure_endpoint'],
use_responses_api=False, # Force Chat Completions API
)
messages = [
SystemMessage(content='You are a helpful assistant.'),
UserMessage(content='Say "hello" and nothing else.'),
]
result = await llm.ainvoke(messages)
assert result.completion is not None
assert 'hello' in result.completion.lower()
async def test_responses_api_basic_call(self, azure_credentials):
"""Test basic call using Responses API.
This test only runs if the Azure deployment supports the Responses API
(api_version >= 2025-03-01-preview).
"""
llm = ChatAzureOpenAI(
model='gpt-4.1-mini',
api_key=azure_credentials['api_key'],
azure_endpoint=azure_credentials['azure_endpoint'],
api_version='2025-03-01-preview', # Required for Responses API
use_responses_api=True, # Force Responses API
)
messages = [
SystemMessage(content='You are a helpful assistant.'),
UserMessage(content='Say "hello" and nothing else.'),
]
try:
result = await llm.ainvoke(messages)
assert result.completion is not None
assert 'hello' in result.completion.lower()
except Exception as e:
# Skip if Responses API is not supported
if 'Responses API' in str(e) or '404' in str(e):
pytest.skip('Responses API not supported by this Azure deployment')
raise
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_schema_optimizer.py | tests/ci/models/test_llm_schema_optimizer.py | """
Tests for the SchemaOptimizer to ensure it correctly processes and
optimizes the schemas for agent actions without losing information.
"""
from pydantic import BaseModel
from browser_use.agent.views import AgentOutput
from browser_use.llm.schema import SchemaOptimizer
from browser_use.tools.service import Tools
class ProductInfo(BaseModel):
"""A sample structured output model with multiple fields."""
price: str
title: str
rating: float | None = None
def test_optimizer_preserves_all_fields_in_structured_done_action():
"""
Ensures the SchemaOptimizer does not drop fields from a custom structured
output model when creating the schema for the 'done' action.
This test specifically checks for a bug where fields were being lost
during the optimization process.
"""
# 1. Setup a tools with a custom output model, simulating an Agent
# being created with an `output_model_schema`.
tools = Tools(output_model=ProductInfo)
# 2. Get the dynamically created AgentOutput model, which includes all registered actions.
ActionModel = tools.registry.create_action_model()
agent_output_model = AgentOutput.type_with_custom_actions(ActionModel)
# 3. Run the schema optimizer on the agent's output model.
optimized_schema = SchemaOptimizer.create_optimized_json_schema(agent_output_model)
# 4. Find the 'done' action schema within the optimized output.
# The path is properties -> action -> items -> anyOf -> [schema with 'done'].
done_action_schema = None
actions_schemas = optimized_schema.get('properties', {}).get('action', {}).get('items', {}).get('anyOf', [])
for action_schema in actions_schemas:
if 'done' in action_schema.get('properties', {}):
done_action_schema = action_schema
break
# 5. Assert that the 'done' action schema was successfully found.
assert done_action_schema is not None, "Could not find 'done' action in the optimized schema."
# 6. Navigate to the schema for our custom data model within the 'done' action.
# The path is properties -> done -> properties -> data -> properties.
done_params_schema = done_action_schema.get('properties', {}).get('done', {})
structured_data_schema = done_params_schema.get('properties', {}).get('data', {})
final_properties = structured_data_schema.get('properties', {})
# 7. Assert that the set of fields in the optimized schema matches the original model's fields.
original_fields = set(ProductInfo.model_fields.keys())
optimized_fields = set(final_properties.keys())
assert original_fields == optimized_fields, (
f"Field mismatch between original and optimized structured 'done' action schema.\n"
f'Missing from optimized: {original_fields - optimized_fields}\n'
f'Unexpected in optimized: {optimized_fields - original_fields}'
)
def test_gemini_schema_retains_required_fields():
"""Gemini schema should keep explicit required arrays for mandatory fields."""
schema = SchemaOptimizer.create_gemini_optimized_schema(ProductInfo)
assert 'required' in schema, 'Gemini schema removed required fields.'
required_fields = set(schema['required'])
assert {'price', 'title'}.issubset(required_fields), 'Mandatory fields must stay required for Gemini.'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_azure.py | tests/ci/models/test_llm_azure.py | """Test Azure OpenAI model button click."""
from browser_use.llm.azure.chat import ChatAzureOpenAI
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_azure_gpt_4_1_mini(httpserver):
"""Test Azure OpenAI gpt-4.1-mini can click a button."""
await run_model_button_click_test(
model_class=ChatAzureOpenAI,
model_name='gpt-4.1-mini',
api_key_env='AZURE_OPENAI_KEY',
extra_kwargs={}, # Azure endpoint will be added by helper
httpserver=httpserver,
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/test_llm_browseruse.py | tests/ci/models/test_llm_browseruse.py | """Test Browser Use model button click."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_browseruse_bu_latest(httpserver):
"""Test Browser Use bu-latest can click a button."""
await run_model_button_click_test(
model_class=ChatBrowserUse,
model_name='bu-latest',
api_key_env='BROWSER_USE_API_KEY',
extra_kwargs={},
httpserver=httpserver,
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/models/model_test_helper.py | tests/ci/models/model_test_helper.py | """Shared test helper for LLM model tests."""
import os
import pytest
from browser_use.agent.service import Agent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
async def run_model_button_click_test(
    model_class,
    model_name: str,
    api_key_env: str | None,
    extra_kwargs: dict,
    httpserver,
):
    """Test that an LLM model can click a button.

    This test verifies:
    1. Model can be initialized with API key
    2. Agent can navigate and click a button
    3. Button click is verified by checking page state change
    4. Completes within max 2 steps

    Args:
        model_class: Chat model class to instantiate (e.g. ChatAzureOpenAI).
        model_name: Model identifier passed as the ``model`` constructor kwarg.
        api_key_env: Name of the environment variable holding the API key; the
            test is skipped when the variable is unset/empty. ``None`` means the
            model requires no API key.
        extra_kwargs: Extra keyword arguments merged into the model constructor
            call (applied after ``model``/``api_key``, so they can override).
        httpserver: pytest-httpserver fixture used to serve the test page.
    """
    # Handle API key validation - skip test if not available
    if api_key_env is not None:
        api_key = os.getenv(api_key_env)
        if not api_key:
            pytest.skip(f'{api_key_env} not set - skipping test')
    else:
        api_key = None

    # Handle Azure-specific endpoint validation
    from browser_use.llm.azure.chat import ChatAzureOpenAI

    if model_class is ChatAzureOpenAI:
        azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
        if not azure_endpoint:
            pytest.skip('AZURE_OPENAI_ENDPOINT not set - skipping test')
        # Add the azure_endpoint to extra_kwargs (rebuilt as a new dict so the
        # caller's dict is never mutated)
        extra_kwargs = {**extra_kwargs, 'azure_endpoint': azure_endpoint}

    # Create HTML page with a button that changes page content when clicked
    html = """
    <!DOCTYPE html>
    <html>
    <head><title>Button Test</title></head>
    <body>
        <h1>Button Click Test</h1>
        <button id="test-button" onclick="document.getElementById('result').innerText='SUCCESS'">
            Click Me
        </button>
        <div id="result">NOT_CLICKED</div>
    </body>
    </html>
    """
    httpserver.expect_request('/').respond_with_data(html, content_type='text/html')

    # Create LLM instance with extra kwargs if provided
    llm_kwargs = {'model': model_name}
    if api_key is not None:
        llm_kwargs['api_key'] = api_key
    llm_kwargs.update(extra_kwargs)
    llm = model_class(**llm_kwargs)  # type: ignore[arg-type]

    # Create browser session
    browser = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,  # Use temporary directory
        )
    )

    try:
        # Start browser
        await browser.start()

        # Create agent with button click task (URL in task triggers auto-navigation)
        test_url = httpserver.url_for('/')
        agent = Agent(
            task=f'{test_url} - Click the button',
            llm=llm,
            browser_session=browser,
            # NOTE(review): max_steps is passed to the Agent constructor here;
            # confirm Agent.__init__ accepts it — in some browser-use versions
            # max_steps is consumed by agent.run() instead.
            max_steps=2,  # Max 2 steps as per requirements
        )

        # Run the agent
        result = await agent.run()

        # Verify task completed
        assert result is not None
        assert len(result.history) > 0

        # Verify button was clicked by checking page state across any step
        button_clicked = False
        for step in result.history:
            # Check state_message which contains browser state with page text
            if step.state_message and 'SUCCESS' in step.state_message:
                button_clicked = True
                break

        # Check if SUCCESS appears in any step (indicating button was clicked)
        assert button_clicked, 'Button was not clicked - SUCCESS not found in any page state'
    finally:
        # Clean up browser session
        await browser.kill()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/security/test_security_flags.py | tests/ci/security/test_security_flags.py | """Test that disable_security flag properly merges --disable-features flags without breaking extensions."""
import tempfile
from browser_use.browser.profile import BrowserProfile
class TestBrowserProfileDisableSecurity:
    """Test disable_security flag behavior.

    disable_security=True must *merge* its --disable-features values into any
    existing --disable-features flag rather than emitting a second flag, since
    Chromium only honors the last occurrence.
    """

    def test_disable_security_preserves_extension_features(self):
        """Test that disable_security=True doesn't break extension features by properly merging --disable-features flags."""
        # Test with disable_security=False (baseline)
        profile_normal = BrowserProfile(disable_security=False, user_data_dir=tempfile.mkdtemp(prefix='test-normal-'))
        profile_normal.detect_display_configuration()
        args_normal = profile_normal.get_args()

        # Test with disable_security=True
        profile_security_disabled = BrowserProfile(disable_security=True, user_data_dir=tempfile.mkdtemp(prefix='test-security-'))
        profile_security_disabled.detect_display_configuration()
        args_security_disabled = profile_security_disabled.get_args()

        # Extract disable-features args
        def extract_disable_features(args):
            # Returns the feature names from the first --disable-features=... flag,
            # or an empty set when no such flag is present.
            for arg in args:
                if arg.startswith('--disable-features='):
                    return set(arg.split('=', 1)[1].split(','))
            return set()

        features_normal = extract_disable_features(args_normal)
        features_security_disabled = extract_disable_features(args_security_disabled)

        # Check that extension-related features are preserved
        extension_features = {
            'ExtensionManifestV2Disabled',
            'ExtensionDisableUnsupportedDeveloper',
            'ExtensionManifestV2Unsupported',
        }
        security_features = {'IsolateOrigins', 'site-per-process'}

        # Verify that security disabled has both extension and security features
        missing_extension_features = extension_features - features_security_disabled
        missing_security_features = security_features - features_security_disabled
        assert not missing_extension_features, (
            f'Missing extension features when disable_security=True: {missing_extension_features}'
        )
        assert not missing_security_features, f'Missing security features when disable_security=True: {missing_security_features}'

        # Verify that security disabled profile has more features than normal (due to added security features)
        assert len(features_security_disabled) > len(features_normal), (
            'Security disabled profile should have more features than normal profile'
        )

        # Verify all normal features are preserved in security disabled profile
        missing_normal_features = features_normal - features_security_disabled
        assert not missing_normal_features, f'Normal features missing from security disabled profile: {missing_normal_features}'

    def test_disable_features_flag_deduplication(self):
        """Test that duplicate --disable-features values are properly deduplicated."""
        profile = BrowserProfile(
            disable_security=True,
            user_data_dir=tempfile.mkdtemp(prefix='test-dedup-'),
            # Add duplicate features to test deduplication
            args=['--disable-features=TestFeature1,TestFeature2', '--disable-features=TestFeature2,TestFeature3'],
        )
        profile.detect_display_configuration()
        args = profile.get_args()

        # Extract disable-features args
        disable_features_args = [arg for arg in args if arg.startswith('--disable-features=')]

        # Should only have one consolidated --disable-features flag
        assert len(disable_features_args) == 1, f'Expected 1 disable-features flag, got {len(disable_features_args)}'
        features = set(disable_features_args[0].split('=', 1)[1].split(','))

        # Should have all test features without duplicates
        expected_test_features = {'TestFeature1', 'TestFeature2', 'TestFeature3'}
        assert expected_test_features.issubset(features), f'Missing test features: {expected_test_features - features}'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/security/test_sensitive_data.py | tests/ci/security/test_sensitive_data.py | import pytest
from pydantic import BaseModel, Field
from browser_use.agent.message_manager.service import MessageManager
from browser_use.agent.views import ActionResult, AgentOutput, AgentStepInfo, MessageManagerState
from browser_use.browser.views import BrowserStateSummary
from browser_use.dom.views import SerializedDOMState
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm import SystemMessage, UserMessage
from browser_use.llm.messages import ContentPartTextParam
from browser_use.tools.registry.service import Registry
from browser_use.utils import is_new_tab_page, match_url_with_domain_pattern
class SensitiveParams(BaseModel):
    """Test parameter model for sensitive data testing.

    Used as the action-params object fed to Registry._replace_sensitive_data;
    its text field carries <secret>key</secret> placeholders to be substituted.
    """

    text: str = Field(description='Text with sensitive data placeholders')
@pytest.fixture
def registry():
    """Provide a fresh tools Registry instance per test."""
    return Registry()
@pytest.fixture
def message_manager():
    """Build a MessageManager backed by a unique throwaway file-system path."""
    import os
    import tempfile
    import uuid

    # Unique scratch directory under the system temp dir (e.g. /tmp/<uuid> on Unix).
    scratch_dir = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
    file_system = FileSystem(scratch_dir)
    return MessageManager(
        task='Test task',
        system_message=SystemMessage(content='System message'),
        state=MessageManagerState(),
        file_system=file_system,
    )
def test_replace_sensitive_data_with_missing_keys(registry, caplog):
    """Test that _replace_sensitive_data handles missing keys gracefully"""
    # NOTE(review): caplog appears unused — presumably kept for future log assertions.
    # Create a simple Pydantic model with sensitive data placeholders
    params = SensitiveParams(text='Please enter <secret>username</secret> and <secret>password</secret>')

    # Case 1: All keys present - both placeholders should be replaced
    sensitive_data = {'username': 'user123', 'password': 'pass456'}
    result = registry._replace_sensitive_data(params, sensitive_data)
    assert result.text == 'Please enter user123 and pass456'
    assert '<secret>' not in result.text  # No secret tags should remain

    # Case 2: One key missing - only available key should be replaced
    sensitive_data = {'username': 'user123'}  # password is missing
    result = registry._replace_sensitive_data(params, sensitive_data)
    assert result.text == 'Please enter user123 and <secret>password</secret>'
    assert 'user123' in result.text
    assert '<secret>password</secret>' in result.text  # Missing key's tag remains

    # Case 3: Multiple keys missing - all tags should be preserved
    sensitive_data = {}  # both keys missing
    result = registry._replace_sensitive_data(params, sensitive_data)
    assert result.text == 'Please enter <secret>username</secret> and <secret>password</secret>'
    assert '<secret>username</secret>' in result.text
    assert '<secret>password</secret>' in result.text

    # Case 4: One key empty - empty values are treated as missing
    sensitive_data = {'username': 'user123', 'password': ''}
    result = registry._replace_sensitive_data(params, sensitive_data)
    assert result.text == 'Please enter user123 and <secret>password</secret>'
    assert 'user123' in result.text
    assert '<secret>password</secret>' in result.text  # Empty value's tag remains
def test_simple_domain_specific_sensitive_data(registry, caplog):
    """Test the basic functionality of domain-specific sensitive data replacement"""
    # NOTE(review): caplog appears unused — presumably kept for future log assertions.
    # Create a simple Pydantic model with sensitive data placeholders
    params = SensitiveParams(text='Please enter <secret>username</secret> and <secret>password</secret>')

    # Simple test with directly instantiable values
    sensitive_data = {
        'example.com': {'username': 'example_user'},
        'other_data': 'non_secret_value',  # Old format mixed with new
    }

    # Without a URL, domain-specific secrets should NOT be exposed
    result = registry._replace_sensitive_data(params, sensitive_data)
    assert result.text == 'Please enter <secret>username</secret> and <secret>password</secret>'
    assert '<secret>username</secret>' in result.text  # Should NOT be replaced without URL
    assert '<secret>password</secret>' in result.text  # Password is missing in sensitive_data
    assert 'example_user' not in result.text  # Domain-specific value should not appear

    # Test with a matching URL - domain-specific secrets should be exposed
    result = registry._replace_sensitive_data(params, sensitive_data, 'https://example.com/login')
    assert result.text == 'Please enter example_user and <secret>password</secret>'
    assert 'example_user' in result.text  # Should be replaced with matching URL
    assert '<secret>password</secret>' in result.text  # Password is still missing
    assert '<secret>username</secret>' not in result.text  # Username tag should be replaced
def test_match_url_with_domain_pattern():
    """Test that the domain pattern matching utility works correctly"""
    # Test exact domain matches
    assert match_url_with_domain_pattern('https://example.com', 'example.com') is True
    assert match_url_with_domain_pattern('http://example.com', 'example.com') is False  # Default scheme is now https
    assert match_url_with_domain_pattern('https://google.com', 'example.com') is False

    # Test subdomain pattern matches
    assert match_url_with_domain_pattern('https://sub.example.com', '*.example.com') is True
    assert match_url_with_domain_pattern('https://example.com', '*.example.com') is True  # Base domain should match too
    assert match_url_with_domain_pattern('https://sub.sub.example.com', '*.example.com') is True
    assert match_url_with_domain_pattern('https://example.org', '*.example.com') is False

    # Test protocol pattern matches
    assert match_url_with_domain_pattern('https://example.com', 'http*://example.com') is True
    assert match_url_with_domain_pattern('http://example.com', 'http*://example.com') is True
    assert match_url_with_domain_pattern('ftp://example.com', 'http*://example.com') is False

    # Test explicit http protocol
    assert match_url_with_domain_pattern('http://example.com', 'http://example.com') is True
    assert match_url_with_domain_pattern('https://example.com', 'http://example.com') is False

    # Test Chrome extension pattern
    assert match_url_with_domain_pattern('chrome-extension://abcdefghijkl', 'chrome-extension://*') is True
    assert match_url_with_domain_pattern('chrome-extension://mnopqrstuvwx', 'chrome-extension://abcdefghijkl') is False

    # Test new tab page handling: new-tab URLs never match any pattern
    assert match_url_with_domain_pattern('about:blank', 'example.com') is False
    assert match_url_with_domain_pattern('about:blank', '*://*') is False
    assert match_url_with_domain_pattern('chrome://new-tab-page/', 'example.com') is False
    assert match_url_with_domain_pattern('chrome://new-tab-page/', '*://*') is False
    assert match_url_with_domain_pattern('chrome://new-tab-page', 'example.com') is False
    assert match_url_with_domain_pattern('chrome://new-tab-page', '*://*') is False
def test_unsafe_domain_patterns():
    """Test that unsafe domain patterns are rejected"""
    # Every (url, pattern) pair below uses a pattern that could match far too
    # many domains; the matcher must reject all of them outright.
    rejected_cases = [
        ('https://evil.com', '*google.com'),
        ('https://google.com.evil.com', '*.*.com'),
        ('https://google.com', '**google.com'),
        ('https://google.com', 'g*e.com'),
        ('https://google.com', '*com*'),
        # Patterns with multiple asterisks in different positions
        ('https://subdomain.example.com', '*domain*example*'),
        ('https://sub.domain.example.com', '*.*.example.com'),
        # Patterns with wildcards in the TLD part
        ('https://example.com', 'example.*'),
        ('https://example.org', 'example.*'),
    ]
    for url, pattern in rejected_cases:
        assert match_url_with_domain_pattern(url, pattern) is False
def test_malformed_urls_and_patterns():
    """Test handling of malformed URLs or patterns"""
    # Malformed URLs
    assert match_url_with_domain_pattern('not-a-url', 'example.com') is False
    assert match_url_with_domain_pattern('http://', 'example.com') is False
    assert match_url_with_domain_pattern('https://', 'example.com') is False
    assert match_url_with_domain_pattern('ftp:/example.com', 'example.com') is False  # Missing slash

    # Empty URLs or patterns
    assert match_url_with_domain_pattern('', 'example.com') is False
    assert match_url_with_domain_pattern('https://example.com', '') is False

    # URLs with no hostname
    assert match_url_with_domain_pattern('file:///path/to/file.txt', 'example.com') is False

    # Invalid pattern formats
    assert match_url_with_domain_pattern('https://example.com', '..example.com') is False
    assert match_url_with_domain_pattern('https://example.com', '.*.example.com') is False
    assert match_url_with_domain_pattern('https://example.com', '**') is False

    # Nested URL attacks in path, query or fragments
    assert match_url_with_domain_pattern('https://example.com/redirect?url=https://evil.com', 'example.com') is True
    assert match_url_with_domain_pattern('https://example.com/path/https://evil.com', 'example.com') is True
    assert match_url_with_domain_pattern('https://example.com#https://evil.com', 'example.com') is True
    # These should match example.com, not evil.com since urlparse extracts the hostname correctly

    # Complex URL obfuscation attempts
    assert match_url_with_domain_pattern('https://example.com/path?next=//evil.com/attack', 'example.com') is True
    assert match_url_with_domain_pattern('https://example.com@evil.com', 'example.com') is False
    assert match_url_with_domain_pattern('https://evil.com?example.com', 'example.com') is False
    assert match_url_with_domain_pattern('https://user:example.com@evil.com', 'example.com') is False
    # urlparse correctly identifies evil.com as the hostname in these cases
def test_url_components():
    """Test handling of URL components like credentials, ports, fragments, etc."""
    # URLs with credentials (username:password@) — hostname still drives matching
    assert match_url_with_domain_pattern('https://user:pass@example.com', 'example.com') is True
    assert match_url_with_domain_pattern('https://user:pass@example.com', '*.example.com') is True

    # URLs with ports
    assert match_url_with_domain_pattern('https://example.com:8080', 'example.com') is True
    assert match_url_with_domain_pattern('https://example.com:8080', 'example.com:8080') is True  # Port is stripped from pattern

    # URLs with paths
    assert match_url_with_domain_pattern('https://example.com/path/to/page', 'example.com') is True
    assert (
        match_url_with_domain_pattern('https://example.com/path/to/page', 'example.com/path') is False
    )  # Paths in patterns are not supported

    # URLs with query parameters
    assert match_url_with_domain_pattern('https://example.com?param=value', 'example.com') is True

    # URLs with fragments
    assert match_url_with_domain_pattern('https://example.com#section', 'example.com') is True

    # URLs with all components
    assert match_url_with_domain_pattern('https://user:pass@example.com:8080/path?query=val#fragment', 'example.com') is True
def test_filter_sensitive_data(message_manager):
    """Test that _filter_sensitive_data handles all sensitive data scenarios correctly"""
    # Set up a message with sensitive information
    message = UserMessage(content='My username is admin and password is secret123')

    # Case 1: No sensitive data provided — message passes through unchanged
    message_manager.sensitive_data = None
    result = message_manager._filter_sensitive_data(message)
    assert result.content == 'My username is admin and password is secret123'

    # Case 2: All sensitive data is properly replaced
    message_manager.sensitive_data = {'username': 'admin', 'password': 'secret123'}
    result = message_manager._filter_sensitive_data(message)
    assert '<secret>username</secret>' in result.content
    assert '<secret>password</secret>' in result.content

    # Case 3: Make sure it works with nested content (list of content parts)
    nested_message = UserMessage(content=[ContentPartTextParam(text='My username is admin and password is secret123')])
    result = message_manager._filter_sensitive_data(nested_message)
    assert '<secret>username</secret>' in result.content[0].text
    assert '<secret>password</secret>' in result.content[0].text

    # Case 4: Test with empty values
    message_manager.sensitive_data = {'username': 'admin', 'password': ''}
    result = message_manager._filter_sensitive_data(message)
    assert '<secret>username</secret>' in result.content
    # Only username should be replaced since password is empty

    # Case 5: Test with domain-specific sensitive data format
    message_manager.sensitive_data = {
        'example.com': {'username': 'admin', 'password': 'secret123'},
        'google.com': {'email': 'user@example.com', 'password': 'google_pass'},
    }
    # Update the message to include the values we're going to test
    message = UserMessage(content='My username is admin, email is user@example.com and password is secret123 or google_pass')
    result = message_manager._filter_sensitive_data(message)
    # All sensitive values should be replaced regardless of domain
    assert '<secret>username</secret>' in result.content
    assert '<secret>password</secret>' in result.content
    assert '<secret>email</secret>' in result.content
def test_is_new_tab_page():
    """is_new_tab_page recognizes blank/new-tab URLs and nothing else."""
    # about:blank plus both chrome://new-tab-page spellings count as new tabs.
    new_tab_urls = ['about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page']
    # Regular pages, empty strings, and other chrome:// URLs do not.
    ordinary_urls = ['https://example.com', 'http://google.com', '', 'chrome://settings']

    for url in new_tab_urls:
        assert is_new_tab_page(url) is True
    for url in ordinary_urls:
        assert is_new_tab_page(url) is False
def test_sensitive_data_filtered_from_action_results():
    """
    Test that sensitive data in action results is filtered before being sent to the LLM.

    This tests the full flow:
    1. Agent outputs actions with <secret>password</secret> placeholder
    2. Placeholder gets replaced with real value 'secret_pass123' during action execution
    3. Action result contains: "Typed 'secret_pass123' into password field"
    4. When state messages are created, the real value should be replaced back to placeholder
    5. The LLM should never see the real password value
    """
    import os
    import tempfile
    import uuid

    # Unique scratch path for the MessageManager's file system.
    base_tmp = tempfile.gettempdir()
    file_system_path = os.path.join(base_tmp, str(uuid.uuid4()))

    sensitive_data: dict[str, str | dict[str, str]] = {'username': 'admin_user', 'password': 'secret_pass123'}

    message_manager = MessageManager(
        task='Login to the website',
        system_message=SystemMessage(content='You are a browser automation agent'),
        state=MessageManagerState(),
        file_system=FileSystem(file_system_path),
        sensitive_data=sensitive_data,
    )

    # Create browser state
    dom_state = SerializedDOMState(_root=None, selector_map={})
    browser_state = BrowserStateSummary(
        dom_state=dom_state,
        url='https://example.com/login',
        title='Login Page',
        tabs=[],
    )

    # Simulate action result containing sensitive data after placeholder replacement
    # This represents what happens after typing a password into a form field
    action_results = [
        ActionResult(
            long_term_memory="Successfully typed 'secret_pass123' into the password field",
            error=None,
        )
    ]

    # Create model output for step 1
    model_output = AgentOutput(
        evaluation_previous_goal='Navigated to login page',
        memory='On login page, need to enter credentials',
        next_goal='Submit login form',
        action=[],
    )

    step_info = AgentStepInfo(step_number=1, max_steps=10)

    # Create state messages - this should filter sensitive data
    message_manager.create_state_messages(
        browser_state_summary=browser_state,
        model_output=model_output,
        result=action_results,
        step_info=step_info,
        use_vision=False,
    )

    # Get messages that would be sent to LLM
    messages = message_manager.get_messages()

    # Extract all text content from messages (both plain strings and content-part lists)
    all_text = []
    for msg in messages:
        if isinstance(msg.content, str):
            all_text.append(msg.content)
        elif isinstance(msg.content, list):
            for part in msg.content:
                if isinstance(part, ContentPartTextParam):
                    all_text.append(part.text)
    combined_text = '\n'.join(all_text)

    # Verify the bug is fixed: plaintext password should NOT appear in messages
    assert 'secret_pass123' not in combined_text, (
        'Sensitive data leaked! Real password value found in LLM messages. '
        'The _filter_sensitive_data method should replace it with <secret>password</secret>'
    )

    # Verify the filtered placeholder IS present (proves filtering happened)
    assert '<secret>password</secret>' in combined_text, (
        'Filtering did not work correctly. Expected <secret>password</secret> placeholder in messages.'
    )
def test_sensitive_data_filtered_with_domain_specific_format():
    """Test that domain-specific sensitive data format is also filtered from action results."""
    import os
    import tempfile
    import uuid

    # Unique scratch path for the MessageManager's file system.
    base_tmp = tempfile.gettempdir()
    file_system_path = os.path.join(base_tmp, str(uuid.uuid4()))

    # Use domain-specific format: secrets keyed by the domain they apply to
    sensitive_data: dict[str, str | dict[str, str]] = {
        'example.com': {'api_key': 'sk-secret-api-key-12345'},
    }

    message_manager = MessageManager(
        task='Use the API',
        system_message=SystemMessage(content='You are a browser automation agent'),
        state=MessageManagerState(),
        file_system=FileSystem(file_system_path),
        sensitive_data=sensitive_data,
    )

    dom_state = SerializedDOMState(_root=None, selector_map={})
    browser_state = BrowserStateSummary(
        dom_state=dom_state,
        url='https://example.com/api',
        title='API Page',
        tabs=[],
    )

    # Action result with API key that should be filtered
    action_results = [
        ActionResult(
            long_term_memory="Set API key to 'sk-secret-api-key-12345' in the input field",
            error=None,
        )
    ]

    model_output = AgentOutput(
        evaluation_previous_goal='Opened API settings',
        memory='Need to configure API key',
        next_goal='Save settings',
        action=[],
    )

    step_info = AgentStepInfo(step_number=1, max_steps=10)

    message_manager.create_state_messages(
        browser_state_summary=browser_state,
        model_output=model_output,
        result=action_results,
        step_info=step_info,
        use_vision=False,
    )

    messages = message_manager.get_messages()

    # Collect every piece of text that would reach the LLM
    all_text = []
    for msg in messages:
        if isinstance(msg.content, str):
            all_text.append(msg.content)
        elif isinstance(msg.content, list):
            for part in msg.content:
                if isinstance(part, ContentPartTextParam):
                    all_text.append(part.text)
    combined_text = '\n'.join(all_text)

    # API key should be filtered out
    assert 'sk-secret-api-key-12345' not in combined_text, 'API key leaked into LLM messages!'
    assert '<secret>api_key</secret>' in combined_text, 'API key placeholder not found in messages'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/security/test_domain_filtering.py | tests/ci/security/test_domain_filtering.py | from browser_use.browser import BrowserProfile, BrowserSession
class TestUrlAllowlistSecurity:
"""Tests for URL allowlist security bypass prevention and URL allowlist glob pattern matching."""
def test_authentication_bypass_prevention(self):
"""Test that the URL allowlist cannot be bypassed using authentication credentials."""
from bubus import EventBus
from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog
# Create a context config with a sample allowed domain
browser_profile = BrowserProfile(allowed_domains=['example.com'], headless=True, user_data_dir=None)
browser_session = BrowserSession(browser_profile=browser_profile)
event_bus = EventBus()
watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)
# Security vulnerability test cases
# These should all be detected as malicious despite containing "example.com"
assert watchdog._is_url_allowed('https://example.com:password@malicious.com') is False
assert watchdog._is_url_allowed('https://example.com@malicious.com') is False
assert watchdog._is_url_allowed('https://example.com%20@malicious.com') is False
assert watchdog._is_url_allowed('https://example.com%3A@malicious.com') is False
# Make sure legitimate auth credentials still work
assert watchdog._is_url_allowed('https://user:password@example.com') is True
    def test_glob_pattern_matching(self):
        """Test that glob patterns in allowed_domains work correctly."""
        from bubus import EventBus

        from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

        # Test *.example.com pattern (should match subdomains and main domain)
        browser_profile = BrowserProfile(allowed_domains=['*.example.com'], headless=True, user_data_dir=None)
        browser_session = BrowserSession(browser_profile=browser_profile)
        event_bus = EventBus()
        watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)

        # Should match subdomains
        assert watchdog._is_url_allowed('https://sub.example.com') is True
        assert watchdog._is_url_allowed('https://deep.sub.example.com') is True

        # Should also match main domain
        assert watchdog._is_url_allowed('https://example.com') is True

        # Should not match other domains
        assert watchdog._is_url_allowed('https://notexample.com') is False
        assert watchdog._is_url_allowed('https://example.org') is False

        # Test more complex glob patterns
        browser_profile = BrowserProfile(
            allowed_domains=[
                '*.google.com',
                'https://wiki.org',
                'https://good.com',
                'https://*.test.com',
                'chrome://version',
                'brave://*',
            ],
            headless=True,
            user_data_dir=None,
        )
        browser_session = BrowserSession(browser_profile=browser_profile)
        event_bus = EventBus()
        watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)

        # Should match domains ending with google.com
        assert watchdog._is_url_allowed('https://google.com') is True
        assert watchdog._is_url_allowed('https://www.google.com') is True
        assert (
            watchdog._is_url_allowed('https://evilgood.com') is False
        )  # make sure we dont allow *good.com patterns, only *.good.com

        # Should match domains starting with wiki
        assert watchdog._is_url_allowed('http://wiki.org') is False
        assert watchdog._is_url_allowed('https://wiki.org') is True

        # Should not match internal domains because scheme was not provided
        assert watchdog._is_url_allowed('chrome://google.com') is False
        assert watchdog._is_url_allowed('chrome://abc.google.com') is False

        # Test browser internal URLs
        assert watchdog._is_url_allowed('chrome://settings') is False
        assert watchdog._is_url_allowed('chrome://version') is True
        assert watchdog._is_url_allowed('chrome-extension://version/') is False
        assert watchdog._is_url_allowed('brave://anything/') is True
        assert watchdog._is_url_allowed('about:blank') is True
        assert watchdog._is_url_allowed('chrome://new-tab-page/') is True
        assert watchdog._is_url_allowed('chrome://new-tab-page') is True

        # Test security for glob patterns (authentication credentials bypass attempts)
        # These should all be detected as malicious despite containing allowed domain patterns
        assert watchdog._is_url_allowed('https://allowed.example.com:password@notallowed.com') is False
        assert watchdog._is_url_allowed('https://subdomain.example.com@evil.com') is False
        assert watchdog._is_url_allowed('https://sub.example.com%20@malicious.org') is False
        assert watchdog._is_url_allowed('https://anygoogle.com@evil.org') is False

        # Test pattern matching
        assert watchdog._is_url_allowed('https://www.test.com') is True
        assert watchdog._is_url_allowed('https://www.testx.com') is False
    def test_glob_pattern_edge_cases(self):
        """Test edge cases for glob pattern matching to ensure proper behavior."""
        from bubus import EventBus

        from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

        # Test with domains containing glob pattern in the middle
        browser_profile = BrowserProfile(allowed_domains=['*.google.com', 'https://wiki.org'], headless=True, user_data_dir=None)
        browser_session = BrowserSession(browser_profile=browser_profile)
        event_bus = EventBus()
        watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)

        # Verify that 'wiki*' pattern doesn't match domains that merely contain 'wiki' in the middle
        assert watchdog._is_url_allowed('https://notawiki.com') is False
        assert watchdog._is_url_allowed('https://havewikipages.org') is False
        assert watchdog._is_url_allowed('https://my-wiki-site.com') is False

        # Verify that '*google.com' doesn't match domains that have 'google' in the middle
        assert watchdog._is_url_allowed('https://mygoogle.company.com') is False

        # Create context with potentially risky glob pattern that demonstrates security concerns
        browser_profile = BrowserProfile(allowed_domains=['*.google.com', '*.google.co.uk'], headless=True, user_data_dir=None)
        browser_session = BrowserSession(browser_profile=browser_profile)
        event_bus = EventBus()
        watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)

        # Should match legitimate Google domains
        assert watchdog._is_url_allowed('https://www.google.com') is True
        assert watchdog._is_url_allowed('https://mail.google.co.uk') is True

        # Shouldn't match potentially malicious domains with a similar structure
        # This demonstrates why the previous pattern was risky and why it's now rejected
        assert watchdog._is_url_allowed('https://www.google.evil.com') is False
def test_automatic_www_subdomain_addition(self):
	"""Test that root domains automatically allow www subdomain."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	profile = BrowserProfile(allowed_domains=['example.com', 'test.org'], headless=True, user_data_dir=None)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# Root domains allow themselves and, implicitly, their www variant.
	for url in (
		'https://example.com',
		'https://test.org',
		'https://www.example.com',
		'https://www.test.org',
	):
		assert watchdog._is_url_allowed(url) is True

	# Other subdomains and unrelated hosts are not implicitly allowed.
	for url in (
		'https://mail.example.com',
		'https://sub.test.org',
		'https://notexample.com',
		'https://www.notexample.com',
	):
		assert watchdog._is_url_allowed(url) is False
def test_www_subdomain_not_added_for_country_tlds(self):
	"""Test www subdomain is NOT automatically added for country-specific TLDs (2+ dots)."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	# Country-code TLD domains (2+ dots) must NOT receive an implicit www variant.
	profile = BrowserProfile(
		allowed_domains=['example.co.uk', 'test.com.au', 'site.co.jp'], headless=True, user_data_dir=None
	)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# The exact domains work exactly as specified.
	for url in ('https://example.co.uk', 'https://test.com.au', 'https://site.co.jp'):
		assert watchdog._is_url_allowed(url) is True

	# Neither www nor any other subdomain is implicitly allowed; the user
	# must list such hosts explicitly.
	for url in (
		'https://www.example.co.uk',
		'https://www.test.com.au',
		'https://www.site.co.jp',
		'https://mail.example.co.uk',
		'https://api.test.com.au',
	):
		assert watchdog._is_url_allowed(url) is False
def test_www_subdomain_not_added_for_existing_subdomains(self):
	"""Test that www is not automatically added for domains that already have subdomains."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	# Allowlist entries that are already subdomains get no implicit www handling.
	profile = BrowserProfile(allowed_domains=['mail.example.com', 'api.test.org'], headless=True, user_data_dir=None)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# Only the exact subdomains pass.
	for url in ('https://mail.example.com', 'https://api.test.org'):
		assert watchdog._is_url_allowed(url) is True

	# www-prefixed variants and the bare root domains are both blocked.
	for url in (
		'https://www.mail.example.com',
		'https://www.api.test.org',
		'https://example.com',
		'https://test.org',
	):
		assert watchdog._is_url_allowed(url) is False
def test_www_subdomain_not_added_for_wildcard_patterns(self):
	"""Test that www is not automatically added for wildcard patterns."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	profile = BrowserProfile(allowed_domains=['*.example.com'], headless=True, user_data_dir=None)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# The wildcard already covers the root, www, and every other subdomain,
	# so no extra www logic is needed or applied.
	for url in ('https://example.com', 'https://www.example.com', 'https://mail.example.com'):
		assert watchdog._is_url_allowed(url) is True
def test_www_subdomain_not_added_for_url_patterns(self):
	"""Test that www is not automatically added for full URL patterns."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	# Full URL patterns (with scheme) get no automatic www handling.
	profile = BrowserProfile(
		allowed_domains=['https://example.com', 'http://test.org'], headless=True, user_data_dir=None
	)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# Exact scheme + host matches pass regardless of path.
	assert watchdog._is_url_allowed('https://example.com/path') is True
	assert watchdog._is_url_allowed('http://test.org/page') is True

	# The www variants are not implicitly added for URL patterns.
	assert watchdog._is_url_allowed('https://www.example.com') is False
	assert watchdog._is_url_allowed('http://www.test.org') is False
def test_is_root_domain_helper(self):
	"""Test the _is_root_domain helper method logic."""
	from bubus import EventBus

	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

	profile = BrowserProfile(allowed_domains=['example.com'], headless=True, user_data_dir=None)
	session = BrowserSession(browser_profile=profile)
	watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

	# Simple root domains (exactly one dot) qualify.
	for domain in ('example.com', 'test.org', 'site.net'):
		assert watchdog._is_root_domain(domain) is True

	non_roots = (
		# Subdomains and country TLDs (more than one dot).
		'www.example.com',
		'mail.example.com',
		'example.co.uk',
		'test.com.au',
		# Wildcard patterns.
		'*.example.com',
		'*example.com',
		# Full URLs with a scheme.
		'https://example.com',
		'http://test.org',
		# Invalid / dotless values.
		'example',
		'',
	)
	for domain in non_roots:
		assert watchdog._is_root_domain(domain) is False
class TestUrlProhibitlistSecurity:
	"""Tests for URL prohibitlist (blocked domains) behavior and matching semantics."""

	def test_simple_prohibited_domains(self):
		"""Domain-only patterns block exact host and www, but not other subdomains."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['example.com', 'test.org'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The bare host and its www variant are both blocked.
		for url in (
			'https://example.com',
			'https://www.example.com',
			'https://test.org',
			'https://www.test.org',
		):
			assert watchdog._is_url_allowed(url) is False
		# Other subdomains and unrelated hosts remain reachable.
		for url in ('https://mail.example.com', 'https://api.test.org', 'https://notexample.com'):
			assert watchdog._is_url_allowed(url) is True

	def test_glob_pattern_prohibited(self):
		"""Wildcard patterns block subdomains and main domain for http/https only."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['*.example.com'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The wildcard covers the main domain and every subdomain.
		for url in ('https://example.com', 'https://www.example.com', 'https://mail.example.com'):
			assert watchdog._is_url_allowed(url) is False
		# Unrelated hosts stay reachable.
		assert watchdog._is_url_allowed('https://notexample.com') is True
		# Domain-only wildcards apply to http/https schemes only.
		assert watchdog._is_url_allowed('chrome://abc.example.com') is True

	def test_full_url_prohibited_patterns(self):
		"""Full URL patterns block only matching scheme/host/prefix."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['https://wiki.org', 'brave://*'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Blocking is scheme-specific: http passes, https (any path) does not.
		assert watchdog._is_url_allowed('http://wiki.org') is True
		assert watchdog._is_url_allowed('https://wiki.org') is False
		assert watchdog._is_url_allowed('https://wiki.org/path') is False
		# Internal-scheme prefix patterns block only that scheme.
		assert watchdog._is_url_allowed('brave://anything/') is False
		assert watchdog._is_url_allowed('chrome://settings') is True

	def test_internal_urls_allowed_even_when_prohibited(self):
		"""Internal new-tab/blank URLs are always allowed regardless of prohibited list."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['*'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Even a catch-all prohibitlist cannot block the browser's own pages.
		for url in ('about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page', 'chrome://newtab/'):
			assert watchdog._is_url_allowed(url) is True

	def test_prohibited_ignored_when_allowlist_present(self):
		"""When allowlist is set, prohibited list is ignored by design."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(
			allowed_domains=['*.example.com'],
			prohibited_domains=['https://example.com'],
			headless=True,
			user_data_dir=None,
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The allowlist wins even though the exact URL is also prohibited.
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://www.example.com') is True
		# Still covered by the wildcard allowlist, so allowed.
		assert watchdog._is_url_allowed('https://api.example.com') is True
		# Hosts outside the allowlist are blocked (prohibitlist is not consulted in this mode).
		assert watchdog._is_url_allowed('https://notexample.com') is False

	def test_auth_credentials_do_not_cause_false_block(self):
		"""Credentials injection with prohibited domain in username should not block unrelated hosts."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['example.com'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The real host is malicious.com; a username that merely resembles the
		# prohibited domain must not trigger a block.
		for url in (
			'https://example.com:password@malicious.com',
			'https://example.com@malicious.com',
			'https://example.com%20@malicious.com',
			'https://example.com%3A@malicious.com',
		):
			assert watchdog._is_url_allowed(url) is True
		# Genuine credentials pointed at a prohibited host are still blocked.
		assert watchdog._is_url_allowed('https://user:password@example.com') is False

	def test_case_insensitive_prohibited_domains(self):
		"""Prohibited domain matching should be case-insensitive."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(prohibited_domains=['Example.COM'], headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Case differences on either side do not defeat the block.
		assert watchdog._is_url_allowed('https://example.com') is False
		assert watchdog._is_url_allowed('https://WWW.EXAMPLE.COM') is False
		# Subdomains are still not covered by a root-only entry.
		assert watchdog._is_url_allowed('https://mail.example.com') is True
class TestDomainListOptimization:
	"""Tests for domain list optimization (set conversion for large lists)."""

	def test_small_list_keeps_pattern_support(self):
		"""Test that lists < 100 items keep pattern matching support."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		profile = BrowserProfile(
			prohibited_domains=['*.google.com', 'x.com', 'facebook.com'], headless=True, user_data_dir=None
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Small collections stay lists so glob patterns keep working.
		assert isinstance(session.browser_profile.prohibited_domains, list)
		for url in (
			# Matched by the '*.google.com' pattern.
			'https://www.google.com',
			'https://mail.google.com',
			'https://google.com',
			# Matched exactly.
			'https://x.com',
			'https://facebook.com',
		):
			assert watchdog._is_url_allowed(url) is False
		# Anything not listed is still allowed.
		assert watchdog._is_url_allowed('https://example.com') is True

	def test_large_list_converts_to_set(self):
		"""Test that lists >= 100 items are converted to sets."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		domains = [f'blocked{i}.com' for i in range(100)]
		profile = BrowserProfile(prohibited_domains=domains, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Large collections are converted to a set for fast lookups.
		assert isinstance(session.browser_profile.prohibited_domains, set)
		assert len(session.browser_profile.prohibited_domains) == 100
		# Exact-match lookups still block listed hosts.
		for url in ('https://blocked0.com', 'https://blocked50.com', 'https://blocked99.com'):
			assert watchdog._is_url_allowed(url) is False
		# Hosts outside the set remain allowed.
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://blocked100.com') is True  # Not in list

	def test_www_variant_matching_with_sets(self):
		"""Test that www variants are checked in set-based lookups."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		# 100 entries total: half bare domains, half www-prefixed ones.
		domains = [f'site{i}.com' for i in range(50)] + [f'www.domain{i}.org' for i in range(50)]
		profile = BrowserProfile(prohibited_domains=domains, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		assert isinstance(session.browser_profile.prohibited_domains, set)
		# Entries without a www prefix also block their www variant, and vice versa.
		for url in (
			'https://site0.com',
			'https://www.site0.com',
			'https://www.domain0.org',
			'https://domain0.org',
		):
			assert watchdog._is_url_allowed(url) is False
		# Unrelated hosts stay allowed.
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://www.example.com') is True

	def test_allowed_domains_with_sets(self):
		"""Test that allowed_domains also works with set optimization."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		domains = [f'allowed{i}.com' for i in range(100)]
		profile = BrowserProfile(allowed_domains=domains, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The allowlist is also converted to a set at this size.
		assert isinstance(session.browser_profile.allowed_domains, set)
		# Listed hosts (and their www variants) pass.
		for url in ('https://allowed0.com', 'https://www.allowed0.com', 'https://allowed99.com'):
			assert watchdog._is_url_allowed(url) is True
		# Everything else is blocked in allowlist mode.
		assert watchdog._is_url_allowed('https://example.com') is False
		assert watchdog._is_url_allowed('https://notallowed.com') is False

	def test_manual_set_input(self):
		"""Test that users can directly provide a set."""
		from bubus import EventBus

		from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog

		domains = {f'blocked{i}.com' for i in range(50)}
		profile = BrowserProfile(prohibited_domains=domains, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# A user-supplied set is kept as-is and works for lookups.
		assert isinstance(session.browser_profile.prohibited_domains, set)
		assert watchdog._is_url_allowed('https://blocked0.com') is False
		assert watchdog._is_url_allowed('https://example.com') is True
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/security/test_ip_blocking.py | tests/ci/security/test_ip_blocking.py | """
Comprehensive tests for IP address blocking in SecurityWatchdog.
Tests cover IPv4, IPv6, localhost, private networks, edge cases, and interactions
with allowed_domains and prohibited_domains configurations.
"""
from bubus import EventBus
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog
class TestIPv4Blocking:
	"""Test blocking of IPv4 addresses."""

	def test_block_public_ipv4_addresses(self):
		"""Test that public IPv4 addresses are blocked when block_ip_addresses=True."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Public IPv4 hosts must all be rejected.
		for url in (
			'http://180.1.1.1/supersafe.txt',
			'https://8.8.8.8/',
			'http://1.1.1.1:8080/api',
			'https://142.250.185.46/search',
			'http://93.184.216.34/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_private_ipv4_networks(self):
		"""Test that private network IPv4 addresses are blocked."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# RFC 1918 private ranges: 192.168/16, 10/8, and 172.16/12.
		for url in (
			'http://192.168.1.1/',
			'http://192.168.0.100/admin',
			'http://10.0.0.1/',
			'http://10.255.255.255/',
			'http://172.16.0.1/',
			'http://172.31.255.254/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_localhost_ipv4(self):
		"""Test that localhost IPv4 addresses are blocked."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The entire 127.0.0.0/8 loopback range is blocked, with or without a port.
		for url in (
			'http://127.0.0.1/',
			'http://127.0.0.1:8080/',
			'https://127.0.0.1:3000/api/test',
			'http://127.1.2.3/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv4_with_ports_and_paths(self):
		"""Test that IPv4 addresses with ports and paths are blocked."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Ports, paths, query strings, and fragments make no difference.
		for url in (
			'http://8.8.8.8:80/',
			'https://8.8.8.8:443/',
			'http://192.168.1.1:8080/',
			'http://10.0.0.1:3000/api',
			'http://1.2.3.4/path/to/resource',
			'http://5.6.7.8/api?key=value',
			'https://9.10.11.12/path/to/file.html#anchor',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_allow_ipv4_when_blocking_disabled(self):
		"""Test that IPv4 addresses are allowed when block_ip_addresses=False (default)."""
		profile = BrowserProfile(block_ip_addresses=False, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# With blocking off, every IPv4 host is reachable.
		for url in (
			'http://180.1.1.1/supersafe.txt',
			'http://192.168.1.1/',
			'http://127.0.0.1:8080/',
			'http://8.8.8.8/',
		):
			assert watchdog._is_url_allowed(url) is True
class TestIPv6Blocking:
	"""Test blocking of IPv6 addresses."""

	def test_block_ipv6_addresses(self):
		"""Test that IPv6 addresses are blocked when block_ip_addresses=True."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Public IPv6 literals (bracketed per URL syntax) are rejected.
		for url in (
			'http://[2001:db8::1]/',
			'https://[2001:4860:4860::8888]/',
			'http://[2606:4700:4700::1111]/path',
			'https://[2001:db8:85a3::8a2e:370:7334]/api',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv6_localhost(self):
		"""Test that IPv6 localhost addresses are blocked."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Loopback in compressed and fully expanded form, with or without a port.
		for url in (
			'http://[::1]/',
			'http://[::1]:8080/',
			'https://[::1]:3000/api',
			'http://[0:0:0:0:0:0:0:1]/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv6_with_ports_and_paths(self):
		"""Test that IPv6 addresses with ports and paths are blocked."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Ports, paths, and query strings do not change the verdict.
		for url in (
			'http://[2001:db8::1]:80/',
			'https://[2001:db8::1]:443/',
			'http://[::1]:8080/api',
			'http://[2001:db8::1]/path/to/resource',
			'https://[2001:db8::1]/api?key=value',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_allow_ipv6_when_blocking_disabled(self):
		"""Test that IPv6 addresses are allowed when block_ip_addresses=False."""
		profile = BrowserProfile(block_ip_addresses=False, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# With blocking disabled, IPv6 literals are reachable.
		for url in ('http://[2001:db8::1]/', 'http://[::1]:8080/', 'https://[2001:4860:4860::8888]/'):
			assert watchdog._is_url_allowed(url) is True
class TestDomainNamesStillAllowed:
	"""Test that regular domain names are not affected by IP blocking."""

	def test_domain_names_allowed_with_ip_blocking(self):
		"""Test that domain names continue to work when IP blocking is enabled."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Ordinary hostnames are unaffected by the IP filter.
		for url in (
			'https://example.com',
			'https://www.google.com',
			'http://subdomain.example.org/path',
			'https://api.github.com/repos',
			# 'localhost' is a hostname, not an IP literal.
			'http://localhost/',
			'http://localhost:8080/api',
		):
			assert watchdog._is_url_allowed(url) is True

	def test_domains_with_numbers_allowed(self):
		"""Test that domain names containing numbers are still allowed."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Digits in a hostname do not make it an IP address.
		for url in (
			'https://example123.com',
			'https://123example.com',
			'https://server1.example.com',
			'http://web2.site.org',
		):
			assert watchdog._is_url_allowed(url) is True
class TestIPBlockingWithAllowedDomains:
	"""Test interaction between IP blocking and allowed_domains."""

	def test_ip_blocked_even_in_allowed_domains(self):
		"""Test that IPs are blocked even if they're in allowed_domains list."""
		# Putting an IP in allowed_domains makes little sense, but if a caller
		# does it anyway, IP blocking must take precedence.
		profile = BrowserProfile(
			block_ip_addresses=True,
			allowed_domains=['example.com', '192.168.1.1'],  # IP in allowlist
			headless=True,
			user_data_dir=None,
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# The IP is blocked despite appearing in allowed_domains.
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
		# Normal allowlist behavior for hostnames is unchanged.
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://other.com') is False

	def test_allowed_domains_with_ip_blocking_enabled(self):
		"""Test that allowed_domains works normally with IP blocking enabled."""
		profile = BrowserProfile(
			block_ip_addresses=True, allowed_domains=['example.com', '*.google.com'], headless=True, user_data_dir=None
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Allowlisted hosts pass; everything else is blocked.
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://www.google.com') is True
		assert watchdog._is_url_allowed('https://other.com') is False
		# IP literals are blocked regardless of the allowlist.
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
class TestIPBlockingWithProhibitedDomains:
	"""Test interaction between IP blocking and prohibited_domains."""

	def test_ip_blocked_regardless_of_prohibited_domains(self):
		"""Test that IPs are blocked when IP blocking is on, independent of prohibited_domains."""
		profile = BrowserProfile(
			block_ip_addresses=True, prohibited_domains=['example.com'], headless=True, user_data_dir=None
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# IP literals fall to IP blocking...
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		# ...while the prohibitlist handles the named domain.
		assert watchdog._is_url_allowed('https://example.com') is False
		# Everything else remains reachable.
		assert watchdog._is_url_allowed('https://other.com') is True

	def test_prohibited_domains_without_ip_blocking(self):
		"""Test that prohibited_domains works normally when IP blocking is disabled."""
		profile = BrowserProfile(
			block_ip_addresses=False, prohibited_domains=['example.com', '8.8.8.8'], headless=True, user_data_dir=None
		)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Both the named domain and the listed IP are blocked by the prohibitlist itself.
		assert watchdog._is_url_allowed('https://example.com') is False
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		# Unlisted IPs and hostnames pass because IP blocking is off.
		assert watchdog._is_url_allowed('http://192.168.1.1/') is True
		assert watchdog._is_url_allowed('https://other.com') is True
class TestEdgeCases:
	"""Test edge cases and invalid inputs."""

	def test_invalid_urls_handled_gracefully(self):
		"""Test that invalid URLs don't cause crashes."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Malformed input is rejected (returns False) rather than raising.
		for url in ('not-a-url', '', 'http://', '://example.com'):
			assert watchdog._is_url_allowed(url) is False

	def test_internal_browser_urls_allowed(self):
		"""Test that internal browser URLs are still allowed with IP blocking."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		watchdog = SecurityWatchdog(browser_session=session, event_bus=EventBus())

		# Internal new-tab/blank pages always bypass filtering.
		for url in ('about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page', 'chrome://newtab/'):
			assert watchdog._is_url_allowed(url) is True
def test_ipv4_lookalike_domains_allowed(self):
"""Test that domains that look like IPs but aren't are still allowed."""
browser_profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
browser_session = BrowserSession(browser_profile=browser_profile)
event_bus = EventBus()
watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)
# These look like IPs but have too many/few octets or invalid ranges
# The IP parser should reject them, so they're treated as domain names
assert watchdog._is_url_allowed('http://999.999.999.999/') is True # Invalid IP range
assert watchdog._is_url_allowed('http://1.2.3.4.5/') is True # Too many octets
assert watchdog._is_url_allowed('http://1.2.3/') is True # Too few octets
def test_different_schemes_with_ips(self):
"""Test that IP blocking works across different URL schemes."""
browser_profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
browser_session = BrowserSession(browser_profile=browser_profile)
event_bus = EventBus()
watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus)
# HTTP and HTTPS
assert watchdog._is_url_allowed('http://192.168.1.1/') is False
assert watchdog._is_url_allowed('https://192.168.1.1/') is False
# FTP (if browser supports it)
assert watchdog._is_url_allowed('ftp://192.168.1.1/') is False
# WebSocket (parsed as regular URL)
assert watchdog._is_url_allowed('ws://192.168.1.1:8080/') is False
assert watchdog._is_url_allowed('wss://192.168.1.1:8080/') is False
class TestIsIPAddressHelper:
	"""Directly exercise the _is_ip_address hostname classifier."""

	@staticmethod
	def _watchdog():
		"""Build a SecurityWatchdog with IP blocking enabled on a headless profile."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_valid_ipv4_detection(self):
		"""Well-formed IPv4 literals are recognized as IPs."""
		wd = self._watchdog()
		for addr in ('127.0.0.1', '192.168.1.1', '8.8.8.8', '255.255.255.255', '0.0.0.0'):
			assert wd._is_ip_address(addr) is True

	def test_valid_ipv6_detection(self):
		"""Bare IPv6 literals (no URL-style brackets) are recognized as IPs."""
		wd = self._watchdog()
		ipv6_addrs = (
			'::1',
			'2001:db8::1',
			'2001:4860:4860::8888',
			'fe80::1',
			'2001:db8:85a3::8a2e:370:7334',
		)
		for addr in ipv6_addrs:
			assert wd._is_ip_address(addr) is True

	def test_invalid_ip_detection(self):
		"""Non-IP strings are classified as not-an-IP."""
		wd = self._watchdog()
		non_ips = (
			# Plain domain names
			'example.com',
			'www.google.com',
			'localhost',
			# Malformed IPs
			'999.999.999.999',
			'1.2.3',
			'1.2.3.4.5',
			'not-an-ip',
			'',
			# The helper only accepts bare hostnames, so ports/paths disqualify
			'192.168.1.1:8080',
			'192.168.1.1/path',
		)
		for candidate in non_ips:
			assert wd._is_ip_address(candidate) is False
class TestDefaultBehavior:
	"""Confirm that IP blocking is opt-in and everything passes by default."""

	def test_default_block_ip_addresses_is_false(self):
		"""block_ip_addresses defaults to False on BrowserProfile."""
		assert BrowserProfile(headless=True, user_data_dir=None).block_ip_addresses is False

	def test_no_blocking_by_default(self):
		"""With default settings, raw IP URLs of every flavor are permitted."""
		profile = BrowserProfile(headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		wd = SecurityWatchdog(browser_session=session, event_bus=EventBus())
		allowed = (
			'http://180.1.1.1/supersafe.txt',
			'http://192.168.1.1/',
			'http://127.0.0.1:8080/',
			'http://[::1]/',
			'https://8.8.8.8/',
		)
		for url in allowed:
			assert wd._is_url_allowed(url) is True
class TestComplexScenarios:
	"""Realistic configurations combining IP blocking with domain lists."""

	@staticmethod
	def _watchdog(**profile_kwargs):
		"""Build a SecurityWatchdog around a headless profile with the given overrides."""
		profile = BrowserProfile(headless=True, user_data_dir=None, **profile_kwargs)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_mixed_configuration_comprehensive(self):
		"""Allowlist, prohibited list, and IP blocking together."""
		wd = self._watchdog(
			block_ip_addresses=True,
			allowed_domains=['example.com', '*.google.com'],
			prohibited_domains=['bad.example.com'],  # ignored once an allowlist exists
		)
		# Allowlisted domains (including wildcard matches) pass.
		assert wd._is_url_allowed('https://example.com') is True
		assert wd._is_url_allowed('https://www.google.com') is True
		assert wd._is_url_allowed('https://mail.google.com') is True
		# Raw IPs are always rejected.
		assert wd._is_url_allowed('http://8.8.8.8/') is False
		assert wd._is_url_allowed('http://192.168.1.1/') is False
		# Non-allowlisted domains are rejected.
		assert wd._is_url_allowed('https://other.com') is False

	def test_localhost_development_scenario(self):
		"""Block external IPs but leave domain names — including 'localhost' — unrestricted."""
		wd = self._watchdog(block_ip_addresses=True)
		# Named hosts work, even with ports and paths.
		assert wd._is_url_allowed('http://localhost:3000/') is True
		assert wd._is_url_allowed('http://localhost:8080/api') is True
		# The loopback IP literal itself is blocked.
		assert wd._is_url_allowed('http://127.0.0.1:3000/') is False
		# External domains pass.
		assert wd._is_url_allowed('https://api.example.com') is True
		# External IPs are blocked.
		assert wd._is_url_allowed('http://8.8.8.8/') is False

	def test_security_hardening_scenario(self):
		"""Strictest setup: explicit allowlist plus IP blocking."""
		wd = self._watchdog(
			block_ip_addresses=True,
			allowed_domains=['example.com', 'api.example.com'],
		)
		# Only the listed domains are permitted.
		assert wd._is_url_allowed('https://example.com') is True
		assert wd._is_url_allowed('https://api.example.com') is True
		# IPs are blocked.
		assert wd._is_url_allowed('http://192.168.1.1/') is False
		# Unlisted domains are blocked.
		assert wd._is_url_allowed('https://other.com') is False
		# Even localhost falls outside the allowlist.
		assert wd._is_url_allowed('http://127.0.0.1/') is False
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_cloud_browser.py | tests/ci/browser/test_cloud_browser.py | """Tests for cloud browser functionality."""
import tempfile
from pathlib import Path
from unittest.mock import AsyncMock, patch
import pytest
from browser_use.browser.cloud.cloud import (
CloudBrowserAuthError,
CloudBrowserClient,
CloudBrowserError,
)
from browser_use.browser.cloud.views import CreateBrowserRequest
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
from browser_use.sync.auth import CloudAuthConfig
@pytest.fixture
def temp_config_dir(monkeypatch):
	"""Yield a throwaway config directory and point BROWSER_USE_CONFIG_DIR at it."""
	with tempfile.TemporaryDirectory() as tmpdir:
		config_dir = Path(tmpdir) / '.config' / 'browseruse'
		config_dir.mkdir(parents=True, exist_ok=True)
		# monkeypatch restores the environment automatically after the test.
		monkeypatch.setenv('BROWSER_USE_CONFIG_DIR', str(config_dir))
		yield config_dir
@pytest.fixture
def mock_auth_config(temp_config_dir):
	"""Persist a CloudAuthConfig holding a valid test token and return it."""
	config = CloudAuthConfig(api_token='test-token', user_id='test-user-id', authorized_at=None)
	config.save_to_file()
	return config
class TestCloudBrowserClient:
	"""Exercise CloudBrowserClient create/stop flows against a mocked httpx client."""

	async def test_create_browser_success(self, mock_auth_config, monkeypatch):
		"""A 201 response yields a fully populated browser session object."""
		# Remove the env var so the saved mock_auth_config token is used instead.
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		# Response body mirroring the real API payload.
		payload = {
			'id': 'test-browser-id',
			'status': 'active',
			'liveUrl': 'https://live.browser-use.com?wss=test',
			'cdpUrl': 'wss://test.proxy.daytona.works',
			'timeoutAt': '2025-09-17T04:35:36.049892',
			'startedAt': '2025-09-17T03:35:36.049974',
			'finishedAt': None,
		}
		with patch('httpx.AsyncClient') as patched_cls:
			response = AsyncMock()
			response.status_code = 201
			response.is_success = True
			response.json = lambda: payload
			http_client = AsyncMock()
			http_client.post.return_value = response
			patched_cls.return_value = http_client
			client = CloudBrowserClient()
			client.client = http_client
			result = await client.create_browser(CreateBrowserRequest())
			assert result.id == 'test-browser-id'
			assert result.status == 'active'
			assert result.cdpUrl == 'wss://test.proxy.daytona.works'
			# The stored token must be forwarded via the API-key header.
			http_client.post.assert_called_once()
			headers = http_client.post.call_args.kwargs['headers']
			assert 'X-Browser-Use-API-Key' in headers
			assert headers['X-Browser-Use-API-Key'] == 'test-token'

	async def test_create_browser_auth_error(self, temp_config_dir, monkeypatch):
		"""Without any credentials, browser creation raises CloudBrowserAuthError."""
		# No env var and no saved auth config — authentication must fail.
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		client = CloudBrowserClient()
		with pytest.raises(CloudBrowserAuthError) as exc_info:
			await client.create_browser(CreateBrowserRequest())
		assert 'BROWSER_USE_API_KEY environment variable' in str(exc_info.value)

	async def test_create_browser_http_401(self, mock_auth_config, monkeypatch):
		"""An HTTP 401 from the API surfaces as CloudBrowserAuthError."""
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		with patch('httpx.AsyncClient') as patched_cls:
			response = AsyncMock()
			response.status_code = 401
			response.is_success = False
			http_client = AsyncMock()
			http_client.post.return_value = response
			patched_cls.return_value = http_client
			client = CloudBrowserClient()
			client.client = http_client
			with pytest.raises(CloudBrowserAuthError) as exc_info:
				await client.create_browser(CreateBrowserRequest())
			assert 'Authentication failed' in str(exc_info.value)

	async def test_create_browser_with_env_var(self, temp_config_dir, monkeypatch):
		"""The BROWSER_USE_API_KEY environment variable is used when set."""
		monkeypatch.setenv('BROWSER_USE_API_KEY', 'env-test-token')
		payload = {
			'id': 'test-browser-id',
			'status': 'active',
			'liveUrl': 'https://live.browser-use.com?wss=test',
			'cdpUrl': 'wss://test.proxy.daytona.works',
			'timeoutAt': '2025-09-17T04:35:36.049892',
			'startedAt': '2025-09-17T03:35:36.049974',
			'finishedAt': None,
		}
		with patch('httpx.AsyncClient') as patched_cls:
			response = AsyncMock()
			response.status_code = 201
			response.is_success = True
			response.json = lambda: payload
			http_client = AsyncMock()
			http_client.post.return_value = response
			patched_cls.return_value = http_client
			client = CloudBrowserClient()
			client.client = http_client
			result = await client.create_browser(CreateBrowserRequest())
			assert result.id == 'test-browser-id'
			assert result.status == 'active'
			assert result.cdpUrl == 'wss://test.proxy.daytona.works'
			# The env-var token, not a stored one, must appear in the header.
			http_client.post.assert_called_once()
			headers = http_client.post.call_args.kwargs['headers']
			assert 'X-Browser-Use-API-Key' in headers
			assert headers['X-Browser-Use-API-Key'] == 'env-test-token'

	async def test_stop_browser_success(self, mock_auth_config, monkeypatch):
		"""A successful PATCH marks the tracked session as stopped."""
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		payload = {
			'id': 'test-browser-id',
			'status': 'stopped',
			'liveUrl': 'https://live.browser-use.com?wss=test',
			'cdpUrl': 'wss://test.proxy.daytona.works',
			'timeoutAt': '2025-09-17T04:35:36.049892',
			'startedAt': '2025-09-17T03:35:36.049974',
			'finishedAt': '2025-09-17T04:35:36.049892',
		}
		with patch('httpx.AsyncClient') as patched_cls:
			response = AsyncMock()
			response.status_code = 200
			response.is_success = True
			response.json = lambda: payload
			http_client = AsyncMock()
			http_client.patch.return_value = response
			patched_cls.return_value = http_client
			client = CloudBrowserClient()
			client.client = http_client
			client.current_session_id = 'test-browser-id'
			result = await client.stop_browser()
			assert result.id == 'test-browser-id'
			assert result.status == 'stopped'
			assert result.finishedAt is not None
			# The PATCH must target the session's URL with a stop action.
			http_client.patch.assert_called_once()
			call_args = http_client.patch.call_args
			assert 'test-browser-id' in call_args.args[0]  # URL embeds the session id
			assert call_args.kwargs['json'] == {'action': 'stop'}
			assert 'X-Browser-Use-API-Key' in call_args.kwargs['headers']

	async def test_stop_browser_session_not_found(self, mock_auth_config, monkeypatch):
		"""Stopping an unknown session surfaces a CloudBrowserError."""
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		with patch('httpx.AsyncClient') as patched_cls:
			response = AsyncMock()
			response.status_code = 404
			response.is_success = False
			http_client = AsyncMock()
			http_client.patch.return_value = response
			patched_cls.return_value = http_client
			client = CloudBrowserClient()
			client.client = http_client
			with pytest.raises(CloudBrowserError) as exc_info:
				await client.stop_browser('nonexistent-session')
			assert 'not found' in str(exc_info.value)
class TestBrowserSessionCloudIntegration:
	"""Check that BrowserSession picks up cloud-browser settings from its profile."""

	async def test_cloud_browser_profile_property(self):
		"""session.cloud_browser mirrors the profile's use_cloud flag."""
		profile = BrowserProfile(use_cloud=True)
		# Supplying a CDP URL prevents any real connection attempt.
		session = BrowserSession(browser_profile=profile, cdp_url='ws://mock-url')
		assert session.cloud_browser is True
		assert session.browser_profile.use_cloud is True

	async def test_browser_session_cloud_browser_logic(self, mock_auth_config, monkeypatch):
		"""use_cloud set on the profile propagates into the session."""
		# Clear the env var so the test relies on mock_auth_config.
		monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
		profile = BrowserProfile(use_cloud=True)
		assert profile.use_cloud is True
		# CDP URL again avoids actual connection attempts.
		session = BrowserSession(browser_profile=profile, cdp_url='ws://mock-url')
		assert session.cloud_browser is True
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_tabs.py | tests/ci/browser/test_tabs.py | """
Test multi-tab operations: creation, switching, closing, and background tabs.
Tests verify that:
1. Agent can create multiple tabs (3) and switch between them
2. Agent can close tabs with vision=True
3. Agent can handle buttons that open new tabs in background
4. Agent can continue and call done() after each tab operation
5. Browser state doesn't timeout during background tab operations
All tests use:
- max_steps=5 to allow multiple tab operations
- 120s timeout to fail if test takes too long
- Mock LLM to verify agent can still make decisions after tab operations
Usage:
uv run pytest tests/ci/browser/test_tabs.py -v -s
"""
import asyncio
import time
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
	"""Spin up a session-scoped HTTP server serving the pages used by the tab tests."""
	server = HTTPServer()
	server.start()
	# Simple static pages: home plus three numbered test pages.
	simple_pages = {
		'/home': '<html><head><title>Home Page</title></head><body><h1>Home Page</h1><p>This is the home page</p></body></html>',
		'/page1': '<html><head><title>Page 1</title></head><body><h1>Page 1</h1><p>First test page</p></body></html>',
		'/page2': '<html><head><title>Page 2</title></head><body><h1>Page 2</h1><p>Second test page</p></body></html>',
		'/page3': '<html><head><title>Page 3</title></head><body><h1>Page 3</h1><p>Third test page</p></body></html>',
	}
	for path, html in simple_pages.items():
		server.expect_request(path).respond_with_data(html, content_type='text/html')
	# Page with both a link and a button that open /page3 in a new (background) tab.
	server.expect_request('/background-tab-test').respond_with_data(
		"""
		<!DOCTYPE html>
		<html>
		<head><title>Background Tab Test</title></head>
		<body style="padding: 20px; font-family: Arial;">
		<h1>Background Tab Test</h1>
		<p>Click the link below to open a new tab in the background:</p>
		<a href="/page3" target="_blank" id="open-tab-link">Open New Tab (link)</a>
		<br><br>
		<button id="open-tab-btn" onclick="window.open('/page3', '_blank'); document.getElementById('status').textContent='Tab opened!'">
		Open New Tab (button)
		</button>
		<p id="status" style="margin-top: 20px; color: green;"></p>
		</body>
		</html>
		""",
		content_type='text/html',
	)
	yield server
	server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
	"""Base URL (http://host:port) of the test HTTP server."""
	return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='function')
async def browser_session():
	"""Start a fresh headless browser session per test and kill it afterwards."""
	profile = BrowserProfile(
		headless=True,
		user_data_dir=None,
		keep_alive=True,
	)
	session = BrowserSession(browser_profile=profile)
	await session.start()
	yield session
	await session.kill()
class TestMultiTabOperations:
"""Test multi-tab creation, switching, and closing."""
	async def test_create_and_switch_three_tabs(self, browser_session, base_url):
		"""Test that agent can create 3 tabs, switch between them, and call done().

		This test verifies that browser state is retrieved between each step.
		"""
		start_time = time.time()
		# Scripted mock-LLM responses: one complete JSON agent response per step.
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll start by navigating to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 1 in a new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page 1 in new tab",
				"next_goal": "Open page 1 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Open page2 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 2 in a new tab",
				"evaluation_previous_goal": "Page 1 opened in new tab",
				"memory": "Opening page 2 in new tab",
				"next_goal": "Open page 2 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page2",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 4: Switch to first tab (tab ids are 4-digit strings)
			"""
			{
				"thinking": "Now I'll switch back to the first tab",
				"evaluation_previous_goal": "Page 2 opened in new tab",
				"memory": "Switching to first tab",
				"next_goal": "Switch to first tab",
				"action": [
					{
						"switch": {
							"tab_id": "0000"
						}
					}
				]
			}
			""",
			# Action 5: Done
			"""
			{
				"thinking": "I've successfully created 3 tabs and switched between them",
				"evaluation_previous_goal": "Switched to first tab",
				"memory": "All tabs created and switched",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully created 3 tabs and switched between them",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home, then open {base_url}/page1 and {base_url}/page2 in new tabs, then switch back to the first tab',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			elapsed = time.time() - start_time
			print(f'\n⏱️ Test completed in {elapsed:.2f} seconds')
			print(f'📊 Completed {len(history)} steps')
			# Verify each step has browser state (i.e. state capture did not time out)
			for i, step in enumerate(history.history):
				assert step.state is not None, f'Step {i} should have browser state'
				assert step.state.url is not None, f'Step {i} should have URL in browser state'
				print(f' Step {i + 1}: URL={step.state.url}, tabs={len(step.state.tabs) if step.state.tabs else 0}')
			assert len(history) >= 4, 'Agent should have completed at least 4 steps'
			# Verify we have 3 tabs open
			tabs = await browser_session.get_tabs()
			assert len(tabs) >= 3, f'Should have at least 3 tabs open, got {len(tabs)}'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
			# Note: Test is fast (< 1s) because mock LLM returns instantly and pages are simple,
			# but browser state IS being retrieved correctly between steps as verified above
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - agent hung during tab operations')
	async def test_close_tab_with_vision(self, browser_session, base_url):
		"""Test that agent can close a tab with vision=True and call done()."""
		# Scripted mock-LLM responses: one complete JSON agent response per step.
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll start by navigating to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 1 in a new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page 1 in new tab",
				"next_goal": "Open page 1 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Close the current tab (the page1 tab, id "0001")
			"""
			{
				"thinking": "Now I'll close the current tab (page1)",
				"evaluation_previous_goal": "Page 1 opened in new tab",
				"memory": "Closing current tab",
				"next_goal": "Close current tab",
				"action": [
					{
						"close": {
							"tab_id": "0001"
						}
					}
				]
			}
			""",
			# Action 4: Done
			"""
			{
				"thinking": "I've successfully closed the tab",
				"evaluation_previous_goal": "Tab closed",
				"memory": "Tab closed successfully",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully closed the tab",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home, then open {base_url}/page1 in a new tab, then close the page1 tab',
			llm=mock_llm,
			browser_session=browser_session,
			use_vision=True,  # Enable vision for this test
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			assert len(history) >= 3, 'Agent should have completed at least 3 steps'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - agent hung during tab closing with vision')
	async def test_background_tab_open_no_timeout(self, browser_session, base_url):
		"""Test that browser state doesn't timeout when a new tab opens in the background."""
		start_time = time.time()
		# Scripted mock-LLM responses: one complete JSON agent response per step.
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll navigate to the home page first",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new background tab (stay on home page)
			f"""
			{{
				"thinking": "I'll open page1 in a new background tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening background tab",
				"next_goal": "Open background tab without switching to it",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Immediately check browser state after background tab opens
			"""
			{
				"thinking": "After opening background tab, browser state should still be accessible",
				"evaluation_previous_goal": "Background tab opened",
				"memory": "Verifying browser state works",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully opened background tab, browser state remains accessible",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home and open {base_url}/page1 in a new tab',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - this tests if browser state times out when new tabs open
		try:
			history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
			elapsed = time.time() - start_time
			print(f'\n⏱️ Test completed in {elapsed:.2f} seconds')
			print(f'📊 Completed {len(history)} steps')
			# Verify each step has browser state (the key test - no timeouts)
			for i, step in enumerate(history.history):
				assert step.state is not None, f'Step {i} should have browser state'
				assert step.state.url is not None, f'Step {i} should have URL in browser state'
				print(f' Step {i + 1}: URL={step.state.url}, tabs={len(step.state.tabs) if step.state.tabs else 0}')
			assert len(history) >= 2, 'Agent should have completed at least 2 steps'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
			# Verify we have at least 2 tabs
			tabs = await browser_session.get_tabs()
			print(f' Final tab count: {len(tabs)}')
			assert len(tabs) >= 2, f'Should have at least 2 tabs after opening background tab, got {len(tabs)}'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - browser state timed out after opening background tab')
	async def test_rapid_tab_operations_no_timeout(self, browser_session, base_url):
		"""Test that browser state doesn't timeout during rapid tab operations."""
		# Scripted mock-LLM responses: open three tabs back-to-back, then finish.
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll navigate to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Opening page1 in new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page1",
				"next_goal": "Open page1",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Open page2 in new tab
			f"""
			{{
				"thinking": "Opening page2 in new tab",
				"evaluation_previous_goal": "Page1 opened",
				"memory": "Opening page2",
				"next_goal": "Open page2",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page2",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 4: Open page3 in new tab
			f"""
			{{
				"thinking": "Opening page3 in new tab",
				"evaluation_previous_goal": "Page2 opened",
				"memory": "Opening page3",
				"next_goal": "Open page3",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page3",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 5: Verify browser state is still accessible
			"""
			{
				"thinking": "All tabs opened rapidly, browser state should still be accessible",
				"evaluation_previous_goal": "Page3 opened",
				"memory": "All tabs opened",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully opened 4 tabs rapidly without timeout",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task='Open multiple tabs rapidly and verify browser state remains accessible',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			assert len(history) >= 4, 'Agent should have completed at least 4 steps'
			# Verify we have 4 tabs open
			tabs = await browser_session.get_tabs()
			assert len(tabs) >= 4, f'Should have at least 4 tabs open, got {len(tabs)}'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - browser state timed out during rapid tab operations')
async def test_multiple_tab_switches_and_close(self, browser_session, base_url):
"""Test that agent can switch between multiple tabs and close one."""
actions = [
# Action 1: Navigate to home page
f"""
{{
"thinking": "I'll start by navigating to the home page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating to home page",
"next_goal": "Navigate to home page",
"action": [
{{
"navigate": {{
"url": "{base_url}/home",
"new_tab": false
}}
}}
]
}}
""",
# Action 2: Open page1 in new tab
f"""
{{
"thinking": "Opening page 1 in new tab",
"evaluation_previous_goal": "Home page loaded",
"memory": "Opening page 1",
"next_goal": "Open page 1",
"action": [
{{
"navigate": {{
"url": "{base_url}/page1",
"new_tab": true
}}
}}
]
}}
""",
# Action 3: Open page2 in new tab
f"""
{{
"thinking": "Opening page 2 in new tab",
"evaluation_previous_goal": "Page 1 opened",
"memory": "Opening page 2",
"next_goal": "Open page 2",
"action": [
{{
"navigate": {{
"url": "{base_url}/page2",
"new_tab": true
}}
}}
]
}}
""",
# Action 4: Switch to tab 1
"""
{
"thinking": "Switching to tab 1 (page1)",
"evaluation_previous_goal": "Page 2 opened",
"memory": "Switching to page 1",
"next_goal": "Switch to page 1",
"action": [
{
"switch": {
"tab_id": "0001"
}
}
]
}
""",
# Action 5: Close current tab
"""
{
"thinking": "Closing the current tab (page1)",
"evaluation_previous_goal": "Switched to page 1",
"memory": "Closing page 1",
"next_goal": "Close page 1",
"action": [
{
"close": {
"tab_id": "0001"
}
}
]
}
""",
# Action 6: Done
"""
{
"thinking": "Successfully completed all tab operations",
"evaluation_previous_goal": "Tab closed",
"memory": "All operations completed",
"next_goal": "Complete task",
"action": [
{
"done": {
"text": "Successfully created, switched, and closed tabs",
"success": true
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
agent = Agent(
task='Create 3 tabs, switch to the second one, then close it',
llm=mock_llm,
browser_session=browser_session,
)
# Run with timeout - should complete within 2 minutes
try:
history = await asyncio.wait_for(agent.run(max_steps=6), timeout=120)
assert len(history) >= 5, 'Agent should have completed at least 5 steps'
# Verify agent completed successfully
final_result = history.final_result()
assert final_result is not None, 'Agent should return a final result'
assert 'Successfully' in final_result, 'Agent should report success'
except TimeoutError:
pytest.fail('Test timed out after 2 minutes - agent hung during multiple tab operations')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_dom_serializer.py | tests/ci/browser/test_dom_serializer.py | """
Test DOM serializer with complex scenarios: shadow DOM, same-origin and cross-origin iframes.
This test verifies that the DOM serializer correctly:
1. Extracts interactive elements from shadow DOM
2. Processes same-origin iframes
3. Handles cross-origin iframes (should be blocked)
4. Generates correct selector_map with expected element counts
Usage:
uv run pytest tests/ci/browser/test_dom_serializer.py -v -s
"""
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile, ViewportSize
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server for DOM serializer tests."""
from pathlib import Path
server = HTTPServer()
server.start()
# Load HTML templates from files
test_dir = Path(__file__).parent
main_page_html = (test_dir / 'test_page_template.html').read_text()
iframe_html = (test_dir / 'iframe_template.html').read_text()
stacked_page_html = (test_dir / 'test_page_stacked_template.html').read_text()
# Route 1: Main page with shadow DOM and iframes
server.expect_request('/dom-test-main').respond_with_data(main_page_html, content_type='text/html')
# Route 2: Same-origin iframe content
server.expect_request('/iframe-same-origin').respond_with_data(iframe_html, content_type='text/html')
# Route 3: Stacked complex scenarios test page
server.expect_request('/stacked-test').respond_with_data(stacked_page_html, content_type='text/html')
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='function')
async def browser_session():
"""Create a browser session for DOM serializer tests."""
session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
window_size=ViewportSize(width=1920, height=1400), # Taller window to fit all stacked elements
cross_origin_iframes=True, # Enable cross-origin iframe extraction via CDP target switching
)
)
await session.start()
yield session
await session.kill()
class TestDOMSerializer:
"""Test DOM serializer with complex scenarios."""
async def test_dom_serializer_with_shadow_dom_and_iframes(self, browser_session, base_url):
"""Test DOM serializer extracts elements from shadow DOM, same-origin iframes, and cross-origin iframes.
This test verifies:
1. Elements are in the serializer (selector_map)
2. We can click elements using click(index)
Expected interactive elements:
- Regular DOM: 3 elements (button, input, link on main page)
- Shadow DOM: 3 elements (2 buttons, 1 input inside shadow root)
- Same-origin iframe: 2 elements (button, input inside iframe)
- Cross-origin iframe placeholder: about:blank (no interactive elements)
- Iframe tags: 2 elements (the iframe elements themselves)
Total: ~10 interactive elements
"""
from browser_use.tools.service import Tools
tools = Tools()
# Create mock LLM actions that will click elements from each category
# We'll generate actions dynamically after we know the indices
actions = [
f"""
{{
"thinking": "I'll navigate to the DOM test page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating to test page",
"next_goal": "Navigate to test page",
"action": [
{{
"navigate": {{
"url": "{base_url}/dom-test-main",
"new_tab": false
}}
}}
]
}}
"""
]
await tools.navigate(url=f'{base_url}/dom-test-main', new_tab=False, browser_session=browser_session)
import asyncio
await asyncio.sleep(1)
# Get the browser state to access selector_map
browser_state_summary = await browser_session.get_browser_state_summary(
include_screenshot=False,
include_recent_events=False,
)
assert browser_state_summary is not None, 'Browser state summary should not be None'
assert browser_state_summary.dom_state is not None, 'DOM state should not be None'
selector_map = browser_state_summary.dom_state.selector_map
print(f' Selector map: {selector_map.keys()}')
print('\n📊 DOM Serializer Analysis:')
print(f' Total interactive elements found: {len(selector_map)}')
serilized_text = browser_state_summary.dom_state.llm_representation()
print(f' Serialized text: {serilized_text}')
# assume all selector map keys are as text in the serialized text
# for idx, element in selector_map.items():
# assert str(idx) in serilized_text, f'Element {idx} should be in serialized text'
# print(f' ✓ Element {idx} found in serialized text')
# assume at least 10 interactive elements are in the selector map
assert len(selector_map) >= 10, f'Should find at least 10 interactive elements, found {len(selector_map)}'
# assert all interactive elements marked with [123] from serialized text are in selector map
# find all [index] from serialized text with regex
import re
indices = re.findall(r'\[(\d+)\]', serilized_text)
for idx in indices:
assert int(idx) in selector_map.keys(), f'Element {idx} should be in selector map'
print(f' ✓ Element {idx} found in selector map')
regular_elements = []
shadow_elements = []
iframe_content_elements = []
iframe_tags = []
# Categorize elements by their IDs (more stable than hardcoded indices)
# Check element attributes to identify their location
for idx, element in selector_map.items():
# Check if this is an iframe tag (not content inside iframe)
if element.tag_name == 'iframe':
iframe_tags.append((idx, element))
# Check if element has an ID attribute
elif hasattr(element, 'attributes') and 'id' in element.attributes:
elem_id = element.attributes['id'].lower()
# Shadow DOM elements have IDs starting with "shadow-"
if elem_id.startswith('shadow-'):
shadow_elements.append((idx, element))
# Iframe content elements have IDs starting with "iframe-"
elif elem_id.startswith('iframe-'):
iframe_content_elements.append((idx, element))
# Everything else is regular DOM
else:
regular_elements.append((idx, element))
# Elements without IDs are regular DOM
else:
regular_elements.append((idx, element))
# Verify element counts based on our test page structure:
# - Regular DOM: 3-4 elements (button, input, link on main page + possible cross-origin content)
# - Shadow DOM: 3 elements (2 buttons, 1 input inside shadow root)
# - Iframe content: 2 elements (button, input from same-origin iframe)
# - Iframe tags: 2 elements (the iframe elements themselves)
# Total: ~10-11 interactive elements depending on cross-origin iframe extraction
print('\n✅ DOM Serializer Test Summary:')
print(f' • Regular DOM: {len(regular_elements)} elements {"✓" if len(regular_elements) >= 3 else "✗"}')
print(f' • Shadow DOM: {len(shadow_elements)} elements {"✓" if len(shadow_elements) >= 3 else "✗"}')
print(
f' • Same-origin iframe content: {len(iframe_content_elements)} elements {"✓" if len(iframe_content_elements) >= 2 else "✗"}'
)
print(f' • Iframe tags: {len(iframe_tags)} elements {"✓" if len(iframe_tags) >= 2 else "✗"}')
print(f' • Total elements: {len(selector_map)}')
# Verify we found elements from all sources
assert len(selector_map) >= 8, f'Should find at least 8 interactive elements, found {len(selector_map)}'
assert len(regular_elements) >= 1, f'Should find at least 1 regular DOM element, found {len(regular_elements)}'
assert len(shadow_elements) >= 1, f'Should find at least 1 shadow DOM element, found {len(shadow_elements)}'
assert len(iframe_content_elements) >= 1, (
f'Should find at least 1 iframe content element, found {len(iframe_content_elements)}'
)
# Now test clicking elements from each category using tools.click(index)
print('\n🖱️ Testing Click Functionality:')
# Helper to call tools.click(index) and verify it worked
async def click(index: int, element_description: str, browser_session: BrowserSession):
result = await tools.click(index=index, browser_session=browser_session)
# Check both error field and extracted_content for failure messages
if result.error:
raise AssertionError(f'Click on {element_description} [{index}] failed: {result.error}')
if result.extracted_content and (
'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
):
raise AssertionError(f'Click on {element_description} [{index}] failed: {result.extracted_content}')
print(f' ✓ {element_description} [{index}] clicked successfully')
return result
# Test clicking a regular DOM element (button)
if regular_elements:
regular_button_idx = next((idx for idx, el in regular_elements if 'regular-btn' in el.attributes.get('id', '')), None)
if regular_button_idx:
await click(regular_button_idx, 'Regular DOM button', browser_session)
# Test clicking a shadow DOM element (button)
if shadow_elements:
shadow_button_idx = next((idx for idx, el in shadow_elements if 'btn' in el.attributes.get('id', '')), None)
if shadow_button_idx:
await click(shadow_button_idx, 'Shadow DOM button', browser_session)
# Test clicking a same-origin iframe element (button)
if iframe_content_elements:
iframe_button_idx = next((idx for idx, el in iframe_content_elements if 'btn' in el.attributes.get('id', '')), None)
if iframe_button_idx:
await click(iframe_button_idx, 'Same-origin iframe button', browser_session)
# Validate click counter - verify all 3 clicks actually executed JavaScript
print('\n✅ Validating click counter...')
# Get the CDP session for the main page (use target from a regular DOM element)
# Note: browser_session.agent_focus_target_id may point to a different target than the page
if regular_elements and regular_elements[0][1].target_id:
cdp_session = await browser_session.get_or_create_cdp_session(target_id=regular_elements[0][1].target_id)
else:
cdp_session = await browser_session.get_or_create_cdp_session()
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': 'window.getClickCount()',
'returnByValue': True,
},
session_id=cdp_session.session_id,
)
click_count = result.get('result', {}).get('value', 0)
print(f' Click counter value: {click_count}')
assert click_count == 3, (
f'Expected 3 clicks (Regular DOM + Shadow DOM + Iframe), but counter shows {click_count}. '
f'This means some clicks did not execute JavaScript properly.'
)
print('\n🎉 DOM Serializer test completed successfully!')
async def test_dom_serializer_element_counts_detailed(self, browser_session, base_url):
"""Detailed test to verify specific element types are captured correctly."""
actions = [
f"""
{{
"thinking": "Navigating to test page",
"evaluation_previous_goal": "Starting",
"memory": "Navigate",
"next_goal": "Navigate",
"action": [
{{
"navigate": {{
"url": "{base_url}/dom-test-main",
"new_tab": false
}}
}}
]
}}
""",
"""
{
"thinking": "Done",
"evaluation_previous_goal": "Navigated",
"memory": "Complete",
"next_goal": "Done",
"action": [
{
"done": {
"text": "Done",
"success": true
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
agent = Agent(
task=f'Navigate to {base_url}/dom-test-main',
llm=mock_llm,
browser_session=browser_session,
)
history = await agent.run(max_steps=2)
# Get current browser state to access selector_map
browser_state_summary = await browser_session.get_browser_state_summary(
include_screenshot=False,
include_recent_events=False,
)
selector_map = browser_state_summary.dom_state.selector_map
# Count different element types
buttons = 0
inputs = 0
links = 0
for idx, element in selector_map.items():
element_str = str(element).lower()
if 'button' in element_str or '<button' in element_str:
buttons += 1
elif 'input' in element_str or '<input' in element_str:
inputs += 1
elif 'link' in element_str or '<a' in element_str or 'href' in element_str:
links += 1
print('\n📊 Element Type Counts:')
print(f' Buttons: {buttons}')
print(f' Inputs: {inputs}')
print(f' Links: {links}')
print(f' Total: {len(selector_map)}')
# We should have at least some of each type from the regular DOM
assert buttons >= 1, f'Should find at least 1 button, found {buttons}'
assert inputs >= 1, f'Should find at least 1 input, found {inputs}'
print('\n✅ Element type verification passed!')
async def test_stacked_complex_scenarios(self, browser_session, base_url):
"""Test clicking through stacked complex scenarios and verify cross-origin iframe extraction.
This test verifies:
1. Open shadow DOM element interaction
2. Closed shadow DOM element interaction (nested inside open shadow)
3. Same-origin iframe element interaction (inside closed shadow)
4. Cross-origin iframe placeholder with about:blank (no external dependencies)
5. Truly nested structure: Open Shadow → Closed Shadow → Iframe
"""
from browser_use.tools.service import Tools
tools = Tools()
# Navigate to stacked test page
await tools.navigate(url=f'{base_url}/stacked-test', new_tab=False, browser_session=browser_session)
import asyncio
await asyncio.sleep(1)
# Get browser state
browser_state_summary = await browser_session.get_browser_state_summary(
include_screenshot=False,
include_recent_events=False,
)
selector_map = browser_state_summary.dom_state.selector_map
print(f'\n📊 Stacked Test - Found {len(selector_map)} elements')
# Debug: Show all elements
print('\n🔍 All elements found:')
for idx, element in selector_map.items():
elem_id = element.attributes.get('id', 'NO_ID') if hasattr(element, 'attributes') else 'NO_ATTR'
print(f' [{idx}] {element.tag_name} id={elem_id} target={element.target_id[-4:] if element.target_id else "None"}')
# Categorize elements
open_shadow_elements = []
closed_shadow_elements = []
iframe_elements = []
final_button = None
for idx, element in selector_map.items():
if hasattr(element, 'attributes') and 'id' in element.attributes:
elem_id = element.attributes['id'].lower()
if 'open-shadow' in elem_id:
open_shadow_elements.append((idx, element))
elif 'closed-shadow' in elem_id:
closed_shadow_elements.append((idx, element))
elif 'iframe' in elem_id and element.tag_name != 'iframe':
iframe_elements.append((idx, element))
elif 'final-button' in elem_id:
final_button = (idx, element)
print('\n📋 Element Distribution:')
print(f' Open Shadow: {len(open_shadow_elements)} elements')
print(f' Closed Shadow: {len(closed_shadow_elements)} elements')
print(f' Iframe content: {len(iframe_elements)} elements')
print(f' Final button: {"Found" if final_button else "Not found"}')
# Test clicking through each stacked layer
print('\n🖱️ Testing Click Functionality Through Stacked Layers:')
async def click(index: int, element_description: str, browser_session: BrowserSession):
result = await tools.click(index=index, browser_session=browser_session)
if result.error:
raise AssertionError(f'Click on {element_description} [{index}] failed: {result.error}')
if result.extracted_content and (
'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
):
raise AssertionError(f'Click on {element_description} [{index}] failed: {result.extracted_content}')
print(f' ✓ {element_description} [{index}] clicked successfully')
return result
clicks_performed = 0
# 1. Click open shadow button
if open_shadow_elements:
open_shadow_btn = next((idx for idx, el in open_shadow_elements if 'btn' in el.attributes.get('id', '')), None)
if open_shadow_btn:
await click(open_shadow_btn, 'Open Shadow DOM button', browser_session)
clicks_performed += 1
# 2. Click closed shadow button
if closed_shadow_elements:
closed_shadow_btn = next((idx for idx, el in closed_shadow_elements if 'btn' in el.attributes.get('id', '')), None)
if closed_shadow_btn:
await click(closed_shadow_btn, 'Closed Shadow DOM button', browser_session)
clicks_performed += 1
# 3. Click iframe button
if iframe_elements:
iframe_btn = next((idx for idx, el in iframe_elements if 'btn' in el.attributes.get('id', '')), None)
if iframe_btn:
await click(iframe_btn, 'Same-origin iframe button', browser_session)
clicks_performed += 1
# 4. Try clicking cross-origin iframe tag (can click the tag, but not elements inside)
cross_origin_iframe_tag = None
for idx, element in selector_map.items():
if (
element.tag_name == 'iframe'
and hasattr(element, 'attributes')
and 'cross-origin' in element.attributes.get('id', '').lower()
):
cross_origin_iframe_tag = (idx, element)
break
# Verify cross-origin iframe extraction is working
# Check the full DOM tree (not just selector_map which only has interactive elements)
def count_targets_in_tree(node, targets=None):
if targets is None:
targets = set()
# SimplifiedNode has original_node which is an EnhancedDOMTreeNode
if hasattr(node, 'original_node') and node.original_node and node.original_node.target_id:
targets.add(node.original_node.target_id)
# Recursively check children
if hasattr(node, 'children') and node.children:
for child in node.children:
count_targets_in_tree(child, targets)
return targets
all_targets = count_targets_in_tree(browser_state_summary.dom_state._root)
print('\n📊 Cross-Origin Iframe Extraction:')
print(f' Found elements from {len(all_targets)} different CDP targets in full DOM tree')
if len(all_targets) >= 2:
print(' ✅ Multi-target iframe extraction IS WORKING!')
print(' ✓ Successfully extracted DOM from multiple CDP targets')
print(' ✓ CDP target switching feature is enabled and functional')
else:
print(' ⚠️ Only found elements from 1 target (cross-origin extraction may not be working)')
if cross_origin_iframe_tag:
print(f'\n 📌 Found cross-origin iframe tag [{cross_origin_iframe_tag[0]}]')
# Note: We don't increment clicks_performed since this doesn't trigger our counter
# await click(cross_origin_iframe_tag[0], 'Cross-origin iframe tag (scroll)', browser_session)
# 5. Click final button (after all stacked elements)
if final_button:
await click(final_button[0], 'Final button (after stack)', browser_session)
clicks_performed += 1
# Validate click counter
print('\n✅ Validating click counter...')
# Get CDP session from a non-iframe element (open shadow or final button)
if open_shadow_elements:
cdp_session = await browser_session.get_or_create_cdp_session(target_id=open_shadow_elements[0][1].target_id)
elif final_button:
cdp_session = await browser_session.get_or_create_cdp_session(target_id=final_button[1].target_id)
else:
cdp_session = await browser_session.get_or_create_cdp_session()
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': 'window.getClickCount()',
'returnByValue': True,
},
session_id=cdp_session.session_id,
)
click_count = result.get('result', {}).get('value', 0)
print(f' Click counter value: {click_count}')
print(f' Expected clicks: {clicks_performed}')
assert click_count == clicks_performed, (
f'Expected {clicks_performed} clicks, but counter shows {click_count}. '
f'Some clicks did not execute JavaScript properly.'
)
print('\n🎉 Stacked scenario test completed successfully!')
print(' ✓ Open shadow DOM clicks work')
print(' ✓ Closed shadow DOM clicks work')
print(' ✓ Same-origin iframe clicks work (can access elements inside)')
print(' ✓ Cross-origin iframe extraction works (CDP target switching enabled)')
print(' ✓ Truly nested structure works: Open Shadow → Closed Shadow → Iframe')
if __name__ == '__main__':
"""Run test in debug mode with manual fixture setup."""
import asyncio
import logging
# Set up debug logging
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)-8s [%(name)s] %(message)s',
)
async def main():
# Set up HTTP server fixture
from pathlib import Path
from pytest_httpserver import HTTPServer
server = HTTPServer()
server.start()
# Load HTML templates from files (same as http_server fixture)
test_dir = Path(__file__).parent
main_page_html = (test_dir / 'test_page_stacked_template.html').read_text()
# Set up routes using templates
server.expect_request('/stacked-test').respond_with_data(main_page_html, content_type='text/html')
base_url = f'http://{server.host}:{server.port}'
print(f'\n🌐 HTTP Server running at {base_url}')
# Set up browser session
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
session = BrowserSession(
browser_profile=BrowserProfile(
headless=False, # Set to False to see browser in action
user_data_dir=None,
keep_alive=True,
)
)
try:
await session.start()
print('🚀 Browser session started\n')
# Run the test
test = TestDOMSerializer()
await test.test_stacked_complex_scenarios(session, base_url)
print('\n✅ Test completed successfully!')
finally:
# Cleanup
await session.kill()
server.stop()
print('\n🧹 Cleanup complete')
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_true_cross_origin_click.py | tests/ci/browser/test_true_cross_origin_click.py | """Test clicking elements inside TRUE cross-origin iframes (external domains)."""
import asyncio
import pytest
from browser_use.browser.profile import BrowserProfile, ViewportSize
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
@pytest.fixture
async def browser_session():
"""Create browser session with cross-origin iframe support."""
session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
window_size=ViewportSize(width=1920, height=1400),
cross_origin_iframes=True, # Enable cross-origin iframe extraction
)
)
await session.start()
yield session
await session.kill()
class TestTrueCrossOriginIframeClick:
"""Test clicking elements inside true cross-origin iframes."""
async def test_click_element_in_true_cross_origin_iframe(self, httpserver, browser_session: BrowserSession):
"""Verify that elements inside TRUE cross-origin iframes (example.com) can be clicked.
This test uses example.com which is a real external domain, testing actual cross-origin
iframe extraction and clicking via CDP target switching.
"""
# Create main page with TRUE cross-origin iframe pointing to example.com
main_html = """
<!DOCTYPE html>
<html>
<head><title>True Cross-Origin Test</title></head>
<body>
<h1>Main Page</h1>
<button id="main-button">Main Button</button>
<iframe id="cross-origin" src="https://example.com" style="width: 800px; height: 600px;"></iframe>
</body>
</html>
"""
# Serve the main page
httpserver.expect_request('/true-cross-origin-test').respond_with_data(main_html, content_type='text/html')
url = httpserver.url_for('/true-cross-origin-test')
# Navigate to the page
await browser_session.navigate_to(url)
# Wait for cross-origin iframe to load (network request)
await asyncio.sleep(5)
# Get DOM state with cross-origin iframe extraction enabled
browser_state = await browser_session.get_browser_state_summary(
include_screenshot=False,
include_recent_events=False,
)
assert browser_state.dom_state is not None
state = browser_state.dom_state
print(f'\n📊 Found {len(state.selector_map)} total elements')
# Find elements from different targets
targets_found = set()
main_page_elements = []
cross_origin_elements = []
for idx, element in state.selector_map.items():
target_id = element.target_id
targets_found.add(target_id)
# Check if element is from cross-origin iframe (example.com)
# Look for links - example.com has a link to iana.org/domains/reserved
if element.attributes:
href = element.attributes.get('href', '')
element_id = element.attributes.get('id', '')
# example.com has a link to iana.org/domains/reserved
if 'iana.org' in href:
cross_origin_elements.append((idx, element))
print(f' ✅ Found cross-origin element: [{idx}] {element.tag_name} href={href}')
elif element_id == 'main-button':
main_page_elements.append((idx, element))
# Verify we found elements from at least 2 different targets
print(f'\n🎯 Found elements from {len(targets_found)} different CDP targets')
# Check if cross-origin iframe loaded
if len(targets_found) < 2:
print('⚠️ Warning: Cross-origin iframe did not create separate CDP target')
print(' This may indicate cross_origin_iframes feature is not working as expected')
pytest.skip('Cross-origin iframe did not create separate CDP target - skipping test')
if len(cross_origin_elements) == 0:
print('⚠️ Warning: No elements found from example.com iframe')
print(' Network may be restricted in CI environment')
pytest.skip('No elements extracted from example.com - skipping click test')
# Verify we found at least one element from the cross-origin iframe
assert len(cross_origin_elements) > 0, 'Expected to find at least one element from cross-origin iframe (example.com)'
# Try clicking the cross-origin element
print('\n🖱️ Testing Click on True Cross-Origin Iframe Element:')
tools = Tools()
link_idx, link_element = cross_origin_elements[0]
print(f' Attempting to click element [{link_idx}] from example.com iframe...')
try:
result = await tools.click(index=link_idx, browser_session=browser_session)
# Check for errors
if result.error:
pytest.fail(f'Click on cross-origin element [{link_idx}] failed with error: {result.error}')
if result.extracted_content and (
'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
):
pytest.fail(f'Click on cross-origin element [{link_idx}] failed: {result.extracted_content}')
print(f' ✅ Click succeeded on cross-origin element [{link_idx}]!')
print(' 🎉 True cross-origin iframe element clicking works!')
except Exception as e:
pytest.fail(f'Exception while clicking cross-origin element [{link_idx}]: {e}')
print('\n✅ Test passed: True cross-origin iframe elements can be clicked')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_proxy.py | tests/ci/browser/test_proxy.py | import asyncio
from typing import Any
import pytest
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.profile import ProxySettings
from browser_use.config import CONFIG
def test_chromium_args_include_proxy_flags():
profile = BrowserProfile(
headless=True,
user_data_dir=str(CONFIG.BROWSER_USE_PROFILES_DIR / 'proxy-smoke'),
proxy=ProxySettings(
server='http://proxy.local:8080',
bypass='localhost,127.0.0.1',
),
)
args = profile.get_args()
assert any(a == '--proxy-server=http://proxy.local:8080' for a in args), args
assert any(a == '--proxy-bypass-list=localhost,127.0.0.1' for a in args), args
@pytest.mark.asyncio
async def test_cdp_proxy_auth_handler_registers_and_responds():
# Create profile with proxy auth credentials
profile = BrowserProfile(
headless=True,
user_data_dir=str(CONFIG.BROWSER_USE_PROFILES_DIR / 'proxy-smoke'),
proxy=ProxySettings(username='user', password='pass'),
)
session = BrowserSession(browser_profile=profile)
# Stub CDP client with minimal Fetch support
class StubCDP:
def __init__(self) -> None:
self.enabled = False
self.last_auth: dict[str, Any] | None = None
self.last_default: dict[str, Any] | None = None
self.auth_callback = None
self.request_paused_callback = None
class _FetchSend:
def __init__(self, outer: 'StubCDP') -> None:
self._outer = outer
async def enable(self, params: dict, session_id: str | None = None) -> None:
self._outer.enabled = True
async def continueWithAuth(self, params: dict, session_id: str | None = None) -> None:
self._outer.last_auth = {'params': params, 'session_id': session_id}
async def continueRequest(self, params: dict, session_id: str | None = None) -> None:
# no-op; included to mirror CDP API surface used by impl
pass
class _Send:
def __init__(self, outer: 'StubCDP') -> None:
self.Fetch = _FetchSend(outer)
class _FetchRegister:
def __init__(self, outer: 'StubCDP') -> None:
self._outer = outer
def authRequired(self, callback) -> None:
self._outer.auth_callback = callback
def requestPaused(self, callback) -> None:
self._outer.request_paused_callback = callback
class _Register:
def __init__(self, outer: 'StubCDP') -> None:
self.Fetch = _FetchRegister(outer)
self.send = _Send(self)
self.register = _Register(self)
root = StubCDP()
# Attach stubs to session
session._cdp_client_root = root # type: ignore[attr-defined]
# No need to attach a real CDPSession; _setup_proxy_auth works with root client
# Should register Fetch handler and enable auth handling without raising
await session._setup_proxy_auth()
assert root.enabled is True
assert callable(root.auth_callback)
# Simulate proxy auth required event
ev = {'requestId': 'r1', 'authChallenge': {'source': 'Proxy'}}
root.auth_callback(ev, session_id='s1') # type: ignore[misc]
# Let scheduled task run
await asyncio.sleep(0.05)
assert root.last_auth is not None
params = root.last_auth['params']
assert params['authChallengeResponse']['response'] == 'ProvideCredentials'
assert params['authChallengeResponse']['username'] == 'user'
assert params['authChallengeResponse']['password'] == 'pass'
assert root.last_auth['session_id'] == 's1'
# Now simulate a non-proxy auth challenge and ensure default handling
ev2 = {'requestId': 'r2', 'authChallenge': {'source': 'Server'}}
root.auth_callback(ev2, session_id='s2') # type: ignore[misc]
await asyncio.sleep(0.05)
# After non-proxy challenge, last_auth should reflect Default response
assert root.last_auth is not None
params2 = root.last_auth['params']
assert params2['requestId'] == 'r2'
assert params2['authChallengeResponse']['response'] == 'Default'
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_output_paths.py | tests/ci/browser/test_output_paths.py | """Test all recording and save functionality for Agent and BrowserSession."""
from pathlib import Path
import pytest
from browser_use import Agent, AgentHistoryList
from browser_use.browser import BrowserProfile, BrowserSession
from tests.ci.conftest import create_mock_llm
@pytest.fixture
def test_dir(tmp_path):
    """Yield a per-test recordings directory under pytest's tmp_path.

    The directory lives inside tmp_path, so pytest removes it
    automatically once the test finishes.
    """
    recordings_dir = tmp_path / 'test_recordings'
    recordings_dir.mkdir(exist_ok=True)
    yield recordings_dir
@pytest.fixture
async def httpserver_url(httpserver):
    """Simple test page.

    Registers a static HTML page (search input + submit button, the
    elements the interactive_llm action script targets) and returns
    its URL on the local pytest-httpserver instance.
    """
    # Use expect_ordered_request with multiple handlers to handle repeated requests
    for _ in range(10):  # Allow up to 10 requests to the same URL
        httpserver.expect_ordered_request('/').respond_with_data(
            """
            <!DOCTYPE html>
            <html>
            <head>
                <title>Test Page</title>
            </head>
            <body>
                <h1>Test Recording Page</h1>
                <input type="text" id="search" placeholder="Search here" />
                <button type="button" id="submit">Submit</button>
            </body>
            </html>
            """,
            content_type='text/html',
        )
    return httpserver.url_for('/')
@pytest.fixture
def llm():
    """Create mocked LLM instance for tests."""
    # No explicit actions passed, so create_mock_llm() falls back to its
    # default action sequence (defined in tests/ci/conftest.py).
    return create_mock_llm()
@pytest.fixture
def interactive_llm(httpserver_url):
    """Create mocked LLM that navigates to page and interacts with elements.

    Replays a fixed five-step script against the page served by the
    httpserver_url fixture: navigate, click the search box, type 'test',
    click submit, then report done. Each entry is the raw JSON string the
    mock LLM returns for one agent step.
    """
    actions = [
        # First action: Navigate to the page
        # (f-string: doubled braces {{ }} produce literal JSON braces)
        f"""
        {{
            "thinking": "null",
            "evaluation_previous_goal": "Starting the task",
            "memory": "Need to navigate to the test page",
            "next_goal": "Navigate to the URL",
            "action": [
                {{
                    "navigate": {{
                        "url": "{httpserver_url}",
                        "new_tab": false
                    }}
                }}
            ]
        }}
        """,
        # Second action: Click in the search box
        """
        {
            "thinking": "null",
            "evaluation_previous_goal": "Successfully navigated to the page",
            "memory": "Page loaded, can see search box and submit button",
            "next_goal": "Click on the search box to focus it",
            "action": [
                {
                    "click": {
                        "index": 0
                    }
                }
            ]
        }
        """,
        # Third action: Type text in the search box
        """
        {
            "thinking": "null",
            "evaluation_previous_goal": "Clicked on search box",
            "memory": "Search box is focused and ready for input",
            "next_goal": "Type 'test' in the search box",
            "action": [
                {
                    "input_text": {
                        "index": 0,
                        "text": "test"
                    }
                }
            ]
        }
        """,
        # Fourth action: Click submit button
        """
        {
            "thinking": "null",
            "evaluation_previous_goal": "Typed 'test' in search box",
            "memory": "Text 'test' has been entered successfully",
            "next_goal": "Click the submit button to complete the task",
            "action": [
                {
                    "click": {
                        "index": 1
                    }
                }
            ]
        }
        """,
        # Fifth action: Done - task completed
        """
        {
            "thinking": "null",
            "evaluation_previous_goal": "Clicked the submit button",
            "memory": "Successfully navigated to the page, typed 'test' in the search box, and clicked submit",
            "next_goal": "Task completed",
            "action": [
                {
                    "done": {
                        "text": "Task completed - typed 'test' in search box and clicked submit",
                        "success": true
                    }
                }
            ]
        }
        """,
    ]
    return create_mock_llm(actions)
class TestAgentRecordings:
    """Test Agent save_conversation_path and generate_gif parameters."""

    @pytest.mark.parametrize('path_type', ['with_slash', 'without_slash', 'deep_directory'])
    async def test_save_conversation_path(self, test_dir, httpserver_url, llm, path_type):
        """Test saving conversation with different path types.

        Verifies that a conversation_*.txt transcript is written inside the
        configured directory regardless of how deep the path is.
        """
        # Build the target directory for each parametrized variant.
        if path_type == 'with_slash':
            conversation_path = test_dir / 'logs' / 'conversation'
        elif path_type == 'without_slash':
            conversation_path = test_dir / 'logs'
        else:  # deep_directory
            conversation_path = test_dir / 'logs' / 'deep' / 'directory' / 'conversation'

        browser_session = BrowserSession(browser_profile=BrowserProfile(headless=True, disable_security=True, user_data_dir=None))
        await browser_session.start()
        try:
            agent = Agent(
                task=f'go to {httpserver_url} and type "test" in the search box',
                llm=llm,
                browser_session=browser_session,
                save_conversation_path=str(conversation_path),
            )
            history: AgentHistoryList = await agent.run(max_steps=2)
            result = history.final_result()
            assert result is not None
            # Check that the conversation directory and files were created
            assert conversation_path.exists(), f'{path_type}: conversation directory was not created'
            # Files are now always created as conversation_<agent_id>_<step>.txt inside the directory
            conversation_files = list(conversation_path.glob('conversation_*.txt'))
            assert len(conversation_files) > 0, f'{path_type}: conversation file was not created in {conversation_path}'
        finally:
            await browser_session.kill()

    @pytest.mark.skip(reason='TODO: fix')
    @pytest.mark.parametrize('generate_gif', [False, True, 'custom_path'])
    async def test_generate_gif(self, test_dir, httpserver_url, llm, generate_gif):
        """Test GIF generation with different settings."""
        # Clean up any existing GIFs first so leftover files from earlier
        # runs cannot make the assertions pass/fail spuriously.
        for gif in Path.cwd().glob('agent_*.gif'):
            gif.unlink()
        gif_param = generate_gif
        expected_gif_path = None
        if generate_gif == 'custom_path':
            expected_gif_path = test_dir / 'custom_agent.gif'
            gif_param = str(expected_gif_path)
        browser_session = BrowserSession(browser_profile=BrowserProfile(headless=True, disable_security=True, user_data_dir=None))
        await browser_session.start()
        try:
            agent = Agent(
                task=f'go to {httpserver_url}',
                llm=llm,
                browser_session=browser_session,
                generate_gif=gif_param,
            )
            history: AgentHistoryList = await agent.run(max_steps=2)
            result = history.final_result()
            assert result is not None
            # Check GIF creation
            if generate_gif is False:
                gif_files = list(Path.cwd().glob('*.gif'))
                assert len(gif_files) == 0, 'GIF file was created when generate_gif=False'
            elif generate_gif is True:
                # With mock LLM that doesn't navigate, all screenshots will be about:blank placeholders
                # So no GIF will be created (this is expected behavior)
                gif_files = list(Path.cwd().glob('agent_history.gif'))
                assert len(gif_files) == 0, 'GIF should not be created when all screenshots are placeholders'
            else:  # custom_path
                assert expected_gif_path is not None, 'expected_gif_path should be set for custom_path'
                # With mock LLM that doesn't navigate, no GIF will be created
                assert not expected_gif_path.exists(), 'GIF should not be created when all screenshots are placeholders'
        finally:
            await browser_session.kill()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_cross_origin_click.py | tests/ci/browser/test_cross_origin_click.py | """Test clicking elements inside cross-origin iframes."""
import asyncio
import pytest
from browser_use.browser.profile import BrowserProfile, ViewportSize
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
@pytest.fixture
async def browser_session():
    """Yield a started browser session configured for cross-origin iframes."""
    profile = BrowserProfile(
        headless=True,
        user_data_dir=None,
        keep_alive=True,
        window_size=ViewportSize(width=1920, height=1400),
        cross_origin_iframes=True,  # Enable cross-origin iframe extraction
    )
    session = BrowserSession(browser_profile=profile)
    await session.start()
    yield session
    await session.kill()
class TestCrossOriginIframeClick:
    """Test clicking elements inside cross-origin iframes."""

    async def test_click_element_in_cross_origin_iframe(self, httpserver, browser_session: BrowserSession):
        """Verify that elements inside iframes in different CDP targets can be clicked.

        Serves a main page embedding an iframe, extracts the DOM through the
        event-bus/watchdog path, then clicks an element that lives inside the
        iframe's CDP target.
        """
        # Create iframe content with clickable elements
        iframe_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Iframe Page</title></head>
        <body>
            <h1>Iframe Content</h1>
            <a href="https://test-domain.example/page" id="iframe-link">Test Link</a>
            <button id="iframe-button">Iframe Button</button>
        </body>
        </html>
        """
        # Create main page with iframe pointing to our test server
        main_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Multi-Target Test</title></head>
        <body>
            <h1>Main Page</h1>
            <button id="main-button">Main Button</button>
            <iframe id="test-iframe" src="/iframe-content" style="width: 800px; height: 600px;"></iframe>
        </body>
        </html>
        """
        # Serve both pages
        httpserver.expect_request('/multi-target-test').respond_with_data(main_html, content_type='text/html')
        httpserver.expect_request('/iframe-content').respond_with_data(iframe_html, content_type='text/html')
        url = httpserver.url_for('/multi-target-test')
        # Navigate to the page
        await browser_session.navigate_to(url)
        # Wait for iframe to load
        await asyncio.sleep(2)
        # Get DOM state with cross-origin iframe extraction enabled
        # Use browser_session.get_browser_state_summary() instead of directly creating DomService
        # This goes through the proper event bus and watchdog system
        browser_state = await browser_session.get_browser_state_summary(
            include_screenshot=False,
            include_recent_events=False,
        )
        assert browser_state.dom_state is not None
        state = browser_state.dom_state
        print(f'\n📊 Found {len(state.selector_map)} total elements')
        # Find elements from different targets
        targets_found = set()
        main_page_elements = []
        iframe_elements = []
        for idx, element in state.selector_map.items():
            target_id = element.target_id
            targets_found.add(target_id)
            # Check if element is from iframe (identified by id attributes we set)
            # Iframe elements will have a different target_id when cross_origin_iframes=True
            if element.attributes:
                element_id = element.attributes.get('id', '')
                if element_id in ('iframe-link', 'iframe-button'):
                    iframe_elements.append((idx, element))
                    print(f'  ✅ Found iframe element: [{idx}] {element.tag_name} id={element_id}')
                elif element_id == 'main-button':
                    main_page_elements.append((idx, element))
        # Verify we found elements from at least 2 different targets
        print(f'\n🎯 Found elements from {len(targets_found)} different CDP targets')
        # Check if iframe elements were found
        if len(iframe_elements) == 0:
            pytest.fail('Expected to find at least one element from iframe, but found none')
        # Verify we found at least one element from the iframe
        assert len(iframe_elements) > 0, 'Expected to find at least one element from iframe'
        # Try clicking the iframe element
        print('\n🖱️ Testing Click on Iframe Element:')
        tools = Tools()
        link_idx, link_element = iframe_elements[0]
        print(f'  Attempting to click element [{link_idx}] from iframe...')
        try:
            result = await tools.click(index=link_idx, browser_session=browser_session)
            # Check for errors
            if result.error:
                pytest.fail(f'Click on iframe element [{link_idx}] failed with error: {result.error}')
            if result.extracted_content and (
                'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
            ):
                pytest.fail(f'Click on iframe element [{link_idx}] failed: {result.extracted_content}')
            print(f'  ✅ Click succeeded on iframe element [{link_idx}]!')
            print('  🎉 Iframe element clicking works!')
        except Exception as e:
            pytest.fail(f'Exception while clicking iframe element [{link_idx}]: {e}')
        print('\n✅ Test passed: Iframe elements can be clicked')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_session_start.py | tests/ci/browser/test_session_start.py | """
Test script for BrowserSession.start() method to ensure proper initialization,
concurrency handling, and error handling.
Tests cover:
- Calling .start() on a session that's already started
- Simultaneously calling .start() from two parallel coroutines
- Calling .start() on a session that's started but has a closed browser connection
- Calling .close() on a session that hasn't been started yet
"""
import asyncio
import logging
import pytest
from browser_use.browser.profile import (
BROWSERUSE_DEFAULT_CHANNEL,
BrowserChannel,
BrowserProfile,
)
from browser_use.browser.session import BrowserSession
from browser_use.config import CONFIG
# Set up test logging
logger = logging.getLogger('browser_session_start_tests')
# logger.setLevel(logging.DEBUG)
# run with pytest -k test_user_data_dir_not_allowed_to_corrupt_default_profile
class TestBrowserSessionStart:
    """Tests for BrowserSession.start() method initialization and concurrency."""

    @pytest.fixture(scope='module')
    async def browser_profile(self):
        """Create and provide a BrowserProfile with headless mode."""
        profile = BrowserProfile(headless=True, user_data_dir=None, keep_alive=False)
        yield profile

    @pytest.fixture(scope='function')
    async def browser_session(self, browser_profile):
        """Create a BrowserSession instance without starting it."""
        session = BrowserSession(browser_profile=browser_profile)
        yield session
        await session.kill()

    async def test_start_already_started_session(self, browser_session):
        """Test calling .start() on a session that's already started.

        start() must be idempotent: the second call should not tear down or
        re-create the existing CDP connection.
        """
        # logger.info('Testing start on already started session')
        # Start the session for the first time
        await browser_session.start()
        assert browser_session._cdp_client_root is not None
        # Start the session again - should return immediately without re-initialization
        await browser_session.start()
        assert browser_session._cdp_client_root is not None

    # @pytest.mark.skip(reason="Race condition - DOMWatchdog tries to inject scripts into tab that's being closed")
    # async def test_page_lifecycle_management(self, browser_session: BrowserSession):
    #     """Test session handles page lifecycle correctly."""
    #     # logger.info('Testing page lifecycle management')
    #     # Start the session and get initial state
    #     await browser_session.start()
    #     initial_tabs = await browser_session.get_tabs()
    #     initial_count = len(initial_tabs)
    #     # Get current tab info
    #     current_url = await browser_session.get_current_page_url()
    #     assert current_url is not None
    #     # Get current tab ID
    #     current_tab_id = browser_session.agent_focus.target_id if browser_session.agent_focus else None
    #     assert current_tab_id is not None
    #     # Close the current tab using the event system
    #     from browser_use.browser.events import CloseTabEvent
    #     close_event = browser_session.event_bus.dispatch(CloseTabEvent(target_id=current_tab_id))
    #     await close_event
    #     # Operations should still work - may create new page or use existing
    #     tabs_after_close = await browser_session.get_tabs()
    #     assert isinstance(tabs_after_close, list)
    #     # Create a new tab explicitly
    #     event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url='about:blank', new_tab=True))
    #     await event
    #     await event.event_result(raise_if_any=True, raise_if_none=False)
    #     # Should have at least one tab now
    #     final_tabs = await browser_session.get_tabs()
    #     assert len(final_tabs) >= 1

    async def test_user_data_dir_not_allowed_to_corrupt_default_profile(self):
        """Test user_data_dir handling for different browser channels and version mismatches."""
        # Test 1: Chromium with default user_data_dir and default channel should work fine
        session = BrowserSession(
            browser_profile=BrowserProfile(
                headless=True,
                user_data_dir=CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR,
                channel=BROWSERUSE_DEFAULT_CHANNEL,  # chromium
                keep_alive=False,
            ),
        )
        try:
            await session.start()
            assert session._cdp_client_root is not None
            # Verify the user_data_dir wasn't changed
            assert session.browser_profile.user_data_dir == CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR
        finally:
            await session.kill()
        # Test 2: Chrome with default user_data_dir should change dir AND copy to temp
        profile2 = BrowserProfile(
            headless=True,
            user_data_dir=CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR,
            channel=BrowserChannel.CHROME,
            keep_alive=False,
        )
        # The validator should have changed the user_data_dir to avoid corruption
        # And then _copy_profile copies it to a temp directory (Chrome only)
        assert profile2.user_data_dir != CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR
        assert 'browser-use-user-data-dir-' in str(profile2.user_data_dir)
        # Test 3: Edge with default user_data_dir should also change
        # (moved to a sibling 'default-msedge' dir, not a temp copy)
        profile3 = BrowserProfile(
            headless=True,
            user_data_dir=CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR,
            channel=BrowserChannel.MSEDGE,
            keep_alive=False,
        )
        assert profile3.user_data_dir != CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR
        assert profile3.user_data_dir == CONFIG.BROWSER_USE_DEFAULT_USER_DATA_DIR.parent / 'default-msedge'
        assert 'browser-use-user-data-dir-' not in str(profile3.user_data_dir)
class TestBrowserSessionReusePatterns:
    """Tests for all browser re-use patterns documented in docs/customize/real-browser.mdx"""

    async def test_sequential_agents_same_profile_different_browser(self, mock_llm):
        """Test Sequential Agents, Same Profile, Different Browser pattern.

        Each agent owns its own BrowserSession built from the shared profile;
        a session must be closed once its agent's run() completes.
        """
        from browser_use import Agent
        from browser_use.browser.profile import BrowserProfile

        # Create a reusable profile
        reused_profile = BrowserProfile(
            user_data_dir=None,  # Use temp dir for testing
            headless=True,
        )
        # First agent
        agent1 = Agent(
            task='The first task...',
            llm=mock_llm,
            browser_profile=reused_profile,
        )
        await agent1.run()
        # Verify first agent's session is closed.
        # NOTE: this was previously written as `assert not X is not None`,
        # which parses as `assert X is None` — stated directly here.
        assert agent1.browser_session is not None
        assert agent1.browser_session._cdp_client_root is None
        # Second agent with same profile
        agent2 = Agent(
            task='The second task...',
            llm=mock_llm,
            browser_profile=reused_profile,
            # Disable memory for tests
        )
        await agent2.run()
        # Verify second agent created a new session (and it too is closed after run)
        assert agent2.browser_session is not None
        assert agent1.browser_session is not agent2.browser_session
        assert agent2.browser_session._cdp_client_root is None

    async def test_sequential_agents_same_profile_same_browser(self, mock_llm):
        """Test Sequential Agents, Same Profile, Same Browser pattern.

        With keep_alive=True, one manually-started session survives across
        multiple agent runs and both agents share the same browser.
        """
        from browser_use import Agent, BrowserSession

        # Create a reusable session with keep_alive
        reused_session = BrowserSession(
            browser_profile=BrowserProfile(
                user_data_dir=None,  # Use temp dir for testing
                headless=True,
                keep_alive=True,  # Don't close browser after agent.run()
            ),
        )
        try:
            # Start the session manually (agents will reuse this initialized session)
            await reused_session.start()
            # First agent
            agent1 = Agent(
                task='The first task...',
                llm=mock_llm,
                browser_session=reused_session,
                # Disable memory for tests
            )
            await agent1.run()
            # Verify session is still alive
            assert reused_session._cdp_client_root is not None
            # Second agent reusing the same session
            agent2 = Agent(
                task='The second task...',
                llm=mock_llm,
                browser_session=reused_session,
                # Disable memory for tests
            )
            await agent2.run()
            # Verify same browser was used (using __eq__ to check browser_pid, cdp_url)
            assert agent1.browser_session == agent2.browser_session
            assert agent1.browser_session == reused_session
            assert reused_session._cdp_client_root is not None
        finally:
            await reused_session.kill()
class TestBrowserSessionEventSystem:
    """Tests for the new event system integration in BrowserSession."""

    @pytest.fixture(scope='function')
    async def browser_session(self):
        """Create a BrowserSession instance for event system testing."""
        profile = BrowserProfile(headless=True, user_data_dir=None, keep_alive=False)
        session = BrowserSession(browser_profile=profile)
        yield session
        await session.kill()

    async def test_event_bus_initialization(self, browser_session):
        """Test that event bus is properly initialized with unique name."""
        # Event bus should be created during __init__
        assert browser_session.event_bus is not None
        assert browser_session.event_bus.name.startswith('EventBus_')
        # Event bus name format may vary, just check it exists

    async def test_event_handlers_registration(self, browser_session: BrowserSession):
        """Test that event handlers are properly registered."""
        # Attach all watchdogs to register their handlers
        await browser_session.attach_all_watchdogs()
        # Check that handlers are registered in the event bus
        # (import here to keep the check list next to its usage)
        from browser_use.browser.events import (
            BrowserStartEvent,
            BrowserStateRequestEvent,
            BrowserStopEvent,
            ClickElementEvent,
            CloseTabEvent,
            ScreenshotEvent,
            ScrollEvent,
            TypeTextEvent,
        )

        # These event types should have handlers registered
        event_types_with_handlers = [
            BrowserStartEvent,
            BrowserStopEvent,
            ClickElementEvent,
            TypeTextEvent,
            ScrollEvent,
            CloseTabEvent,
            BrowserStateRequestEvent,
            ScreenshotEvent,
        ]
        for event_type in event_types_with_handlers:
            handlers = browser_session.event_bus.handlers.get(event_type.__name__, [])
            assert len(handlers) > 0, f'No handlers registered for {event_type.__name__}'

    async def test_direct_event_dispatching(self, browser_session):
        """Test direct event dispatching without using the public API."""
        from browser_use.browser.events import BrowserConnectedEvent, BrowserStartEvent

        # Dispatch BrowserStartEvent directly
        start_event = browser_session.event_bus.dispatch(BrowserStartEvent())
        # Wait for event to complete
        await start_event
        # Check if BrowserConnectedEvent was dispatched
        assert browser_session._cdp_client_root is not None
        # Check event history
        event_history = list(browser_session.event_bus.event_history.values())
        assert len(event_history) >= 2  # BrowserStartEvent + BrowserConnectedEvent + others
        # Find the BrowserConnectedEvent in history
        started_events = [e for e in event_history if isinstance(e, BrowserConnectedEvent)]
        assert len(started_events) >= 1
        assert started_events[0].cdp_url is not None

    async def test_event_system_error_handling(self, browser_session):
        """Test error handling in event system."""
        from browser_use.browser.events import BrowserStartEvent

        # Create session with invalid CDP URL to trigger error
        error_session = BrowserSession(
            browser_profile=BrowserProfile(headless=True),
            cdp_url='http://localhost:99999',  # Invalid port
        )
        try:
            # Dispatch start event directly - should trigger error handling
            start_event = error_session.event_bus.dispatch(BrowserStartEvent())
            # The event bus catches and logs the error, but the event awaits successfully
            await start_event
            # The session should not be initialized due to the error
            assert error_session._cdp_client_root is None, 'Session should not be initialized after connection error'
            # Verify the error was logged in the event history (good enough for error handling test)
            assert len(error_session.event_bus.event_history) > 0, 'Event should be tracked even with errors'
        finally:
            await error_session.kill()

    async def test_concurrent_event_dispatching(self, browser_session: BrowserSession):
        """Test that concurrent events are handled properly."""
        from browser_use.browser.events import ScreenshotEvent

        # Start browser first
        await browser_session.start()
        # Dispatch multiple events concurrently
        screenshot_event1 = browser_session.event_bus.dispatch(ScreenshotEvent())
        screenshot_event2 = browser_session.event_bus.dispatch(ScreenshotEvent())
        # Both should complete successfully
        results = await asyncio.gather(screenshot_event1, screenshot_event2, return_exceptions=True)
        # Check that no exceptions were raised
        for result in results:
            assert not isinstance(result, Exception), f'Event failed with: {result}'

    # async def test_many_parallel_browser_sessions(self):
    #     """Test spawning 12 parallel browser_sessions with different settings and ensure they all work"""
    #     from browser_use import BrowserSession
    #     browser_sessions = []
    #     for i in range(3):
    #         browser_sessions.append(
    #             BrowserSession(
    #                 browser_profile=BrowserProfile(
    #                     user_data_dir=None,
    #                     headless=True,
    #                     keep_alive=True,
    #                 ),
    #             )
    #         )
    #     for i in range(3):
    #         browser_sessions.append(
    #             BrowserSession(
    #                 browser_profile=BrowserProfile(
    #                     user_data_dir=Path(tempfile.mkdtemp(prefix=f'browseruse-tmp-{i}')),
    #                     headless=True,
    #                     keep_alive=True,
    #                 ),
    #             )
    #         )
    #     for i in range(3):
    #         browser_sessions.append(
    #             BrowserSession(
    #                 browser_profile=BrowserProfile(
    #                     user_data_dir=None,
    #                     headless=True,
    #                     keep_alive=False,
    #                 ),
    #             )
    #         )
    #     for i in range(3):
    #         browser_sessions.append(
    #             BrowserSession(
    #                 browser_profile=BrowserProfile(
    #                     user_data_dir=Path(tempfile.mkdtemp(prefix=f'browseruse-tmp-{i}')),
    #                     headless=True,
    #                     keep_alive=False,
    #                 ),
    #             )
    #         )
    #     print('Starting many parallel browser sessions...')
    #     await asyncio.gather(*[browser_session.start() for browser_session in browser_sessions])
    #     print('Ensuring all parallel browser sessions are connected and usable...')
    #     new_tab_tasks = []
    #     for browser_session in browser_sessions:
    #         assert browser_session._cdp_client_root is not None
    #         assert browser_session._cdp_client_root is not None
    #         new_tab_tasks.append(browser_session.create_new_tab('chrome://version'))
    #     await asyncio.gather(*new_tab_tasks)
    #     print('killing every 3rd browser_session to test parallel shutdown')
    #     kill_tasks = []
    #     for i in range(0, len(browser_sessions), 3):
    #         kill_tasks.append(browser_sessions[i].kill())
    #         browser_sessions[i] = None
    #     results = await asyncio.gather(*kill_tasks, return_exceptions=True)
    #     # Check that no exceptions were raised during cleanup
    #     for i, result in enumerate(results):
    #         if isinstance(result, Exception):
    #             print(f'Warning: Browser session kill raised exception: {type(result).__name__}: {result}')
    #     print('ensuring the remaining browser_sessions are still connected and usable')
    #     new_tab_tasks = []
    #     screenshot_tasks = []
    #     for browser_session in filter(bool, browser_sessions):
    #         assert browser_session._cdp_client_root is not None
    #         assert browser_session._cdp_client_root is not None
    #         new_tab_tasks.append(browser_session.create_new_tab('chrome://version'))
    #         screenshot_tasks.append(browser_session.take_screenshot())
    #     await asyncio.gather(*new_tab_tasks)
    #     await asyncio.gather(*screenshot_tasks)
    #     kill_tasks = []
    #     print('killing the remaining browser_sessions')
    #     for browser_session in filter(bool, browser_sessions):
    #         kill_tasks.append(browser_session.kill())
    #     results = await asyncio.gather(*kill_tasks, return_exceptions=True)
    #     # Check that no exceptions were raised during cleanup
    #     for i, result in enumerate(results):
    #         if isinstance(result, Exception):
    #             print(f'Warning: Browser session kill raised exception: {type(result).__name__}: {result}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_navigation.py | tests/ci/browser/test_navigation.py | """
Test navigation edge cases: broken pages, slow loading, non-existing pages.
Tests verify that:
1. Agent can handle navigation to broken/malformed HTML pages
2. Agent can handle slow-loading pages without hanging
3. Agent can handle non-existing pages (404, connection refused, etc.)
4. Agent can recover and continue making LLM calls after encountering these issues
All tests use:
- max_steps=3 to limit agent actions
- 120s timeout to fail if test takes too long
- Mock LLM to verify agent can still make decisions after navigation errors
Usage:
uv run pytest tests/ci/browser/test_navigation.py -v -s
"""
import asyncio
import time
import pytest
from pytest_httpserver import HTTPServer
from werkzeug import Response
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
    """Create and provide a test HTTP server for navigation tests.

    Routes:
      /broken   - truncated/malformed HTML
      /valid    - well-formed page for recovery checks
      /slow     - responds after a 10 second delay
      /notfound - returns HTTP 404
    """
    server = HTTPServer()
    server.start()
    # Route 1: Broken/malformed HTML page
    server.expect_request('/broken').respond_with_data(
        '<html><head><title>Broken Page</title></head><body><h1>Incomplete HTML',
        content_type='text/html',
    )
    # Route 2: Valid page for testing navigation after error recovery
    server.expect_request('/valid').respond_with_data(
        '<html><head><title>Valid Page</title></head><body><h1>Valid Page</h1><p>This page loaded successfully</p></body></html>',
        content_type='text/html',
    )

    # Route 3: Slow loading page - delays 10 seconds before responding
    # (blocking sleep runs in the server's handler thread, not the test)
    def slow_handler(request):
        time.sleep(10)
        return Response(
            '<html><head><title>Slow Page</title></head><body><h1>Slow Loading Page</h1><p>This page took 10 seconds to load</p></body></html>',
            content_type='text/html',
        )

    server.expect_request('/slow').respond_with_handler(slow_handler)
    # Route 4: 404 page
    server.expect_request('/notfound').respond_with_data(
        '<html><head><title>404 Not Found</title></head><body><h1>404 - Page Not Found</h1></body></html>',
        status=404,
        content_type='text/html',
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Return the base URL (scheme://host:port) of the test HTTP server."""
    return 'http://{}:{}'.format(http_server.host, http_server.port)
@pytest.fixture(scope='function')
async def browser_session():
    """Yield a started, headless browser session for navigation tests."""
    profile = BrowserProfile(
        headless=True,
        user_data_dir=None,
        keep_alive=True,
    )
    session = BrowserSession(browser_profile=profile)
    await session.start()
    yield session
    await session.kill()
class TestNavigationEdgeCases:
    """Test navigation error handling and recovery."""

    async def test_broken_page_navigation(self, browser_session, base_url):
        """Test that agent can handle broken/malformed HTML and still make LLM calls."""
        # Create actions for the agent:
        # 1. Navigate to broken page
        # 2. Check if page exists
        # 3. Done
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the broken page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to broken page",
                "next_goal": "Navigate to broken page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/broken"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "I should check if the page loaded",
                "evaluation_previous_goal": "Navigated to page",
                "memory": "Checking page state",
                "next_goal": "Verify page exists",
                "action": [
                    {
                        "done": {
                            "text": "Page exists despite broken HTML",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/broken and check if page exists',
            llm=mock_llm,
            browser_session=browser_session,
        )
        # Run with timeout - should complete within 2 minutes
        try:
            history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
            assert len(history) > 0, 'Agent should have completed at least one step'
            # If agent completes successfully, it means LLM was called and functioning
            final_result = history.final_result()
            assert final_result is not None, 'Agent should return a final result'
        except TimeoutError:
            pytest.fail('Test timed out after 2 minutes - agent hung on broken page')
    async def test_slow_loading_page(self, browser_session, base_url):
        """Test that agent can handle slow-loading pages without hanging."""
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the slow page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to slow page",
                "next_goal": "Navigate to slow page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/slow"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "The page loaded, even though it was slow",
                "evaluation_previous_goal": "Successfully navigated",
                "memory": "Page loaded after delay",
                "next_goal": "Complete task",
                "action": [
                    {
                        "done": {
                            "text": "Slow page loaded successfully",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/slow and wait for it to load',
            llm=mock_llm,
            browser_session=browser_session,
        )
        # Run with timeout - should complete within 2 minutes
        start_time = time.time()
        try:
            history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
            elapsed = time.time() - start_time
            assert len(history) > 0, 'Agent should have completed at least one step'
            # The /slow route delays 10s, so a run shorter than that means the
            # agent did not actually wait for the page.
            assert elapsed >= 10, f'Agent should have waited for slow page (10s delay), but only took {elapsed:.1f}s'
            final_result = history.final_result()
            assert final_result is not None, 'Agent should return a final result'
        except TimeoutError:
            pytest.fail('Test timed out after 2 minutes - agent hung on slow page')
    async def test_nonexisting_page_404(self, browser_session, base_url):
        """Test that agent can handle 404 pages and still make LLM calls."""
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the non-existing page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to 404 page",
                "next_goal": "Navigate to non-existing page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/notfound"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "I got a 404 error but the browser still works",
                "evaluation_previous_goal": "Navigated to 404 page",
                "memory": "Page not found",
                "next_goal": "Report that page does not exist",
                "action": [
                    {
                        "done": {
                            "text": "Page does not exist (404 error)",
                            "success": false
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/notfound and check if page exists',
            llm=mock_llm,
            browser_session=browser_session,
        )
        # Run with timeout - should complete within 2 minutes
        try:
            history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
            assert len(history) > 0, 'Agent should have completed at least one step'
            final_result = history.final_result()
            assert final_result is not None, 'Agent should return a final result'
        except TimeoutError:
            pytest.fail('Test timed out after 2 minutes - agent hung on 404 page')
async def test_nonexisting_domain(self, browser_session):
"""Test that agent can handle completely non-existing domains (connection refused)."""
# Use a localhost port that's not listening
nonexisting_url = 'http://localhost:59999/page'
actions = [
f"""
{{
"thinking": "I need to navigate to a non-existing domain",
"evaluation_previous_goal": "Starting task",
"memory": "Attempting to navigate",
"next_goal": "Navigate to non-existing domain",
"action": [
{{
"navigate": {{
"url": "{nonexisting_url}"
}}
}}
]
}}
""",
"""
{
"thinking": "The connection failed but I can still proceed",
"evaluation_previous_goal": "Connection failed",
"memory": "Domain does not exist",
"next_goal": "Report failure",
"action": [
{
"done": {
"text": "Domain does not exist (connection refused)",
"success": false
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
agent = Agent(
task=f'Navigate to {nonexisting_url} and check if it exists',
llm=mock_llm,
browser_session=browser_session,
)
# Run with timeout - should complete within 2 minutes
try:
history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
assert len(history) > 0, 'Agent should have completed at least one step'
final_result = history.final_result()
assert final_result is not None, 'Agent should return a final result'
except TimeoutError:
pytest.fail('Test timed out after 2 minutes - agent hung on non-existing domain')
async def test_recovery_after_navigation_error(self, browser_session, base_url):
"""Test that agent can recover and navigate to valid page after encountering error."""
actions = [
f"""
{{
"thinking": "First, I'll try the broken page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating to broken page",
"next_goal": "Navigate to broken page first",
"action": [
{{
"navigate": {{
"url": "{base_url}/broken"
}}
}}
]
}}
""",
f"""
{{
"thinking": "That page was broken, let me try a valid page now",
"evaluation_previous_goal": "Broken page loaded",
"memory": "Now navigating to valid page",
"next_goal": "Navigate to valid page",
"action": [
{{
"navigate": {{
"url": "{base_url}/valid"
}}
}}
]
}}
""",
"""
{
"thinking": "The valid page loaded successfully after the broken one",
"evaluation_previous_goal": "Valid page loaded",
"memory": "Successfully recovered from error",
"next_goal": "Complete task",
"action": [
{
"done": {
"text": "Successfully navigated to valid page after broken page",
"success": true
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
agent = Agent(
task=f'First navigate to {base_url}/broken, then navigate to {base_url}/valid',
llm=mock_llm,
browser_session=browser_session,
)
# Run with timeout - should complete within 2 minutes
try:
history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
assert len(history) >= 2, 'Agent should have completed at least 2 steps (broken -> valid)'
# Verify final page is the valid one
final_url = await browser_session.get_current_page_url()
assert final_url.endswith('/valid'), f'Final URL should be /valid, got {final_url}'
# Verify agent completed successfully
final_result = history.final_result()
assert final_result is not None, 'Agent should return a final result'
except TimeoutError:
pytest.fail('Test timed out after 2 minutes - agent could not recover from broken page')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_screenshot.py | tests/ci/browser/test_screenshot.py | import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser.events import NavigateToUrlEvent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server for screenshot tests."""
server = HTTPServer()
server.start()
# Route: Page with visible content for screenshot testing
server.expect_request('/screenshot-page').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>Screenshot Test Page</title>
<style>
body { font-family: Arial; padding: 20px; background: #f0f0f0; }
h1 { color: #333; font-size: 32px; }
.content { background: white; padding: 20px; border-radius: 8px; margin: 10px 0; }
</style>
</head>
<body>
<h1>Screenshot Test Page</h1>
<div class="content">
<p>This page is used to test screenshot capture with vision enabled.</p>
<p>The agent should capture a screenshot when navigating to this page.</p>
</div>
</body>
</html>
""",
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='function')
async def browser_session():
session = BrowserSession(browser_profile=BrowserProfile(headless=True))
await session.start()
yield session
await session.kill()
@pytest.mark.asyncio
async def test_basic_screenshots(browser_session: BrowserSession, httpserver):
"""Navigate to a local page and ensure screenshot helpers return bytes."""
html = """
<html><body><h1 id='title'>Hello</h1><p>Screenshot demo.</p></body></html>
"""
httpserver.expect_request('/demo').respond_with_data(html, content_type='text/html')
url = httpserver.url_for('/demo')
nav = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url, new_tab=False))
await nav
data = await browser_session.take_screenshot(full_page=False)
assert data, 'Viewport screenshot returned no data'
element = await browser_session.screenshot_element('h1')
assert element, 'Element screenshot returned no data'
async def test_agent_screenshot_with_vision_enabled(browser_session, base_url):
"""Test that agent captures screenshots when vision is enabled.
This integration test verifies that:
1. Agent with vision=True navigates to a page
2. After prepare_context/update message manager, screenshot is captured
3. Screenshot is included in the agent's history state
"""
# Create mock LLM actions
actions = [
f"""
{{
"thinking": "I'll navigate to the screenshot test page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating to page",
"next_goal": "Navigate to test page",
"action": [
{{
"navigate": {{
"url": "{base_url}/screenshot-page",
"new_tab": false
}}
}}
]
}}
""",
"""
{
"thinking": "Page loaded, completing task",
"evaluation_previous_goal": "Page loaded",
"memory": "Task completed",
"next_goal": "Complete task",
"action": [
{
"done": {
"text": "Successfully navigated and captured screenshot",
"success": true
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
# Create agent with vision enabled
agent = Agent(
task=f'Navigate to {base_url}/screenshot-page',
llm=mock_llm,
browser_session=browser_session,
use_vision=True, # Enable vision/screenshots
)
# Run agent
history = await agent.run(max_steps=2)
# Verify agent completed successfully
assert len(history) >= 1, 'Agent should have completed at least 1 step'
final_result = history.final_result()
assert final_result is not None, 'Agent should return a final result'
# Verify screenshots were captured in the history
screenshot_found = False
for i, step in enumerate(history.history):
# Check if browser state has screenshot path
if step.state and hasattr(step.state, 'screenshot_path') and step.state.screenshot_path:
screenshot_found = True
print(f'\n✅ Step {i + 1}: Screenshot captured at {step.state.screenshot_path}')
# Verify screenshot file exists (it should be saved to disk)
import os
assert os.path.exists(step.state.screenshot_path), f'Screenshot file should exist at {step.state.screenshot_path}'
# Verify screenshot file has content
screenshot_size = os.path.getsize(step.state.screenshot_path)
assert screenshot_size > 0, f'Screenshot file should have content, got {screenshot_size} bytes'
print(f' Screenshot size: {screenshot_size} bytes')
assert screenshot_found, 'At least one screenshot should be captured when vision is enabled'
print('\n🎉 Integration test passed: Screenshots are captured correctly with vision enabled')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/browser/test_cdp_headers.py | tests/ci/browser/test_cdp_headers.py | """
Test that headers are properly passed to CDPClient for authenticated remote browser connections.
This tests the fix for: When using browser-use with remote browser services that require
authentication headers, these headers need to be included in the WebSocket handshake.
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
def test_browser_profile_headers_attribute():
"""Test that BrowserProfile correctly stores headers attribute."""
test_headers = {'Authorization': 'Bearer token123', 'X-API-Key': 'key456'}
profile = BrowserProfile(headers=test_headers)
# Verify headers are stored correctly
assert profile.headers == test_headers
# Test with profile without headers
profile_no_headers = BrowserProfile()
assert profile_no_headers.headers is None
def test_browser_profile_headers_inherited():
"""Test that BrowserSession can access headers from its profile."""
test_headers = {'Authorization': 'Bearer test-token'}
session = BrowserSession(cdp_url='wss://example.com/cdp', headers=test_headers)
assert session.browser_profile.headers == test_headers
@pytest.mark.asyncio
async def test_cdp_client_headers_passed_on_connect():
"""Test that headers from BrowserProfile are passed to CDPClient on connect()."""
test_headers = {
'Authorization': 'AWS4-HMAC-SHA256 Credential=test...',
'X-Amz-Date': '20250914T163733Z',
'X-Amz-Security-Token': 'test-token',
'Host': 'remote-browser.example.com',
}
session = BrowserSession(cdp_url='wss://remote-browser.example.com/cdp', headers=test_headers)
with patch('browser_use.browser.session.CDPClient') as mock_cdp_client_class:
# Setup mock CDPClient instance
mock_cdp_client = AsyncMock()
mock_cdp_client_class.return_value = mock_cdp_client
mock_cdp_client.start = AsyncMock()
mock_cdp_client.stop = AsyncMock()
# Mock CDP methods
mock_cdp_client.send = MagicMock()
mock_cdp_client.send.Target = MagicMock()
mock_cdp_client.send.Target.setAutoAttach = AsyncMock()
mock_cdp_client.send.Target.getTargets = AsyncMock(return_value={'targetInfos': []})
mock_cdp_client.send.Target.createTarget = AsyncMock(return_value={'targetId': 'test-target-id'})
# Mock SessionManager (imported inside connect() from browser_use.browser.session_manager)
with patch('browser_use.browser.session_manager.SessionManager') as mock_session_manager_class:
mock_session_manager = MagicMock()
mock_session_manager_class.return_value = mock_session_manager
mock_session_manager.start_monitoring = AsyncMock()
mock_session_manager.get_all_page_targets = MagicMock(return_value=[])
try:
await session.connect()
except Exception:
# May fail due to incomplete mocking, but we can still verify the key assertion
pass
# Verify CDPClient was instantiated with the headers
mock_cdp_client_class.assert_called_once()
call_kwargs = mock_cdp_client_class.call_args
# Check positional args and keyword args
assert call_kwargs[0][0] == 'wss://remote-browser.example.com/cdp', 'CDP URL should be first arg'
assert call_kwargs[1].get('additional_headers') == test_headers, 'Headers should be passed as additional_headers'
assert call_kwargs[1].get('max_ws_frame_size') == 200 * 1024 * 1024, 'max_ws_frame_size should be set'
@pytest.mark.asyncio
async def test_cdp_client_no_headers_when_none():
"""Test that CDPClient is created with None headers when profile has no headers."""
session = BrowserSession(cdp_url='wss://example.com/cdp')
assert session.browser_profile.headers is None
with patch('browser_use.browser.session.CDPClient') as mock_cdp_client_class:
mock_cdp_client = AsyncMock()
mock_cdp_client_class.return_value = mock_cdp_client
mock_cdp_client.start = AsyncMock()
mock_cdp_client.stop = AsyncMock()
mock_cdp_client.send = MagicMock()
mock_cdp_client.send.Target = MagicMock()
mock_cdp_client.send.Target.setAutoAttach = AsyncMock()
mock_cdp_client.send.Target.getTargets = AsyncMock(return_value={'targetInfos': []})
mock_cdp_client.send.Target.createTarget = AsyncMock(return_value={'targetId': 'test-target-id'})
with patch('browser_use.browser.session_manager.SessionManager') as mock_session_manager_class:
mock_session_manager = MagicMock()
mock_session_manager_class.return_value = mock_session_manager
mock_session_manager.start_monitoring = AsyncMock()
mock_session_manager.get_all_page_targets = MagicMock(return_value=[])
try:
await session.connect()
except Exception:
pass
# Verify CDPClient was called with None for additional_headers
call_kwargs = mock_cdp_client_class.call_args
assert call_kwargs[1].get('additional_headers') is None
@pytest.mark.asyncio
async def test_headers_used_for_json_version_endpoint():
"""Test that headers are also used when fetching WebSocket URL from /json/version."""
test_headers = {'Authorization': 'Bearer test-token'}
# Use HTTP URL (not ws://) to trigger /json/version fetch
session = BrowserSession(cdp_url='http://remote-browser.example.com:9222', headers=test_headers)
with patch('browser_use.browser.session.httpx.AsyncClient') as mock_client_class:
mock_client = AsyncMock()
mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_class.return_value.__aexit__ = AsyncMock()
# Mock the /json/version response
mock_response = MagicMock()
mock_response.json.return_value = {'webSocketDebuggerUrl': 'ws://remote-browser.example.com:9222/devtools/browser/abc'}
mock_client.get = AsyncMock(return_value=mock_response)
with patch('browser_use.browser.session.CDPClient') as mock_cdp_client_class:
mock_cdp_client = AsyncMock()
mock_cdp_client_class.return_value = mock_cdp_client
mock_cdp_client.start = AsyncMock()
mock_cdp_client.send = MagicMock()
mock_cdp_client.send.Target = MagicMock()
mock_cdp_client.send.Target.setAutoAttach = AsyncMock()
with patch('browser_use.browser.session_manager.SessionManager') as mock_sm_class:
mock_sm = MagicMock()
mock_sm_class.return_value = mock_sm
mock_sm.start_monitoring = AsyncMock()
mock_sm.get_all_page_targets = MagicMock(return_value=[])
try:
await session.connect()
except Exception:
pass
# Verify headers were passed to the HTTP GET request
mock_client.get.assert_called_once()
call_kwargs = mock_client.get.call_args
assert call_kwargs[1].get('headers') == test_headers
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/interactions/test_radio_buttons.py | tests/ci/interactions/test_radio_buttons.py | # @file purpose: Test radio button interactions and serialization in browser-use
"""
Test file for verifying radio button clicking functionality and DOM serialization.
This test creates a simple HTML page with radio buttons, sends an agent to click them,
and logs the final agent message to show how radio buttons are represented in the serializer.
The serialization shows radio buttons as:
[index]<input type=radio name=groupname value=optionvalue checked=true/false />
Usage:
uv run pytest tests/ci/test_radio_buttons.py -v -s
Note: This test requires a real LLM API key and is skipped in CI environments.
"""
import os
from pathlib import Path
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server that serves static content."""
server = HTTPServer()
server.start()
# Read the HTML file content
html_file = Path(__file__).parent / 'test_radio_buttons.html'
with open(html_file) as f:
html_content = f.read()
# Add route for radio buttons test page
server.expect_request('/radio-test').respond_with_data(
html_content,
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
"""Create and provide a Browser instance with security disabled."""
browser_session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
)
)
await browser_session.start()
yield browser_session
await browser_session.kill()
@pytest.mark.skipif(
os.getenv('CI') == 'true' or os.getenv('GITHUB_ACTIONS') == 'true',
reason='Skipped in CI: requires real LLM API key which blocks other tests',
)
class TestRadioButtons:
"""Test cases for radio button interactions."""
async def test_radio_button_clicking(self, browser_session, base_url):
"""Test that agent can click radio buttons by checking for secret message."""
task = f"Go to {base_url}/radio-test and click on the 'Blue' radio button and the 'Dog' radio button. After clicking both buttons, look for any text message that appears on the page and report exactly what you see."
agent = Agent(
task=task,
browser_session=browser_session,
max_actions_per_step=5,
flash_mode=True,
)
# Run the agent
history = await agent.run(max_steps=8)
# Check if the secret message appears in the final response
secret_found = False
final_response = history.final_result()
if final_response and 'SECRET_SUCCESS_12345' in final_response:
secret_found = True
print('\n✅ SUCCESS: Secret message found! Radio buttons were clicked correctly.')
assert secret_found, (
"Secret message 'SECRET_SUCCESS_12345' should be present, indicating both Blue and Dog radio buttons were clicked. Actual response: "
+ str(final_response)
)
print(f'\n🎉 Test completed successfully! Agent completed {len(history)} steps and found the secret message.')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/interactions/test_dropdown_aria_menus.py | tests/ci/interactions/test_dropdown_aria_menus.py | import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server that serves static content."""
server = HTTPServer()
server.start()
# Add route for ARIA menu test page
server.expect_request('/aria-menu').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>ARIA Menu Test</title>
<style>
.menu {
list-style: none;
padding: 0;
margin: 0;
border: 1px solid #ccc;
background: white;
width: 200px;
}
.menu-item {
padding: 10px 20px;
border-bottom: 1px solid #eee;
}
.menu-item:hover {
background: #f0f0f0;
}
.menu-item-anchor {
text-decoration: none;
color: #333;
display: block;
}
#result {
margin-top: 20px;
padding: 10px;
border: 1px solid #ddd;
min-height: 20px;
}
</style>
</head>
<body>
<h1>ARIA Menu Test</h1>
<p>This menu uses ARIA roles instead of native select elements</p>
<!-- Exactly like the HTML provided in the issue -->
<ul class="menu menu-format-standard menu-regular" role="menu" id="pyNavigation1752753375773" style="display: block;">
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Filter</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation" id="menu-item-$PpyNavigation1752753375773$ppyElements$l2">
<a href="#" onclick="pd(event);" class="menu-item-anchor menu-item-expand" tabindex="0" role="menuitem" aria-haspopup="true">
<span class="menu-item-title-wrap"><span class="menu-item-title">Sort</span></span>
</a>
<div class="menu-panel-wrapper">
<ul class="menu menu-format-standard menu-regular" role="menu" id="$PpyNavigation1752753375773$ppyElements$l2">
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Lowest to highest</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Highest to lowest</span></span>
</a>
</li>
</ul>
</div>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Appearance</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Summarize</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Delete</span></span>
</a>
</li>
</ul>
<div id="result">Click an option to see the result</div>
<script>
// Mock the pd function that prevents default
function pd(event) {
event.preventDefault();
const text = event.target.closest('[role="menuitem"]').textContent.trim();
document.getElementById('result').textContent = 'Clicked: ' + text;
}
</script>
</body>
</html>
""",
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
"""Return the base URL for the test HTTP server."""
return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
"""Create and provide a Browser instance with security disabled."""
browser_session = BrowserSession(
browser_profile=BrowserProfile(
headless=True,
user_data_dir=None,
keep_alive=True,
chromium_sandbox=False, # Disable sandbox for CI environment
)
)
await browser_session.start()
yield browser_session
await browser_session.kill()
@pytest.fixture(scope='function')
def tools():
"""Create and provide a Tools instance."""
return Tools()
class TestARIAMenuDropdown:
"""Test ARIA menu support for get_dropdown_options and select_dropdown_option."""
@pytest.mark.skip(reason='TODO: fix')
async def test_get_dropdown_options_with_aria_menu(self, tools, browser_session: BrowserSession, base_url):
"""Test that get_dropdown_options can retrieve options from ARIA menus."""
# Navigate to the ARIA menu test page
await tools.navigate(url=f'{base_url}/aria-menu', new_tab=False, browser_session=browser_session)
# Wait for the page to load
from browser_use.browser.events import NavigationCompleteEvent
await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
# Initialize the DOM state to populate the selector map
await browser_session.get_browser_state_summary()
# Find the ARIA menu element by ID
menu_index = await browser_session.get_index_by_id('pyNavigation1752753375773')
assert menu_index is not None, 'Could not find ARIA menu element'
# Execute the action with the menu index
result = await tools.dropdown_options(index=menu_index, browser_session=browser_session)
# Verify the result structure
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
# Expected ARIA menu options
expected_options = ['Filter', 'Sort', 'Appearance', 'Summarize', 'Delete']
# Verify all options are returned
for option in expected_options:
assert option in result.extracted_content, f"Option '{option}' not found in result content"
# Verify the instruction for using the text in select_dropdown is included
assert 'Use the exact text string in select_dropdown' in result.extracted_content
@pytest.mark.skip(reason='TODO: fix')
async def test_select_dropdown_option_with_aria_menu(self, tools, browser_session: BrowserSession, base_url):
"""Test that select_dropdown_option can select an option from ARIA menus."""
# Navigate to the ARIA menu test page
await tools.navigate(url=f'{base_url}/aria-menu', new_tab=False, browser_session=browser_session)
# Wait for the page to load
from browser_use.browser.events import NavigationCompleteEvent
await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
# Initialize the DOM state to populate the selector map
await browser_session.get_browser_state_summary()
# Find the ARIA menu element by ID
menu_index = await browser_session.get_index_by_id('pyNavigation1752753375773')
assert menu_index is not None, 'Could not find ARIA menu element'
# Execute the action with the menu index to select "Filter"
result = await tools.select_dropdown(index=menu_index, text='Filter', browser_session=browser_session)
# Verify the result structure
assert isinstance(result, ActionResult)
# Core logic validation: Verify selection was successful
assert result.extracted_content is not None
assert 'selected option' in result.extracted_content.lower() or 'clicked' in result.extracted_content.lower()
assert 'Filter' in result.extracted_content
# Verify the click actually had an effect on the page using CDP
cdp_session = await browser_session.get_or_create_cdp_session()
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': "document.getElementById('result').textContent", 'returnByValue': True},
session_id=cdp_session.session_id,
)
result_text = result.get('result', {}).get('value', '')
assert 'Filter' in result_text, f"Expected 'Filter' in result text, got '{result_text}'"
@pytest.mark.skip(reason='TODO: fix')
async def test_get_dropdown_options_with_nested_aria_menu(self, tools, browser_session: BrowserSession, base_url):
"""Test that get_dropdown_options can handle nested ARIA menus (like Sort submenu)."""
# Navigate to the ARIA menu test page
await tools.navigate(url=f'{base_url}/aria-menu', new_tab=False, browser_session=browser_session)
# Wait for the page to load
from browser_use.browser.events import NavigationCompleteEvent
await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
# Initialize the DOM state to populate the selector map
await browser_session.get_browser_state_summary()
# Get the selector map
selector_map = await browser_session.get_selector_map()
# Find the nested ARIA menu element in the selector map
nested_menu_index = None
for idx, element in selector_map.items():
# Look for the nested UL with id containing "$PpyNavigation"
if (
element.tag_name.lower() == 'ul'
and '$PpyNavigation' in str(element.attributes.get('id', ''))
and element.attributes.get('role') == 'menu'
):
nested_menu_index = idx
break
# The nested menu might not be in the selector map initially if it's hidden
# In that case, we should test the main menu
if nested_menu_index is None:
# Find the main menu instead
for idx, element in selector_map.items():
if element.tag_name.lower() == 'ul' and element.attributes.get('id') == 'pyNavigation1752753375773':
nested_menu_index = idx
break
assert nested_menu_index is not None, (
f'Could not find any ARIA menu element in selector map. Available elements: {[f"{idx}: {element.tag_name}" for idx, element in selector_map.items()]}'
)
# Execute the action with the menu index
result = await tools.dropdown_options(index=nested_menu_index, browser_session=browser_session)
# Verify the result structure
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
# The action should return some menu options
assert 'Use the exact text string in select_dropdown' in result.extracted_content
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/tests/ci/interactions/test_dropdown_native.py | tests/ci/interactions/test_dropdown_native.py | """Test GetDropdownOptionsEvent and SelectDropdownOptionEvent functionality.
This file consolidates all tests related to dropdown functionality including:
- Native <select> dropdowns
- ARIA role="menu" dropdowns
- Custom dropdown implementations
"""
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.events import GetDropdownOptionsEvent, NavigationCompleteEvent, SelectDropdownOptionEvent
from browser_use.browser.profile import BrowserProfile
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
"""Create and provide a test HTTP server that serves static content."""
server = HTTPServer()
server.start()
# Add route for native dropdown test page
server.expect_request('/native-dropdown').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>Native Dropdown Test</title>
</head>
<body>
<h1>Native Dropdown Test</h1>
<select id="test-dropdown" name="test-dropdown">
<option value="">Please select</option>
<option value="option1">First Option</option>
<option value="option2">Second Option</option>
<option value="option3">Third Option</option>
</select>
<div id="result">No selection made</div>
<script>
document.getElementById('test-dropdown').addEventListener('change', function(e) {
document.getElementById('result').textContent = 'Selected: ' + e.target.options[e.target.selectedIndex].text;
});
</script>
</body>
</html>
""",
content_type='text/html',
)
# Add route for ARIA menu test page
server.expect_request('/aria-menu').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>ARIA Menu Test</title>
<style>
.menu {
list-style: none;
padding: 0;
margin: 0;
border: 1px solid #ccc;
background: white;
width: 200px;
}
.menu-item {
padding: 10px 20px;
border-bottom: 1px solid #eee;
}
.menu-item:hover {
background: #f0f0f0;
}
.menu-item-anchor {
text-decoration: none;
color: #333;
display: block;
}
#result {
margin-top: 20px;
padding: 10px;
border: 1px solid #ddd;
min-height: 20px;
}
</style>
</head>
<body>
<h1>ARIA Menu Test</h1>
<p>This menu uses ARIA roles instead of native select elements</p>
<ul class="menu menu-format-standard menu-regular" role="menu" id="pyNavigation1752753375773" style="display: block;">
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Filter</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation" id="menu-item-$PpyNavigation1752753375773$ppyElements$l2">
<a href="#" onclick="pd(event);" class="menu-item-anchor menu-item-expand" tabindex="0" role="menuitem" aria-haspopup="true">
<span class="menu-item-title-wrap"><span class="menu-item-title">Sort</span></span>
</a>
<div class="menu-panel-wrapper">
<ul class="menu menu-format-standard menu-regular" role="menu" id="$PpyNavigation1752753375773$ppyElements$l2">
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Lowest to highest</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Highest to lowest</span></span>
</a>
</li>
</ul>
</div>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Appearance</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Summarize</span></span>
</a>
</li>
<li class="menu-item menu-item-enabled" role="presentation">
<a href="#" onclick="pd(event);" class="menu-item-anchor" tabindex="0" role="menuitem">
<span class="menu-item-title-wrap"><span class="menu-item-title">Delete</span></span>
</a>
</li>
</ul>
<div id="result">Click an option to see the result</div>
<script>
// Mock the pd function that prevents default
function pd(event) {
event.preventDefault();
const text = event.target.closest('[role="menuitem"]').textContent.trim();
document.getElementById('result').textContent = 'Clicked: ' + text;
}
</script>
</body>
</html>
""",
content_type='text/html',
)
# Add route for custom dropdown test page
server.expect_request('/custom-dropdown').respond_with_data(
"""
<!DOCTYPE html>
<html>
<head>
<title>Custom Dropdown Test</title>
<style>
.dropdown {
position: relative;
display: inline-block;
width: 200px;
}
.dropdown-button {
padding: 10px;
border: 1px solid #ccc;
background: white;
cursor: pointer;
width: 100%;
}
.dropdown-menu {
position: absolute;
top: 100%;
left: 0;
right: 0;
border: 1px solid #ccc;
background: white;
display: block;
z-index: 1000;
}
.dropdown-menu.hidden {
display: none;
}
.dropdown .item {
padding: 10px;
cursor: pointer;
}
.dropdown .item:hover {
background: #f0f0f0;
}
.dropdown .item.selected {
background: #e0e0e0;
}
#result {
margin-top: 20px;
padding: 10px;
border: 1px solid #ddd;
}
</style>
</head>
<body>
<h1>Custom Dropdown Test</h1>
<p>This is a custom dropdown implementation (like Semantic UI)</p>
<div class="dropdown ui" id="custom-dropdown">
<div class="dropdown-button" onclick="toggleDropdown()">
<span id="selected-text">Choose an option</span>
</div>
<div class="dropdown-menu" id="dropdown-menu">
<div class="item" data-value="red" onclick="selectOption('Red', 'red')">Red</div>
<div class="item" data-value="green" onclick="selectOption('Green', 'green')">Green</div>
<div class="item" data-value="blue" onclick="selectOption('Blue', 'blue')">Blue</div>
<div class="item" data-value="yellow" onclick="selectOption('Yellow', 'yellow')">Yellow</div>
</div>
</div>
<div id="result">No selection made</div>
<script>
function toggleDropdown() {
const menu = document.getElementById('dropdown-menu');
menu.classList.toggle('hidden');
}
function selectOption(text, value) {
document.getElementById('selected-text').textContent = text;
document.getElementById('result').textContent = 'Selected: ' + text + ' (value: ' + value + ')';
// Mark as selected
document.querySelectorAll('.item').forEach(item => item.classList.remove('selected'));
event.target.classList.add('selected');
// Close dropdown
document.getElementById('dropdown-menu').classList.add('hidden');
}
</script>
</body>
</html>
""",
content_type='text/html',
)
yield server
server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
	"""Return the base URL for the test HTTP server.

	Derives the URL from the session-scoped ``http_server`` fixture's
	``host`` and ``port`` attributes (assigned when the server starts).
	"""
	return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
	"""Yield a shared headless BrowserSession for this module's tests.

	The session is started once per module, handed to every test, and
	torn down with ``kill()`` after the last test finishes.
	"""
	session = BrowserSession(
		browser_profile=BrowserProfile(
			headless=True,
			user_data_dir=None,
			keep_alive=True,
			chromium_sandbox=False,  # Disable sandbox for CI environment
		)
	)
	await session.start()
	yield session
	await session.kill()
@pytest.fixture(scope='function')
def tools():
	"""Build a fresh Tools instance for each individual test."""
	tools_instance = Tools()
	return tools_instance
class TestGetDropdownOptionsEvent:
	"""Tests covering GetDropdownOptionsEvent for native <select>, ARIA menu, and custom dropdowns."""

	@pytest.mark.skip(reason='Dropdown text assertion issue - test expects specific text format')
	async def test_native_select_dropdown(self, tools, browser_session: BrowserSession, base_url):
		"""Test get_dropdown_options with native HTML select element."""
		# Load the page hosting a plain <select>, then build the DOM state so
		# the selector map is populated before looking elements up by id.
		await tools.navigate(url=f'{base_url}/native-dropdown', new_tab=False, browser_session=browser_session)
		await browser_session.get_browser_state_summary()
		dropdown_index = await browser_session.get_index_by_id('test-dropdown')
		assert dropdown_index is not None, 'Could not find select element'
		# Exercise the high-level tools action first.
		action_result = await tools.dropdown_options(index=dropdown_index, browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		# Every option label must appear in the extracted content.
		expected_options = ['Please select', 'First Option', 'Second Option', 'Third Option']
		for option in expected_options:
			assert option in action_result.extracted_content, f"Option '{option}' not found in result content"
		# The usage instruction must be present as well.
		assert 'Use the exact text string' in action_result.extracted_content and 'select_dropdown' in action_result.extracted_content
		# Now drive the event bus directly and inspect the raw payload.
		target_node = await browser_session.get_element_by_index(dropdown_index)
		assert target_node is not None
		dispatched = browser_session.event_bus.dispatch(GetDropdownOptionsEvent(node=target_node))
		payload = await dispatched.event_result(timeout=3.0)
		assert payload is not None
		assert 'options' in payload
		assert 'type' in payload
		assert payload['type'] == 'select'

	@pytest.mark.skip(reason='ARIA menu detection issue - element not found in selector map')
	async def test_aria_menu_dropdown(self, tools, browser_session: BrowserSession, base_url):
		"""Test get_dropdown_options with ARIA role='menu' element."""
		# Load the ARIA menu fixture page and refresh the DOM state.
		await tools.navigate(url=f'{base_url}/aria-menu', new_tab=False, browser_session=browser_session)
		await browser_session.get_browser_state_summary()
		menu_index = await browser_session.get_index_by_id('pyNavigation1752753375773')
		assert menu_index is not None, 'Could not find ARIA menu element'
		# Exercise the high-level tools action first.
		action_result = await tools.dropdown_options(index=menu_index, browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		# Every top-level menu entry must be reported as an option.
		expected_options = ['Filter', 'Sort', 'Appearance', 'Summarize', 'Delete']
		for option in expected_options:
			assert option in action_result.extracted_content, f"Option '{option}' not found in result content"
		# Now drive the event bus directly and inspect the raw payload.
		target_node = await browser_session.get_element_by_index(menu_index)
		assert target_node is not None
		dispatched = browser_session.event_bus.dispatch(GetDropdownOptionsEvent(node=target_node))
		payload = await dispatched.event_result(timeout=3.0)
		assert payload is not None
		assert 'options' in payload
		assert 'type' in payload
		assert payload['type'] == 'aria'

	@pytest.mark.skip(reason='Custom dropdown detection issue - element not found in selector map')
	async def test_custom_dropdown(self, tools, browser_session: BrowserSession, base_url):
		"""Test get_dropdown_options with custom dropdown implementation."""
		# Load the Semantic-UI-style custom dropdown page and refresh the DOM state.
		await tools.navigate(url=f'{base_url}/custom-dropdown', new_tab=False, browser_session=browser_session)
		await browser_session.get_browser_state_summary()
		dropdown_index = await browser_session.get_index_by_id('custom-dropdown')
		assert dropdown_index is not None, 'Could not find custom dropdown element'
		# Exercise the high-level tools action first.
		action_result = await tools.dropdown_options(index=dropdown_index, browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		# Every item in the custom menu must be reported as an option.
		expected_options = ['Red', 'Green', 'Blue', 'Yellow']
		for option in expected_options:
			assert option in action_result.extracted_content, f"Option '{option}' not found in result content"
		# Now drive the event bus directly and inspect the raw payload.
		target_node = await browser_session.get_element_by_index(dropdown_index)
		assert target_node is not None
		dispatched = browser_session.event_bus.dispatch(GetDropdownOptionsEvent(node=target_node))
		payload = await dispatched.event_result(timeout=3.0)
		assert payload is not None
		assert 'options' in payload
		assert 'type' in payload
		assert payload['type'] == 'custom'
class TestSelectDropdownOptionEvent:
	"""Tests covering SelectDropdownOptionEvent for native <select>, ARIA menu, and custom dropdowns."""

	@pytest.mark.skip(reason='Timeout issue - test takes too long to complete')
	async def test_select_native_dropdown_option(self, tools, browser_session: BrowserSession, base_url):
		"""Test select_dropdown_option with native HTML select element."""
		# Open the page, wait for navigation to settle, then build the DOM state.
		await tools.navigate(url=f'{base_url}/native-dropdown', new_tab=False, browser_session=browser_session)
		await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
		await browser_session.get_browser_state_summary()
		dropdown_index = await browser_session.get_index_by_id('test-dropdown')
		assert dropdown_index is not None
		# Select an option through the high-level tools action.
		action_result = await tools.select_dropdown(index=dropdown_index, text='Second Option', browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		assert 'Second Option' in action_result.extracted_content
		# Confirm the DOM actually changed by reading selectedIndex over CDP.
		cdp_session = await browser_session.get_or_create_cdp_session()
		eval_result = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': "document.getElementById('test-dropdown').selectedIndex", 'returnByValue': True},
			session_id=cdp_session.session_id,
		)
		selected_index = eval_result.get('result', {}).get('value', -1)
		assert selected_index == 2, f'Expected selected index 2, got {selected_index}'

	@pytest.mark.skip(reason='Timeout issue - test takes too long to complete')
	async def test_select_aria_menu_option(self, tools, browser_session: BrowserSession, base_url):
		"""Test select_dropdown_option with ARIA menu."""
		# Open the page, wait for navigation to settle, then build the DOM state.
		await tools.navigate(url=f'{base_url}/aria-menu', new_tab=False, browser_session=browser_session)
		await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
		await browser_session.get_browser_state_summary()
		menu_index = await browser_session.get_index_by_id('pyNavigation1752753375773')
		assert menu_index is not None
		# Pick a menu item through the high-level tools action.
		action_result = await tools.select_dropdown(index=menu_index, text='Filter', browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		assert 'Filter' in action_result.extracted_content
		# Confirm the click fired by reading the page's #result element over CDP.
		cdp_session = await browser_session.get_or_create_cdp_session()
		eval_result = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': "document.getElementById('result').textContent", 'returnByValue': True},
			session_id=cdp_session.session_id,
		)
		result_text = eval_result.get('result', {}).get('value', '')
		assert 'Filter' in result_text, f"Expected 'Filter' in result text, got '{result_text}'"

	@pytest.mark.skip(reason='Timeout issue - test takes too long to complete')
	async def test_select_custom_dropdown_option(self, tools, browser_session: BrowserSession, base_url):
		"""Test select_dropdown_option with custom dropdown."""
		# Open the page, wait for navigation to settle, then build the DOM state.
		await tools.navigate(url=f'{base_url}/custom-dropdown', new_tab=False, browser_session=browser_session)
		await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
		await browser_session.get_browser_state_summary()
		dropdown_index = await browser_session.get_index_by_id('custom-dropdown')
		assert dropdown_index is not None
		# Pick an item through the high-level tools action.
		action_result = await tools.select_dropdown(index=dropdown_index, text='Blue', browser_session=browser_session)
		assert isinstance(action_result, ActionResult)
		assert action_result.extracted_content is not None
		assert 'Blue' in action_result.extracted_content
		# Confirm the selection took effect by reading the page's #result element over CDP.
		cdp_session = await browser_session.get_or_create_cdp_session()
		eval_result = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': "document.getElementById('result').textContent", 'returnByValue': True},
			session_id=cdp_session.session_id,
		)
		result_text = eval_result.get('result', {}).get('value', '')
		assert 'Blue' in result_text, f"Expected 'Blue' in result text, got '{result_text}'"

	@pytest.mark.skip(reason='Timeout issue - test takes too long to complete')
	async def test_select_invalid_option_error(self, tools, browser_session: BrowserSession, base_url):
		"""Test select_dropdown_option with non-existent option text."""
		# Open the page, wait for navigation to settle, then build the DOM state.
		await tools.navigate(url=f'{base_url}/native-dropdown', new_tab=False, browser_session=browser_session)
		await browser_session.event_bus.expect(NavigationCompleteEvent, timeout=10.0)
		await browser_session.get_browser_state_summary()
		dropdown_index = await browser_session.get_index_by_id('test-dropdown')
		assert dropdown_index is not None
		# Dispatch the event directly with an option text that does not exist.
		target_node = await browser_session.get_element_by_index(dropdown_index)
		assert target_node is not None
		dispatched = browser_session.event_bus.dispatch(SelectDropdownOptionEvent(node=target_node, text='Non-existent Option'))
		try:
			# Either the result payload carries an error marker...
			selection_data = await dispatched.event_result(timeout=3.0)
			assert selection_data is not None
			assert 'error' in selection_data or 'not found' in str(selection_data).lower()
		except Exception as exc:
			# ...or the event handler raises with a descriptive message.
			message = str(exc).lower()
			assert 'not found' in message or 'no option' in message
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/simple.py | examples/simple.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
from dotenv import load_dotenv
from browser_use import Agent, ChatBrowserUse

# Load environment variables (e.g. BROWSER_USE_API_KEY, per the setup note
# above) from a local .env file before constructing the LLM client.
load_dotenv()

# Create an agent that drives a browser to complete the natural-language task.
# ChatBrowserUse presumably reads the API key from the environment — see the
# setup instructions in the module docstring.
agent = Agent(
	task='Find the number of stars of the following repos: browser-use, playwright, stagehand, react, nextjs',
	llm=ChatBrowserUse(),
)

# Run the agent synchronously; blocks until the task completes.
agent.run_sync()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.