sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
browser-use/browser-use:tests/ci/models/test_azure_responses_api.py | """Tests for Azure OpenAI Responses API support."""
import os
import pytest
from browser_use.llm.azure.chat import RESPONSES_API_ONLY_MODELS, ChatAzureOpenAI
from browser_use.llm.messages import (
AssistantMessage,
ContentPartImageParam,
ContentPartTextParam,
Function,
ImageURL,
SystemMessage,
ToolCall,
UserMessage,
)
from browser_use.llm.openai.responses_serializer import ResponsesAPIMessageSerializer
class TestResponsesAPIMessageSerializer:
"""Tests for the ResponsesAPIMessageSerializer class."""
def test_serialize_user_message_string_content(self):
"""Test serializing a user message with string content."""
message = UserMessage(content='Hello, world!')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert result['content'] == 'Hello, world!'
def test_serialize_user_message_text_parts(self):
"""Test serializing a user message with text content parts."""
message = UserMessage(
content=[
ContentPartTextParam(type='text', text='First part'),
ContentPartTextParam(type='text', text='Second part'),
]
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert isinstance(result['content'], list)
assert len(result['content']) == 2
assert result['content'][0]['type'] == 'input_text'
assert result['content'][0]['text'] == 'First part'
assert result['content'][1]['type'] == 'input_text'
assert result['content'][1]['text'] == 'Second part'
def test_serialize_user_message_with_image(self):
"""Test serializing a user message with image content."""
message = UserMessage(
content=[
ContentPartTextParam(type='text', text='What is in this image?'),
ContentPartImageParam(
type='image_url',
image_url=ImageURL(url='https://example.com/image.png', detail='auto'),
),
]
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'user'
assert isinstance(result['content'], list)
assert len(result['content']) == 2
assert result['content'][0]['type'] == 'input_text'
assert result['content'][1]['type'] == 'input_image'
assert result['content'][1].get('image_url') == 'https://example.com/image.png'
assert result['content'][1].get('detail') == 'auto'
def test_serialize_system_message_string_content(self):
"""Test serializing a system message with string content."""
message = SystemMessage(content='You are a helpful assistant.')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'system'
assert result['content'] == 'You are a helpful assistant.'
def test_serialize_system_message_text_parts(self):
"""Test serializing a system message with text content parts."""
message = SystemMessage(content=[ContentPartTextParam(type='text', text='System instruction')])
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'system'
assert isinstance(result['content'], list)
assert len(result['content']) == 1
assert result['content'][0]['type'] == 'input_text'
def test_serialize_assistant_message_string_content(self):
"""Test serializing an assistant message with string content."""
message = AssistantMessage(content='Here is my response.')
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert result['content'] == 'Here is my response.'
def test_serialize_assistant_message_none_content_with_tool_calls(self):
"""Test serializing an assistant message with None content and tool calls."""
message = AssistantMessage(
content=None,
tool_calls=[
ToolCall(
id='call_123',
type='function',
function=Function(name='search', arguments='{"query": "test"}'),
)
],
)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert '[Tool call: search({"query": "test"})]' in result['content']
def test_serialize_assistant_message_none_content_no_tool_calls(self):
"""Test serializing an assistant message with None content and no tool calls."""
message = AssistantMessage(content=None)
result = ResponsesAPIMessageSerializer.serialize(message)
assert result['role'] == 'assistant'
assert result['content'] == ''
def test_serialize_messages_list(self):
"""Test serializing a list of messages."""
messages = [
SystemMessage(content='You are helpful.'),
UserMessage(content='Hello!'),
AssistantMessage(content='Hi there!'),
]
results = ResponsesAPIMessageSerializer.serialize_messages(messages)
assert len(results) == 3
assert results[0]['role'] == 'system'
assert results[1]['role'] == 'user'
assert results[2]['role'] == 'assistant'
class TestChatAzureOpenAIShouldUseResponsesAPI:
"""Tests for the _should_use_responses_api method."""
def test_use_responses_api_true(self):
"""Test that use_responses_api=True forces Responses API."""
llm = ChatAzureOpenAI(
model='gpt-4o',
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api=True,
)
assert llm._should_use_responses_api() is True
def test_use_responses_api_false(self):
"""Test that use_responses_api=False forces Chat Completions API."""
llm = ChatAzureOpenAI(
model='gpt-5.1-codex-mini', # Even with a Responses-only model
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api=False,
)
assert llm._should_use_responses_api() is False
def test_use_responses_api_auto_with_responses_only_model(self):
"""Test that auto mode detects Responses-only models."""
for model_name in RESPONSES_API_ONLY_MODELS:
llm = ChatAzureOpenAI(
model=model_name,
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api='auto',
)
assert llm._should_use_responses_api() is True, f'Expected Responses API for {model_name}'
def test_use_responses_api_auto_with_regular_model(self):
"""Test that auto mode uses Chat Completions for regular models."""
regular_models = ['gpt-4o', 'gpt-4.1-mini', 'gpt-3.5-turbo', 'gpt-4']
for model_name in regular_models:
llm = ChatAzureOpenAI(
model=model_name,
api_key='test',
azure_endpoint='https://test.openai.azure.com',
use_responses_api='auto',
)
assert llm._should_use_responses_api() is False, f'Expected Chat Completions for {model_name}'
def test_use_responses_api_auto_is_default(self):
"""Test that 'auto' is the default value for use_responses_api."""
llm = ChatAzureOpenAI(
model='gpt-4o',
api_key='test',
azure_endpoint='https://test.openai.azure.com',
)
assert llm.use_responses_api == 'auto'
def test_responses_api_only_models_list(self):
"""Test that the RESPONSES_API_ONLY_MODELS list contains expected models."""
expected_models = [
'gpt-5.1-codex',
'gpt-5.1-codex-mini',
'gpt-5.1-codex-max',
'gpt-5-codex',
'codex-mini-latest',
'computer-use-preview',
]
for model in expected_models:
assert model in RESPONSES_API_ONLY_MODELS, f'{model} should be in RESPONSES_API_ONLY_MODELS'
class TestChatAzureOpenAIIntegration:
"""Integration tests for Azure OpenAI with Responses API.
These tests require valid Azure OpenAI credentials and are skipped if not available.
"""
@pytest.fixture
def azure_credentials(self):
"""Get Azure OpenAI credentials from environment."""
api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
if not api_key or not endpoint:
pytest.skip('Azure OpenAI credentials not available')
return {'api_key': api_key, 'azure_endpoint': endpoint}
async def test_chat_completions_api_basic_call(self, azure_credentials):
"""Test basic call using Chat Completions API."""
llm = ChatAzureOpenAI(
model='gpt-4.1-mini',
api_key=azure_credentials['api_key'],
azure_endpoint=azure_credentials['azure_endpoint'],
use_responses_api=False, # Force Chat Completions API
)
messages = [
SystemMessage(content='You are a helpful assistant.'),
UserMessage(content='Say "hello" and nothing else.'),
]
result = await llm.ainvoke(messages)
assert result.completion is not None
assert 'hello' in result.completion.lower()
async def test_responses_api_basic_call(self, azure_credentials):
"""Test basic call using Responses API.
This test only runs if the Azure deployment supports the Responses API
(api_version >= 2025-03-01-preview).
"""
llm = ChatAzureOpenAI(
model='gpt-4.1-mini',
api_key=azure_credentials['api_key'],
azure_endpoint=azure_credentials['azure_endpoint'],
api_version='2025-03-01-preview', # Required for Responses API
use_responses_api=True, # Force Responses API
)
messages = [
SystemMessage(content='You are a helpful assistant.'),
UserMessage(content='Say "hello" and nothing else.'),
]
try:
result = await llm.ainvoke(messages)
assert result.completion is not None
assert 'hello' in result.completion.lower()
except Exception as e:
# Skip if Responses API is not supported
if 'Responses API' in str(e) or '404' in str(e):
pytest.skip('Responses API not supported by this Azure deployment')
raise
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_azure_responses_api.py",
"license": "MIT License",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_ai_step.py | """Tests for AI step private method used during rerun"""
from unittest.mock import AsyncMock
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionResult
from tests.ci.conftest import create_mock_llm
async def test_execute_ai_step_basic():
"""Test that _execute_ai_step extracts content with AI"""
# Create mock LLM that returns text response
async def custom_ainvoke(*args, **kwargs):
from browser_use.llm.views import ChatInvokeCompletion
return ChatInvokeCompletion(completion='Extracted: Test content from page', usage=None)
mock_llm = AsyncMock()
mock_llm.ainvoke.side_effect = custom_ainvoke
mock_llm.model = 'mock-model'
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
await agent.browser_session.start()
try:
# Execute _execute_ai_step with mock LLM
result = await agent._execute_ai_step(
query='Extract the main heading',
include_screenshot=False,
extract_links=False,
ai_step_llm=mock_llm,
)
# Verify result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Extracted: Test content from page' in result.extracted_content
assert result.long_term_memory is not None
finally:
await agent.close()
async def test_execute_ai_step_with_screenshot():
"""Test that _execute_ai_step includes screenshot when requested"""
# Create mock LLM
async def custom_ainvoke(*args, **kwargs):
from browser_use.llm.views import ChatInvokeCompletion
# Verify that we received a message with image content
messages = args[0] if args else []
assert len(messages) >= 1, 'Should have at least one message'
# Check if any message has image content
has_image = False
for msg in messages:
if hasattr(msg, 'content') and isinstance(msg.content, list):
for part in msg.content:
if hasattr(part, 'type') and part.type == 'image_url':
has_image = True
break
assert has_image, 'Should include screenshot in message'
return ChatInvokeCompletion(completion='Extracted content with screenshot analysis', usage=None)
mock_llm = AsyncMock()
mock_llm.ainvoke.side_effect = custom_ainvoke
mock_llm.model = 'mock-model'
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
await agent.browser_session.start()
try:
# Execute _execute_ai_step with screenshot
result = await agent._execute_ai_step(
query='Analyze this page',
include_screenshot=True,
extract_links=False,
ai_step_llm=mock_llm,
)
# Verify result
assert isinstance(result, ActionResult)
assert result.extracted_content is not None
assert 'Extracted content with screenshot analysis' in result.extracted_content
finally:
await agent.close()
async def test_execute_ai_step_error_handling():
"""Test that _execute_ai_step handles errors gracefully"""
# Create mock LLM that raises an error
mock_llm = AsyncMock()
mock_llm.ainvoke.side_effect = Exception('LLM service unavailable')
mock_llm.model = 'mock-model'
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
await agent.browser_session.start()
try:
# Execute _execute_ai_step - should return ActionResult with error
result = await agent._execute_ai_step(
query='Extract data',
include_screenshot=False,
ai_step_llm=mock_llm,
)
# Verify error is in result (not raised)
assert isinstance(result, ActionResult)
assert result.error is not None
assert 'AI step failed' in result.error
finally:
await agent.close()
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_ai_step.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_ax_name_matching.py | """Tests for ax_name (accessible name) element matching in history rerun.
This tests Level 4 matching which uses the accessibility tree's name property
to match elements when hash, stable_hash, and xpath all fail.
This is particularly useful for dynamic SPAs where DOM structure changes
but accessible names remain stable.
Also tests dropdown/menu re-opening behavior when menu items can't be found
because the dropdown closed during the wait between steps.
"""
from unittest.mock import AsyncMock
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionResult, AgentHistory, AgentHistoryList, RerunSummaryAction, StepMetadata
from browser_use.browser.views import BrowserStateHistory
from browser_use.dom.views import DOMInteractedElement, DOMRect, MatchLevel, NodeType
from tests.ci.conftest import create_mock_llm
async def test_ax_name_matching_succeeds_when_hash_fails(httpserver):
"""Test that ax_name matching finds elements when hash/xpath matching fails.
This simulates a dynamic SPA where the element hash and xpath change between
sessions, but the accessible name (ax_name) remains stable.
"""
# Set up a test page with a menu item that has an aria-label
# The aria-label becomes the accessible name (ax_name)
test_html = """<!DOCTYPE html>
<html>
<body>
<div role="menuitem" aria-label="New Contact" id="menu-1">New Contact</div>
<div role="menuitem" aria-label="Search" id="menu-2">Search</div>
</body>
</html>"""
httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
test_url = httpserver.url_for('/test')
# Create a mock LLM for summary
summary_action = RerunSummaryAction(
summary='Rerun completed',
success=True,
completion_status='complete',
)
async def custom_ainvoke(*args, **kwargs):
output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
if output_format is RerunSummaryAction:
from browser_use.llm.views import ChatInvokeCompletion
return ChatInvokeCompletion(completion=summary_action, usage=None)
raise ValueError('Unexpected output_format')
mock_summary_llm = AsyncMock()
mock_summary_llm.ainvoke.side_effect = custom_ainvoke
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Create an element with DIFFERENT hash/xpath but SAME ax_name as the real element
# This simulates what happens in dynamic SPAs where the DOM changes but
# accessible names remain stable
historical_element = DOMInteractedElement(
node_id=9999, # Different node_id
backend_node_id=9999, # Different backend_node_id
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV', # Same node type
# Note: aria-label is NOT in attributes - this tests that ax_name matching
# is used as a fallback when attribute matching fails
attributes={'role': 'menuitem', 'class': 'dynamic-class-12345'},
x_path='html/body/div[1]/div[4]/div[4]/div[1]', # Different xpath
element_hash=123456789, # Different hash (won't match)
stable_hash=987654321, # Different stable_hash (won't match)
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='New Contact', # SAME ax_name - this should match!
)
# Step 1: Navigate to test page
navigate_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Navigate to test page',
next_goal=None,
action=[{'navigate': {'url': test_url}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Navigated')],
state=BrowserStateHistory(
url=test_url,
title='Test Page',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(
step_start_time=0,
step_end_time=1,
step_number=1,
step_interval=0.1,
),
)
# Step 2: Click on element that has different hash/xpath but same ax_name
click_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click New Contact menu',
next_goal=None,
action=[{'click': {'index': 100}}], # type: ignore[arg-type] # Original index doesn't matter
),
result=[ActionResult(long_term_memory='Clicked New Contact')],
state=BrowserStateHistory(
url=test_url,
title='Test Page',
tabs=[],
interacted_element=[historical_element],
),
metadata=StepMetadata(
step_start_time=1,
step_end_time=2,
step_number=2,
step_interval=0.1,
),
)
history = AgentHistoryList(history=[navigate_step, click_step])
try:
# Run rerun - should succeed because ax_name matching finds the element
results = await agent.rerun_history(
history,
skip_failures=False,
max_retries=1,
summary_llm=mock_summary_llm,
)
# Should have 3 results: navigate + click + AI summary
assert len(results) == 3
# First result should be navigation success
nav_result = results[0]
assert nav_result.error is None
# Second result should be click success (matched via ax_name)
click_result = results[1]
assert click_result.error is None, f'Click should succeed via ax_name matching, got error: {click_result.error}'
# Third result should be AI summary
summary_result = results[2]
assert summary_result.is_done is True
finally:
await agent.close()
async def test_ax_name_matching_requires_same_node_type(httpserver):
"""Test that ax_name matching also requires matching node type.
Even if ax_name matches, the node type (DIV, BUTTON, etc.) must also match.
"""
test_html = """<!DOCTYPE html>
<html>
<body>
<button aria-label="Submit">Submit</button>
<div aria-label="Submit">Submit Label</div>
</body>
</html>"""
httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
test_url = httpserver.url_for('/test')
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Historical element is a SPAN with ax_name "Submit"
# Page has BUTTON and DIV with same ax_name, but no SPAN
historical_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='SPAN', # SPAN - won't match BUTTON or DIV
attributes={},
x_path='html/body/span',
element_hash=111,
stable_hash=111,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='Submit', # Same ax_name, but wrong node type
)
navigate_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Navigate',
next_goal=None,
action=[{'navigate': {'url': test_url}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Navigated')],
state=BrowserStateHistory(
url=test_url,
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
click_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click Submit',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url=test_url,
title='Test',
tabs=[],
interacted_element=[historical_element],
),
metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
)
history = AgentHistoryList(history=[navigate_step, click_step])
try:
# Should fail because no SPAN with ax_name "Submit" exists
await agent.rerun_history(
history,
skip_failures=False,
max_retries=1,
)
assert False, 'Expected RuntimeError - no matching SPAN element'
except RuntimeError as e:
# Expected - no SPAN element with ax_name "Submit"
assert 'failed after 1 attempts' in str(e)
finally:
await agent.close()
def test_match_level_enum_includes_ax_name():
"""Test that MatchLevel enum includes AX_NAME level."""
assert hasattr(MatchLevel, 'AX_NAME')
assert MatchLevel.AX_NAME.value == 4
assert MatchLevel.ATTRIBUTE.value == 5 # AX_NAME comes before ATTRIBUTE
async def test_ax_name_matching_before_attribute_matching(httpserver):
"""Test that ax_name matching (Level 4) is tried before attribute matching (Level 5).
This ensures the correct matching order: EXACT -> STABLE -> XPATH -> AX_NAME -> ATTRIBUTE
"""
# Page has element with text content that becomes its ax_name
# The DIV has role="menuitem" and text "Contact" which becomes its accessible name
# but NO aria-label/id/name attributes - so attribute matching will fail but ax_name should work
test_html = """<!DOCTYPE html>
<html>
<body>
<div role="menuitem">Contact</div>
</body>
</html>"""
httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
test_url = httpserver.url_for('/test')
summary_action = RerunSummaryAction(
summary='Rerun completed',
success=True,
completion_status='complete',
)
async def custom_ainvoke(*args, **kwargs):
output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
if output_format is RerunSummaryAction:
from browser_use.llm.views import ChatInvokeCompletion
return ChatInvokeCompletion(completion=summary_action, usage=None)
raise ValueError('Unexpected output_format')
mock_summary_llm = AsyncMock()
mock_summary_llm.ainvoke.side_effect = custom_ainvoke
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Historical element has NO aria-label attribute (attribute matching will fail)
# but HAS ax_name (ax_name matching should work)
historical_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV',
# No aria-label, id, or name - attribute matching will fail
attributes={'role': 'menuitem'},
x_path='html/body/div[99]', # Wrong xpath
element_hash=12345, # Wrong hash
stable_hash=12345, # Wrong stable hash
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='Contact', # ax_name from accessibility tree
)
navigate_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Navigate',
next_goal=None,
action=[{'navigate': {'url': test_url}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Navigated')],
state=BrowserStateHistory(
url=test_url,
title='Test',
tabs=[],
interacted_element=[None],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
click_step = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click Contact',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url=test_url,
title='Test',
tabs=[],
interacted_element=[historical_element],
),
metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
)
history = AgentHistoryList(history=[navigate_step, click_step])
try:
# Should succeed via ax_name matching (Level 4)
# since hash, stable_hash, xpath all fail but ax_name matches
results = await agent.rerun_history(
history,
skip_failures=False,
max_retries=1,
summary_llm=mock_summary_llm,
)
# Navigation + click + summary
assert len(results) == 3
# Click should succeed (matched via ax_name)
click_result = results[1]
assert click_result.error is None, f'Expected ax_name match to succeed, got: {click_result.error}'
finally:
await agent.close()
# Tests for dropdown/menu re-opening behavior
def test_is_menu_opener_step_detects_aria_haspopup():
"""Test that _is_menu_opener_step detects aria-haspopup elements."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Element with aria-haspopup="true" should be detected as menu opener
opener_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV',
attributes={'aria-haspopup': 'true', 'class': 'dropdown-trigger'},
x_path='html/body/div',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='Contact',
)
history_item = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click dropdown',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[opener_element],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
assert agent._is_menu_opener_step(history_item) is True
def test_is_menu_opener_step_detects_guidewire_toggle():
"""Test that _is_menu_opener_step detects Guidewire toggleSubMenu pattern."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Element with data-gw-click="toggleSubMenu" should be detected
opener_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV',
attributes={'data-gw-click': 'toggleSubMenu', 'class': 'gw-action--expand-button'},
x_path='html/body/div',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name=None,
)
history_item = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Toggle menu',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Toggled')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[opener_element],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
assert agent._is_menu_opener_step(history_item) is True
def test_is_menu_opener_step_returns_false_for_regular_element():
"""Test that _is_menu_opener_step returns False for non-menu elements."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
AgentOutput = agent.AgentOutput
# Regular button without menu attributes
regular_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='BUTTON',
attributes={'class': 'submit-btn', 'type': 'submit'},
x_path='html/body/button',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='Submit',
)
history_item = AgentHistory(
model_output=AgentOutput(
evaluation_previous_goal=None,
memory='Click submit',
next_goal=None,
action=[{'click': {'index': 1}}], # type: ignore[arg-type]
),
result=[ActionResult(long_term_memory='Clicked')],
state=BrowserStateHistory(
url='http://test.com',
title='Test',
tabs=[],
interacted_element=[regular_element],
),
metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
)
assert agent._is_menu_opener_step(history_item) is False
def test_is_menu_item_element_detects_role_menuitem():
"""Test that _is_menu_item_element detects role=menuitem."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
menu_item = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV',
attributes={'role': 'menuitem', 'class': 'menu-option'},
x_path='html/body/div/div',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='New Contact',
)
assert agent._is_menu_item_element(menu_item) is True
def test_is_menu_item_element_detects_guidewire_class():
"""Test that _is_menu_item_element detects Guidewire gw-action--inner class."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
menu_item = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='DIV',
attributes={'class': 'gw-action--inner gw-hasDivider', 'aria-haspopup': 'true'},
x_path='html/body/div/div',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='New Contact',
)
assert agent._is_menu_item_element(menu_item) is True
def test_is_menu_item_element_returns_false_for_regular_element():
"""Test that _is_menu_item_element returns False for non-menu elements."""
llm = create_mock_llm(actions=None)
agent = Agent(task='Test task', llm=llm)
regular_element = DOMInteractedElement(
node_id=1,
backend_node_id=1,
frame_id=None,
node_type=NodeType.ELEMENT_NODE,
node_value='',
node_name='BUTTON',
attributes={'class': 'submit-btn', 'type': 'submit'},
x_path='html/body/button',
element_hash=12345,
stable_hash=12345,
bounds=DOMRect(x=0, y=0, width=100, height=50),
ax_name='Submit',
)
assert agent._is_menu_item_element(regular_element) is False
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_ax_name_matching.py",
"license": "MIT License",
"lines": 481,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_coordinate_clicking.py | """Tests for coordinate clicking feature.
This feature allows certain models (Claude Sonnet 4, Claude Opus 4, Gemini 3 Pro, browser-use/* models)
to use coordinate-based clicking, while other models only get index-based clicking.
"""
import pytest
from browser_use.tools.service import Tools
from browser_use.tools.views import ClickElementAction, ClickElementActionIndexOnly
class TestCoordinateClickingTools:
"""Test the Tools class coordinate clicking functionality."""
def test_default_coordinate_clicking_disabled(self):
"""By default, coordinate clicking should be disabled."""
tools = Tools()
assert tools._coordinate_clicking_enabled is False
def test_default_uses_index_only_action(self):
"""Default Tools should use ClickElementActionIndexOnly."""
tools = Tools()
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
assert click_action.param_model == ClickElementActionIndexOnly
def test_default_click_schema_has_only_index(self):
"""Default click action schema should only have index property."""
tools = Tools()
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
schema = click_action.param_model.model_json_schema()
assert 'index' in schema['properties']
assert 'coordinate_x' not in schema['properties']
assert 'coordinate_y' not in schema['properties']
def test_enable_coordinate_clicking(self):
"""Enabling coordinate clicking should switch to ClickElementAction."""
tools = Tools()
tools.set_coordinate_clicking(True)
assert tools._coordinate_clicking_enabled is True
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
assert click_action.param_model == ClickElementAction
def test_enabled_click_schema_has_coordinates(self):
"""Enabled click action schema should have index and coordinate properties."""
tools = Tools()
tools.set_coordinate_clicking(True)
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
schema = click_action.param_model.model_json_schema()
assert 'index' in schema['properties']
assert 'coordinate_x' in schema['properties']
assert 'coordinate_y' in schema['properties']
def test_disable_coordinate_clicking(self):
"""Disabling coordinate clicking should switch back to index-only."""
tools = Tools()
tools.set_coordinate_clicking(True)
tools.set_coordinate_clicking(False)
assert tools._coordinate_clicking_enabled is False
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
assert click_action.param_model == ClickElementActionIndexOnly
def test_set_coordinate_clicking_idempotent(self):
"""Setting the same value twice should not cause issues."""
tools = Tools()
# Enable twice
tools.set_coordinate_clicking(True)
tools.set_coordinate_clicking(True)
assert tools._coordinate_clicking_enabled is True
# Disable twice
tools.set_coordinate_clicking(False)
tools.set_coordinate_clicking(False)
assert tools._coordinate_clicking_enabled is False
def test_schema_title_consistent(self):
"""Schema title should be 'ClickElementAction' regardless of mode."""
tools = Tools()
# Check default (disabled)
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
schema = click_action.param_model.model_json_schema()
assert schema['title'] == 'ClickElementAction'
# Check enabled
tools.set_coordinate_clicking(True)
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
schema = click_action.param_model.model_json_schema()
assert schema['title'] == 'ClickElementAction'
class TestCoordinateClickingModelDetection:
"""Test the model detection logic for coordinate clicking."""
@pytest.mark.parametrize(
'model_name,expected_coords',
[
# Models that SHOULD have coordinate clicking (claude-sonnet-4*, claude-opus-4*, gemini-3-pro*, browser-use/*)
('claude-sonnet-4-5', True),
('claude-sonnet-4-5-20250101', True),
('claude-sonnet-4-0', True),
('claude-sonnet-4', True),
('claude-opus-4-5', True),
('claude-opus-4-5-latest', True),
('claude-opus-4-0', True),
('claude-opus-4', True),
('gemini-3-pro-preview', True),
('gemini-3-pro', True),
('browser-use/fast', True),
('browser-use/accurate', True),
('CLAUDE-SONNET-4-5', True), # Case insensitive
('CLAUDE-SONNET-4', True), # Case insensitive
('GEMINI-3-PRO', True), # Case insensitive
# Models that should NOT have coordinate clicking
('claude-3-5-sonnet', False),
('claude-sonnet-3-5', False),
('gpt-4o', False),
('gpt-4-turbo', False),
('gemini-2.0-flash', False),
('gemini-1.5-pro', False),
('llama-3.1-70b', False),
('mistral-large', False),
],
)
def test_model_detection_patterns(self, model_name: str, expected_coords: bool):
"""Test that the model detection patterns correctly identify coordinate-capable models."""
model_lower = model_name.lower()
supports_coords = any(
pattern in model_lower for pattern in ['claude-sonnet-4', 'claude-opus-4', 'gemini-3-pro', 'browser-use/']
)
assert supports_coords == expected_coords, f'Model {model_name}: expected {expected_coords}, got {supports_coords}'
class TestCoordinateClickingWithPassedTools:
"""Test that coordinate clicking works correctly when Tools is passed to Agent."""
def test_tools_can_be_modified_after_creation(self):
"""Tools created externally can have coordinate clicking enabled."""
tools = Tools()
assert tools._coordinate_clicking_enabled is False
# Simulate what Agent does for coordinate-capable models
tools.set_coordinate_clicking(True)
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
assert click_action.param_model == ClickElementAction
def test_tools_state_preserved_after_modification(self):
"""Verify that other tool state is preserved when toggling coordinate clicking."""
tools = Tools(exclude_actions=['search'])
# Search should be excluded
assert 'search' not in tools.registry.registry.actions
# Enable coordinate clicking
tools.set_coordinate_clicking(True)
# Search should still be excluded
assert 'search' not in tools.registry.registry.actions
# Click should have coordinates
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
assert click_action.param_model == ClickElementAction
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_coordinate_clicking.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_extension_config.py | """Tests for extension configuration environment variables."""
import os
import pytest
class TestDisableExtensionsEnvVar:
"""Test BROWSER_USE_DISABLE_EXTENSIONS environment variable."""
def test_default_value_is_true(self):
"""Without env var set, enable_default_extensions should default to True."""
# Clear the env var if it exists
original = os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
try:
# Import fresh to get the default
from browser_use.browser.profile import _get_enable_default_extensions_default
assert _get_enable_default_extensions_default() is True
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
@pytest.mark.parametrize(
'env_value,expected_enabled',
[
# Truthy values for DISABLE = extensions disabled (False)
('true', False),
('True', False),
('TRUE', False),
('1', False),
('yes', False),
('on', False),
# Falsy values for DISABLE = extensions enabled (True)
('false', True),
('False', True),
('FALSE', True),
('0', True),
('no', True),
('off', True),
('', True),
],
)
def test_env_var_values(self, env_value: str, expected_enabled: bool):
"""Test various env var values are parsed correctly."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = env_value
from browser_use.browser.profile import _get_enable_default_extensions_default
result = _get_enable_default_extensions_default()
assert result is expected_enabled, (
f"Expected enable_default_extensions={expected_enabled} for DISABLE_EXTENSIONS='{env_value}', got {result}"
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_browser_profile_uses_env_var(self):
"""Test that BrowserProfile picks up the env var."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
# Test with env var set to true (disable extensions)
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'true'
from browser_use.browser.profile import BrowserProfile
profile = BrowserProfile(headless=True)
assert profile.enable_default_extensions is False, (
'BrowserProfile should disable extensions when BROWSER_USE_DISABLE_EXTENSIONS=true'
)
# Test with env var set to false (enable extensions)
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'false'
profile2 = BrowserProfile(headless=True)
assert profile2.enable_default_extensions is True, (
'BrowserProfile should enable extensions when BROWSER_USE_DISABLE_EXTENSIONS=false'
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_explicit_param_overrides_env_var(self):
"""Test that explicit enable_default_extensions parameter overrides env var."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = 'true'
from browser_use.browser.profile import BrowserProfile
# Explicitly set to True should override env var
profile = BrowserProfile(headless=True, enable_default_extensions=True)
assert profile.enable_default_extensions is True, 'Explicit param should override env var'
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
def test_browser_session_uses_env_var(self):
"""Test that BrowserSession picks up the env var via BrowserProfile."""
original = os.environ.get('BROWSER_USE_DISABLE_EXTENSIONS')
try:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = '1'
from browser_use.browser import BrowserSession
session = BrowserSession(headless=True)
assert session.browser_profile.enable_default_extensions is False, (
'BrowserSession should disable extensions when BROWSER_USE_DISABLE_EXTENSIONS=1'
)
finally:
if original is not None:
os.environ['BROWSER_USE_DISABLE_EXTENSIONS'] = original
else:
os.environ.pop('BROWSER_USE_DISABLE_EXTENSIONS', None)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_extension_config.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_fallback_llm.py | """
Tests for the fallback_llm feature in Agent.
Tests verify that when the primary LLM fails with rate limit (429) or server errors (503, 502, 500, 504),
the agent automatically switches to the fallback LLM and continues execution.
"""
from unittest.mock import AsyncMock
import pytest
from browser_use.agent.views import AgentOutput
from browser_use.llm import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.tools.service import Tools
def create_mock_llm(
model_name: str = 'mock-llm',
should_fail: bool = False,
fail_with: type[Exception] | None = None,
fail_status_code: int = 429,
fail_message: str = 'Rate limit exceeded',
) -> BaseChatModel:
"""Create a mock LLM for testing.
Args:
model_name: Name of the mock model
should_fail: If True, the LLM will raise an exception
fail_with: Exception type to raise (ModelRateLimitError or ModelProviderError)
fail_status_code: HTTP status code for the error
fail_message: Error message
"""
tools = Tools()
ActionModel = tools.registry.create_action_model()
AgentOutputWithActions = AgentOutput.type_with_custom_actions(ActionModel)
llm = AsyncMock(spec=BaseChatModel)
llm.model = model_name
llm._verified_api_keys = True
llm.provider = 'mock'
llm.name = model_name
llm.model_name = model_name
default_done_action = """
{
"thinking": "null",
"evaluation_previous_goal": "Successfully completed the task",
"memory": "Task completed",
"next_goal": "Task completed",
"action": [
{
"done": {
"text": "Task completed successfully",
"success": true
}
}
]
}
"""
async def mock_ainvoke(*args, **kwargs):
if should_fail:
if fail_with == ModelRateLimitError:
raise ModelRateLimitError(message=fail_message, status_code=fail_status_code, model=model_name)
elif fail_with == ModelProviderError:
raise ModelProviderError(message=fail_message, status_code=fail_status_code, model=model_name)
else:
raise Exception(fail_message)
output_format = kwargs.get('output_format')
if output_format is None:
return ChatInvokeCompletion(completion=default_done_action, usage=None)
else:
parsed = output_format.model_validate_json(default_done_action)
return ChatInvokeCompletion(completion=parsed, usage=None)
llm.ainvoke.side_effect = mock_ainvoke
return llm
class TestFallbackLLMParameter:
"""Test fallback_llm parameter initialization."""
def test_fallback_llm_none_by_default(self):
"""Verify fallback_llm defaults to None."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
agent = Agent(task='Test task', llm=primary)
assert agent._fallback_llm is None
assert agent._using_fallback_llm is False
assert agent._original_llm is primary
def test_fallback_llm_single_model(self):
"""Test passing a fallback LLM."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
assert agent._fallback_llm is fallback
assert agent._using_fallback_llm is False
def test_public_properties(self):
"""Test the public properties for fallback status."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
# Before fallback
assert agent.is_using_fallback_llm is False
assert agent.current_llm_model == 'primary-model'
# Trigger fallback
error = ModelRateLimitError(message='Rate limit', status_code=429, model='primary')
agent._try_switch_to_fallback_llm(error)
# After fallback
assert agent.is_using_fallback_llm is True
assert agent.current_llm_model == 'fallback-model'
class TestFallbackLLMSwitching:
"""Test the fallback switching logic in _try_switch_to_fallback_llm."""
def test_switch_on_rate_limit_error(self):
"""Test that agent switches to fallback on ModelRateLimitError."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelRateLimitError(message='Rate limit exceeded', status_code=429, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
assert agent._using_fallback_llm is True
def test_switch_on_503_error(self):
"""Test that agent switches to fallback on 503 Service Unavailable."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Service unavailable', status_code=503, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
assert agent._using_fallback_llm is True
def test_switch_on_500_error(self):
"""Test that agent switches to fallback on 500 Internal Server Error."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Internal server error', status_code=500, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
def test_switch_on_502_error(self):
"""Test that agent switches to fallback on 502 Bad Gateway."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Bad gateway', status_code=502, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
def test_no_switch_on_400_error(self):
"""Test that agent does NOT switch on 400 Bad Request (not retryable)."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Bad request', status_code=400, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is False
assert agent.llm is primary # Still using primary
assert agent._using_fallback_llm is False
def test_switch_on_401_error(self):
"""Test that agent switches to fallback on 401 Unauthorized (API key error)."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Invalid API key', status_code=401, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
assert agent._using_fallback_llm is True
def test_switch_on_402_error(self):
"""Test that agent switches to fallback on 402 Payment Required (insufficient credits)."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
error = ModelProviderError(message='Insufficient credits', status_code=402, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
assert agent._using_fallback_llm is True
def test_no_switch_when_no_fallback_configured(self):
"""Test that agent returns False when no fallback is configured."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
agent = Agent(task='Test task', llm=primary)
error = ModelRateLimitError(message='Rate limit exceeded', status_code=429, model='primary-model')
result = agent._try_switch_to_fallback_llm(error)
assert result is False
assert agent.llm is primary
def test_no_switch_when_already_using_fallback(self):
"""Test that agent doesn't switch again when already using fallback."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
# First switch succeeds
error = ModelRateLimitError(message='Rate limit', status_code=429, model='primary')
result = agent._try_switch_to_fallback_llm(error)
assert result is True
assert agent.llm is fallback
# Second switch fails - already using fallback
result = agent._try_switch_to_fallback_llm(error)
assert result is False
assert agent.llm is fallback # Still on fallback
class TestFallbackLLMIntegration:
"""Integration tests for fallback LLM behavior in get_model_output."""
def _create_failing_mock_llm(
self,
model_name: str,
fail_with: type[Exception],
fail_status_code: int = 429,
fail_message: str = 'Rate limit exceeded',
) -> BaseChatModel:
"""Create a mock LLM that always fails with the specified error."""
llm = AsyncMock(spec=BaseChatModel)
llm.model = model_name
llm._verified_api_keys = True
llm.provider = 'mock'
llm.name = model_name
llm.model_name = model_name
async def mock_ainvoke(*args, **kwargs):
if fail_with == ModelRateLimitError:
raise ModelRateLimitError(message=fail_message, status_code=fail_status_code, model=model_name)
elif fail_with == ModelProviderError:
raise ModelProviderError(message=fail_message, status_code=fail_status_code, model=model_name)
else:
raise Exception(fail_message)
llm.ainvoke.side_effect = mock_ainvoke
return llm
def _create_succeeding_mock_llm(self, model_name: str, agent) -> BaseChatModel:
"""Create a mock LLM that succeeds and returns a valid AgentOutput."""
llm = AsyncMock(spec=BaseChatModel)
llm.model = model_name
llm._verified_api_keys = True
llm.provider = 'mock'
llm.name = model_name
llm.model_name = model_name
default_done_action = """
{
"thinking": "null",
"evaluation_previous_goal": "Successfully completed the task",
"memory": "Task completed",
"next_goal": "Task completed",
"action": [
{
"done": {
"text": "Task completed successfully",
"success": true
}
}
]
}
"""
# Capture the agent reference for use in the closure
captured_agent = agent
async def mock_ainvoke(*args, **kwargs):
# Get the output format from kwargs and use it to parse
output_format = kwargs.get('output_format')
if output_format is not None:
parsed = output_format.model_validate_json(default_done_action)
return ChatInvokeCompletion(completion=parsed, usage=None)
# Fallback: use the agent's AgentOutput type
parsed = captured_agent.AgentOutput.model_validate_json(default_done_action)
return ChatInvokeCompletion(completion=parsed, usage=None)
llm.ainvoke.side_effect = mock_ainvoke
return llm
@pytest.mark.asyncio
async def test_get_model_output_switches_to_fallback_on_rate_limit(self, browser_session):
"""Test that get_model_output automatically switches to fallback on rate limit."""
from browser_use import Agent
# Create agent first with a working mock LLM
placeholder = create_mock_llm('placeholder')
agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)
# Create a failing primary and succeeding fallback
primary = self._create_failing_mock_llm(
'primary-model',
fail_with=ModelRateLimitError,
fail_status_code=429,
fail_message='Rate limit exceeded',
)
fallback = self._create_succeeding_mock_llm('fallback-model', agent)
# Replace the LLM and set up fallback
agent.llm = primary
agent._original_llm = primary
agent._fallback_llm = fallback
from browser_use.llm.messages import BaseMessage, UserMessage
messages: list[BaseMessage] = [UserMessage(content='Test message')]
# This should switch to fallback and succeed
result = await agent.get_model_output(messages)
assert result is not None
assert agent.llm is fallback
assert agent._using_fallback_llm is True
@pytest.mark.asyncio
async def test_get_model_output_raises_when_no_fallback(self, browser_session):
"""Test that get_model_output raises error when no fallback is configured."""
from browser_use import Agent
# Create agent first with a working mock LLM
placeholder = create_mock_llm('placeholder')
agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)
# Replace with failing LLM
primary = self._create_failing_mock_llm(
'primary-model',
fail_with=ModelRateLimitError,
fail_status_code=429,
fail_message='Rate limit exceeded',
)
agent.llm = primary
agent._original_llm = primary
agent._fallback_llm = None # No fallback
from browser_use.llm.messages import BaseMessage, UserMessage
messages: list[BaseMessage] = [UserMessage(content='Test message')]
# This should raise since no fallback is configured
with pytest.raises(ModelRateLimitError):
await agent.get_model_output(messages)
@pytest.mark.asyncio
async def test_get_model_output_raises_when_fallback_also_fails(self, browser_session):
"""Test that error is raised when fallback also fails."""
from browser_use import Agent
# Create agent first with a working mock LLM
placeholder = create_mock_llm('placeholder')
agent = Agent(task='Test task', llm=placeholder, browser_session=browser_session)
# Both models fail
primary = self._create_failing_mock_llm('primary', fail_with=ModelRateLimitError, fail_status_code=429)
fallback = self._create_failing_mock_llm('fallback', fail_with=ModelProviderError, fail_status_code=503)
agent.llm = primary
agent._original_llm = primary
agent._fallback_llm = fallback
from browser_use.llm.messages import BaseMessage, UserMessage
messages: list[BaseMessage] = [UserMessage(content='Test message')]
# Should fail after fallback also fails
with pytest.raises((ModelRateLimitError, ModelProviderError)):
await agent.get_model_output(messages)
if __name__ == '__main__':
pytest.main([__file__, '-v'])
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_fallback_llm.py",
"license": "MIT License",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_markdown_extractor.py | """Tests for markdown extractor preprocessing."""
from browser_use.dom.markdown_extractor import _preprocess_markdown_content
class TestPreprocessMarkdownContent:
"""Tests for _preprocess_markdown_content function."""
def test_preserves_short_lines(self):
"""Short lines (1-2 chars) should be preserved, not removed."""
content = '# Items\na\nb\nc\nOK\nNo'
filtered, _ = _preprocess_markdown_content(content)
assert 'a' in filtered.split('\n')
assert 'b' in filtered.split('\n')
assert 'c' in filtered.split('\n')
assert 'OK' in filtered.split('\n')
assert 'No' in filtered.split('\n')
def test_preserves_single_digit_numbers(self):
"""Single digit page numbers should be preserved."""
content = 'Page navigation:\n1\n2\n3\n10'
filtered, _ = _preprocess_markdown_content(content)
lines = filtered.split('\n')
assert '1' in lines
assert '2' in lines
assert '3' in lines
assert '10' in lines
def test_preserves_markdown_list_items(self):
"""Markdown list items with short content should be preserved."""
content = 'Shopping list:\n- a\n- b\n- OK\n- No'
filtered, _ = _preprocess_markdown_content(content)
assert '- a' in filtered
assert '- b' in filtered
assert '- OK' in filtered
assert '- No' in filtered
def test_preserves_state_codes(self):
"""Two-letter state codes should be preserved."""
content = 'States:\nCA\nNY\nTX'
filtered, _ = _preprocess_markdown_content(content)
lines = filtered.split('\n')
assert 'CA' in lines
assert 'NY' in lines
assert 'TX' in lines
def test_removes_empty_lines(self):
"""Empty and whitespace-only lines should be removed."""
content = 'Header\n\n \n\nContent'
filtered, _ = _preprocess_markdown_content(content)
# Should not have empty lines
for line in filtered.split('\n'):
assert line.strip(), f'Found empty line in output: {repr(line)}'
def test_removes_large_json_blobs(self):
"""Large JSON-like lines (>100 chars) should be removed."""
# Create a JSON blob > 100 chars
json_blob = '{"key": "' + 'x' * 100 + '"}'
content = f'Header\n{json_blob}\nFooter'
filtered, _ = _preprocess_markdown_content(content)
assert json_blob not in filtered
assert 'Header' in filtered
assert 'Footer' in filtered
def test_preserves_small_json(self):
"""Small JSON objects (<100 chars) should be preserved."""
small_json = '{"key": "value"}'
content = f'Header\n{small_json}\nFooter'
filtered, _ = _preprocess_markdown_content(content)
assert small_json in filtered
def test_compresses_multiple_newlines(self):
"""4+ consecutive newlines should be compressed to max_newlines."""
content = 'Header\n\n\n\n\nFooter'
filtered, _ = _preprocess_markdown_content(content, max_newlines=2)
# After filtering empty lines, we should have just Header and Footer
lines = [line for line in filtered.split('\n') if line.strip()]
assert lines == ['Header', 'Footer']
def test_returns_chars_filtered_count(self):
"""Should return count of characters removed."""
content = 'Header\n\n\n\n\nFooter'
_, chars_filtered = _preprocess_markdown_content(content)
assert chars_filtered > 0
def test_strips_result(self):
"""Result should be stripped of leading/trailing whitespace."""
content = ' \n\nContent\n\n '
filtered, _ = _preprocess_markdown_content(content)
assert not filtered.startswith(' ')
assert not filtered.startswith('\n')
assert not filtered.endswith(' ')
assert not filtered.endswith('\n')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_markdown_extractor.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_sandbox_structured_output.py | """
Tests for sandbox structured output handling.
Tests that output_model_schema works correctly when using @sandbox decorator,
specifically that the _output_model_schema private attribute is preserved
through serialization/deserialization.
"""
from pydantic import BaseModel
from browser_use.agent.views import ActionResult, AgentHistory, AgentHistoryList, BrowserStateHistory
from browser_use.sandbox.sandbox import _parse_with_type_annotation
class ExtractedData(BaseModel):
"""Example structured output model"""
title: str
price: float
in_stock: bool
class NestedModel(BaseModel):
"""Nested model for testing complex structures"""
items: list[ExtractedData]
total_count: int
class TestGetStructuredOutput:
"""Tests for AgentHistoryList.get_structured_output method"""
def test_get_structured_output_parses_final_result(self):
"""Test that get_structured_output correctly parses final result with provided schema"""
# Create history with structured JSON as final result
json_result = '{"title": "Test Product", "price": 29.99, "in_stock": true}'
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=json_result, is_done=True)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
# Use get_structured_output with explicit schema
result = history.get_structured_output(ExtractedData)
assert result is not None
assert isinstance(result, ExtractedData)
assert result.title == 'Test Product'
assert result.price == 29.99
assert result.in_stock is True
def test_get_structured_output_returns_none_when_no_final_result(self):
"""Test that get_structured_output returns None when there's no final result"""
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=None)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
result = history.get_structured_output(ExtractedData)
assert result is None
def test_get_structured_output_with_nested_model(self):
"""Test get_structured_output works with nested Pydantic models"""
json_result = """
{
"items": [
{"title": "Item 1", "price": 10.0, "in_stock": true},
{"title": "Item 2", "price": 20.0, "in_stock": false}
],
"total_count": 2
}
"""
history = AgentHistoryList(
history=[
AgentHistory(
model_output=None,
result=[ActionResult(extracted_content=json_result, is_done=True)],
state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
)
]
)
result = history.get_structured_output(NestedModel)
assert result is not None
assert len(result.items) == 2
assert result.items[0].title == 'Item 1'
assert result.total_count == 2
class TestSandboxStructuredOutputParsing:
"""Tests for _parse_with_type_annotation handling of AgentHistoryList[T]"""
def test_parse_agent_history_list_without_generic(self):
"""Test parsing AgentHistoryList without generic parameter"""
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': '{"title": "Test", "price": 9.99, "in_stock": true}', 'is_done': True}],
'state': {'url': 'https://example.com', 'title': 'Test', 'tabs': []},
}
]
}
result = _parse_with_type_annotation(data, AgentHistoryList)
assert isinstance(result, AgentHistoryList)
assert len(result.history) == 1
# Without generic, _output_model_schema should be None
assert result._output_model_schema is None
def test_parse_agent_history_list_with_generic_parameter(self):
"""Test parsing AgentHistoryList[ExtractedData] preserves output model schema"""
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': '{"title": "Test", "price": 9.99, "in_stock": true}', 'is_done': True}],
'state': {'url': 'https://example.com', 'title': 'Test', 'tabs': []},
}
]
}
# Parse with generic type annotation
result = _parse_with_type_annotation(data, AgentHistoryList[ExtractedData])
assert isinstance(result, AgentHistoryList)
assert len(result.history) == 1
# With generic, _output_model_schema should be set
assert result._output_model_schema is ExtractedData
# Now structured_output property should work
structured = result.structured_output
assert structured is not None
assert isinstance(structured, ExtractedData)
assert structured.title == 'Test'
assert structured.price == 9.99
assert structured.in_stock is True
def test_parse_agent_history_list_structured_output_after_sandbox(self):
"""Simulate full sandbox round-trip with AgentHistoryList[T]"""
# This simulates what happens when sandbox returns data
json_content = '{"title": "Product", "price": 49.99, "in_stock": false}'
data = {
'history': [
{
'model_output': None,
'result': [{'extracted_content': json_content, 'is_done': True}],
'state': {'url': 'https://shop.com', 'title': 'Shop', 'tabs': []},
}
]
}
# Sandbox parses with return type annotation AgentHistoryList[ExtractedData]
result = _parse_with_type_annotation(data, AgentHistoryList[ExtractedData])
# User accesses structured_output property
output = result.structured_output
assert output is not None
assert output.title == 'Product'
assert output.price == 49.99
assert output.in_stock is False
class TestStructuredOutputPropertyFallback:
	"""Tests for structured_output property behavior with and without _output_model_schema"""

	@staticmethod
	def _single_step_history(json_result: str):
		"""Build a one-step history whose done-action carries json_result."""
		return AgentHistoryList(
			history=[
				AgentHistory(
					model_output=None,
					result=[ActionResult(extracted_content=json_result, is_done=True)],
					state=BrowserStateHistory(url='https://example.com', title='Test', tabs=[], interacted_element=[]),
				)
			]
		)

	def test_structured_output_property_works_when_schema_set(self):
		"""Test structured_output property works when _output_model_schema is set"""
		history = self._single_step_history('{"title": "Test", "price": 5.0, "in_stock": true}')
		# Manually set the schema (as Agent.run() does)
		history._output_model_schema = ExtractedData
		parsed = history.structured_output
		assert parsed is not None
		assert isinstance(parsed, ExtractedData)
		assert parsed.title == 'Test'

	def test_structured_output_property_returns_none_without_schema(self):
		"""Test structured_output property returns None when _output_model_schema is not set"""
		history = self._single_step_history('{"title": "Test", "price": 5.0, "in_stock": true}')
		# Property returns None because the schema was never attached
		assert history.structured_output is None
		# But get_structured_output with explicit schema works
		explicit_result = history.get_structured_output(ExtractedData)
		assert explicit_result is not None
		assert explicit_result.title == 'Test'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_sandbox_structured_output.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/actor/utils.py | """Utility functions for actor operations."""
class Utils:
"""Utility functions for actor operations."""
@staticmethod
def get_key_info(key: str) -> tuple[str, int | None]:
"""Get the code and windowsVirtualKeyCode for a key.
Args:
key: Key name (e.g., 'Enter', 'ArrowUp', 'a', 'A')
Returns:
Tuple of (code, windowsVirtualKeyCode)
Reference: Windows Virtual Key Codes
https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
"""
# Complete mapping of key names to (code, virtualKeyCode)
# Based on standard Windows Virtual Key Codes
key_map = {
# Navigation keys
'Backspace': ('Backspace', 8),
'Tab': ('Tab', 9),
'Enter': ('Enter', 13),
'Escape': ('Escape', 27),
'Space': ('Space', 32),
' ': ('Space', 32),
'PageUp': ('PageUp', 33),
'PageDown': ('PageDown', 34),
'End': ('End', 35),
'Home': ('Home', 36),
'ArrowLeft': ('ArrowLeft', 37),
'ArrowUp': ('ArrowUp', 38),
'ArrowRight': ('ArrowRight', 39),
'ArrowDown': ('ArrowDown', 40),
'Insert': ('Insert', 45),
'Delete': ('Delete', 46),
# Modifier keys
'Shift': ('ShiftLeft', 16),
'ShiftLeft': ('ShiftLeft', 16),
'ShiftRight': ('ShiftRight', 16),
'Control': ('ControlLeft', 17),
'ControlLeft': ('ControlLeft', 17),
'ControlRight': ('ControlRight', 17),
'Alt': ('AltLeft', 18),
'AltLeft': ('AltLeft', 18),
'AltRight': ('AltRight', 18),
'Meta': ('MetaLeft', 91),
'MetaLeft': ('MetaLeft', 91),
'MetaRight': ('MetaRight', 92),
# Function keys F1-F24
'F1': ('F1', 112),
'F2': ('F2', 113),
'F3': ('F3', 114),
'F4': ('F4', 115),
'F5': ('F5', 116),
'F6': ('F6', 117),
'F7': ('F7', 118),
'F8': ('F8', 119),
'F9': ('F9', 120),
'F10': ('F10', 121),
'F11': ('F11', 122),
'F12': ('F12', 123),
'F13': ('F13', 124),
'F14': ('F14', 125),
'F15': ('F15', 126),
'F16': ('F16', 127),
'F17': ('F17', 128),
'F18': ('F18', 129),
'F19': ('F19', 130),
'F20': ('F20', 131),
'F21': ('F21', 132),
'F22': ('F22', 133),
'F23': ('F23', 134),
'F24': ('F24', 135),
# Numpad keys
'NumLock': ('NumLock', 144),
'Numpad0': ('Numpad0', 96),
'Numpad1': ('Numpad1', 97),
'Numpad2': ('Numpad2', 98),
'Numpad3': ('Numpad3', 99),
'Numpad4': ('Numpad4', 100),
'Numpad5': ('Numpad5', 101),
'Numpad6': ('Numpad6', 102),
'Numpad7': ('Numpad7', 103),
'Numpad8': ('Numpad8', 104),
'Numpad9': ('Numpad9', 105),
'NumpadMultiply': ('NumpadMultiply', 106),
'NumpadAdd': ('NumpadAdd', 107),
'NumpadSubtract': ('NumpadSubtract', 109),
'NumpadDecimal': ('NumpadDecimal', 110),
'NumpadDivide': ('NumpadDivide', 111),
# Lock keys
'CapsLock': ('CapsLock', 20),
'ScrollLock': ('ScrollLock', 145),
# OEM/Punctuation keys (US keyboard layout)
'Semicolon': ('Semicolon', 186),
';': ('Semicolon', 186),
'Equal': ('Equal', 187),
'=': ('Equal', 187),
'Comma': ('Comma', 188),
',': ('Comma', 188),
'Minus': ('Minus', 189),
'-': ('Minus', 189),
'Period': ('Period', 190),
'.': ('Period', 190),
'Slash': ('Slash', 191),
'/': ('Slash', 191),
'Backquote': ('Backquote', 192),
'`': ('Backquote', 192),
'BracketLeft': ('BracketLeft', 219),
'[': ('BracketLeft', 219),
'Backslash': ('Backslash', 220),
'\\': ('Backslash', 220),
'BracketRight': ('BracketRight', 221),
']': ('BracketRight', 221),
'Quote': ('Quote', 222),
"'": ('Quote', 222),
# Media/Browser keys
'AudioVolumeMute': ('AudioVolumeMute', 173),
'AudioVolumeDown': ('AudioVolumeDown', 174),
'AudioVolumeUp': ('AudioVolumeUp', 175),
'MediaTrackNext': ('MediaTrackNext', 176),
'MediaTrackPrevious': ('MediaTrackPrevious', 177),
'MediaStop': ('MediaStop', 178),
'MediaPlayPause': ('MediaPlayPause', 179),
'BrowserBack': ('BrowserBack', 166),
'BrowserForward': ('BrowserForward', 167),
'BrowserRefresh': ('BrowserRefresh', 168),
'BrowserStop': ('BrowserStop', 169),
'BrowserSearch': ('BrowserSearch', 170),
'BrowserFavorites': ('BrowserFavorites', 171),
'BrowserHome': ('BrowserHome', 172),
# Additional common keys
'Clear': ('Clear', 12),
'Pause': ('Pause', 19),
'Select': ('Select', 41),
'Print': ('Print', 42),
'Execute': ('Execute', 43),
'PrintScreen': ('PrintScreen', 44),
'Help': ('Help', 47),
'ContextMenu': ('ContextMenu', 93),
}
if key in key_map:
return key_map[key]
# Handle alphanumeric keys dynamically
if len(key) == 1:
if key.isalpha():
# Letter keys: A-Z have VK codes 65-90
return (f'Key{key.upper()}', ord(key.upper()))
elif key.isdigit():
# Digit keys: 0-9 have VK codes 48-57 (same as ASCII)
return (f'Digit{key}', ord(key))
# Fallback: use the key name as code, no virtual key code
return (key, None)
# Backward compatibility: provide standalone function
def get_key_info(key: str) -> tuple[str, int | None]:
	"""Get the code and windowsVirtualKeyCode for a key.

	Thin module-level wrapper around Utils.get_key_info, kept so call sites
	that import the free function keep working.

	Args:
		key: Key name (e.g., 'Enter', 'ArrowUp', 'a', 'A')

	Returns:
		Tuple of (code, windowsVirtualKeyCode)

	Reference: Windows Virtual Key Codes
	https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
	"""
	return Utils.get_key_info(key)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/utils.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/agent/judge.py | """Judge system for evaluating browser-use agent execution traces."""
import base64
import logging
from pathlib import Path
from typing import Literal
from browser_use.llm.messages import (
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
SystemMessage,
UserMessage,
)
logger = logging.getLogger(__name__)
def _encode_image(image_path: str) -> str | None:
"""Encode image to base64 string."""
try:
path = Path(image_path)
if not path.exists():
return None
with open(path, 'rb') as f:
return base64.b64encode(f.read()).decode('utf-8')
except Exception as e:
logger.warning(f'Failed to encode image {image_path}: {e}')
return None
def _truncate_text(text: str, max_length: int, from_beginning: bool = False) -> str:
"""Truncate text to maximum length with eval system indicator."""
if len(text) <= max_length:
return text
if from_beginning:
return '...[text truncated]' + text[-max_length + 23 :]
else:
return text[: max_length - 23] + '...[text truncated]...'
def construct_judge_messages(
	task: str,
	final_result: str,
	agent_steps: list[str],
	screenshot_paths: list[str],
	max_images: int = 10,
	ground_truth: str | None = None,
	use_vision: bool | Literal['auto'] = True,
) -> list[BaseMessage]:
	"""
	Construct messages for judge evaluation of agent trace.

	Args:
		task: The original task description
		final_result: The final result returned to the user
		agent_steps: List of formatted agent step descriptions
		screenshot_paths: List of screenshot file paths
		max_images: Maximum number of screenshots to include
		ground_truth: Optional ground truth answer or criteria that must be satisfied for success
		use_vision: When False, no screenshots are attached; any other value
			(True or 'auto') attaches up to max_images screenshots

	Returns:
		List of messages for LLM judge evaluation: one SystemMessage with the
		judging rubric and one UserMessage with the trace (plus screenshots)
	"""
	# Cap each text section so the judge prompt stays within context limits
	task_truncated = _truncate_text(task, 40000)
	final_result_truncated = _truncate_text(final_result, 40000)
	steps_text = '\n'.join(agent_steps)
	steps_text_truncated = _truncate_text(steps_text, 40000)
	# Only include screenshots if use_vision is not False
	encoded_images: list[ContentPartImageParam] = []
	if use_vision is not False:
		# Select last N screenshots
		selected_screenshots = screenshot_paths[-max_images:] if len(screenshot_paths) > max_images else screenshot_paths
		# Encode screenshots; unreadable files are silently skipped (_encode_image returns None)
		for img_path in selected_screenshots:
			encoded = _encode_image(img_path)
			if encoded:
				encoded_images.append(
					ContentPartImageParam(
						image_url=ImageURL(
							url=f'data:image/png;base64,{encoded}',
							media_type='image/png',
						)
					)
				)
	# System prompt for judge - conditionally add ground truth section
	ground_truth_section = ''
	if ground_truth:
		ground_truth_section = """
**GROUND TRUTH VALIDATION (HIGHEST PRIORITY):**
The <ground_truth> section contains verified correct information for this task. This can be:
- **Evaluation criteria**: Specific conditions that must be met (e.g., "The success popup should show up", "Must extract exactly 5 items")
- **Factual answers**: The correct answer to a question or information retrieval task (e.g. "10/11/24", "Paris")
- **Expected outcomes**: What should happen after task completion (e.g., "Google Doc must be created", "File should be downloaded")
The ground truth takes ABSOLUTE precedence over all other evaluation criteria. If the ground truth is not satisfied by the agent's execution and final response, the verdict MUST be false.
"""
	system_prompt = f"""You are an expert judge evaluating browser automation agent performance.
<evaluation_framework>
{ground_truth_section}
**PRIMARY EVALUATION CRITERIA (in order of importance):**
1. **Task Satisfaction (Most Important)**: Did the agent accomplish what the user asked for? Break down the task into the key criteria and evaluate if the agent all of them. Focus on user intent and final outcome.
2. **Output Quality**: Is the final result in the correct format and complete? Does it match exactly what was requested?
3. **Tool Effectiveness**: Did the browser interactions work as expected? Were tools used appropriately? How many % of the tools failed?
4. **Agent Reasoning**: Quality of decision-making, planning, and problem-solving throughout the trajectory.
5. **Browser Handling**: Navigation stability, error recovery, and technical execution. If the browser crashes, does not load or a captcha blocks the task, the score must be very low.
**VERDICT GUIDELINES:**
- true: Task completed as requested, human-like execution, all of the users criteria were met and the agent did not make up any information.
- false: Task not completed, or only partially completed.
**Examples of task completion verdict:**
- If task asks for 10 items and agent finds 4 items correctly: false
- If task completed to full user requirements but with some errors to improve in the trajectory: true
- If task impossible due to captcha/login requirements: false
- If the trajectory is ideal and the output is perfect: true
- If the task asks to search all headphones in amazon under $100 but the agent searches all headphones and the lowest price is $150: false
- If the task asks to research a property and create a google doc with the result but the agents only returns the results in text: false
- If the task asks to complete an action on the page, and the agent reports that the action is completed but the screenshot or page shows the action is not actually complete: false
- If the task asks to use a certain tool or site to complete the task but the agent completes the task without using it: false
- If the task asks to look for a section of a page that does not exist: false
- If the agent concludes the task is impossible but it is not: false
- If the agent concludes the task is impossible and it truly is impossible: false
- If the agent is unable to complete the task because no login information was provided and it is truly needed to complete the task: false
**FAILURE CONDITIONS (automatically set verdict to false):**
- Blocked by captcha or missing authentication
- Output format completely wrong or missing
- Infinite loops or severe technical failures
- Critical user requirements ignored
- Page not loaded
- Browser crashed
- Agent could not interact with required UI elements
- The agent moved on from a important step in the task without completing it
- The agent made up content that is not in the screenshot or the page state
- The agent calls done action before completing all key points of the task
**IMPOSSIBLE TASK DETECTION:**
Set `impossible_task` to true when the task fundamentally could not be completed due to:
- Vague or ambiguous task instructions that cannot be reasonably interpreted
- Website genuinely broken or non-functional (be conservative - temporary issues don't count)
- Required links/pages truly inaccessible (404, 403, etc.)
- Task requires authentication/login but no credentials were provided
- Task asks for functionality that doesn't exist on the target site
- Other insurmountable external obstacles beyond the agent's control
Do NOT mark as impossible if:
- Agent made poor decisions but task was achievable
- Temporary page loading issues that could be retried
- Agent didn't try the right approach
- Website works but agent struggled with it
**CAPTCHA DETECTION:**
Set `reached_captcha` to true if:
- Screenshots show captcha challenges (reCAPTCHA, hCaptcha, etc.)
- Agent reports being blocked by bot detection
- Error messages indicate captcha/verification requirements
- Any evidence the agent encountered anti-bot measures during execution
**IMPORTANT EVALUATION NOTES:**
- **evaluate for action** - For each key step of the trace, double check whether the action that the agent tried to performed actually happened. If the required action did not actually occur, the verdict should be false.
- **screenshot is not entire content** - The agent has the entire DOM content, but the screenshot is only part of the content. If the agent extracts information from the page, but you do not see it in the screenshot, you can assume this information is there.
- **Penalize poor tool usage** - Wrong tools, inefficient approaches, ignoring available information.
- **ignore unexpected dates and times** - These agent traces are from varying dates, you can assume the dates the agent uses for search or filtering are correct.
- **IMPORTANT**: be very picky about the user's request - Have very high standard for the agent completing the task exactly to the user's request.
- **IMPORTANT**: be initially doubtful of the agent's self reported success, be sure to verify that its methods are valid and fulfill the user's desires to a tee.
</evaluation_framework>
<response_format>
Respond with EXACTLY this JSON structure (no additional text before or after):
{{
    "reasoning": "Breakdown of user task into key points. Detailed analysis covering: what went well, what didn't work, trajectory quality assessment, tool usage evaluation, output quality review, and overall user satisfaction prediction.",
    "verdict": true or false,
    "failure_reason": "Max 5 sentences explanation of why the task was not completed successfully in case of failure. If verdict is true, use an empty string.",
    "impossible_task": true or false,
    "reached_captcha": true or false
}}
</response_format>
"""
	# Build user prompt with conditional ground truth section
	ground_truth_prompt = ''
	if ground_truth:
		ground_truth_prompt = f"""
<ground_truth>
{ground_truth}
</ground_truth>
"""
	user_prompt = f"""
<task>
{task_truncated or 'No task provided'}
</task>
{ground_truth_prompt}
<agent_trajectory>
{steps_text_truncated or 'No agent trajectory provided'}
</agent_trajectory>
<final_result>
{final_result_truncated or 'No final result provided'}
</final_result>
{len(encoded_images)} screenshots from execution are attached.
Evaluate this agent execution given the criteria and respond with the exact JSON structure requested."""
	# Build messages with screenshots: text part first, then the images in order
	content_parts: list[ContentPartTextParam | ContentPartImageParam] = [ContentPartTextParam(text=user_prompt)]
	content_parts.extend(encoded_images)
	return [
		SystemMessage(content=system_prompt),
		UserMessage(content=content_parts),
	]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/agent/judge.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/agent/variable_detector.py | """Detect variables in agent history for reuse"""
import re
from browser_use.agent.views import AgentHistoryList, DetectedVariable
from browser_use.dom.views import DOMInteractedElement
def detect_variables_in_history(history: AgentHistoryList) -> dict[str, DetectedVariable]:
	"""
	Analyze agent history and detect reusable variables.

	Uses two strategies:
	1. Element attributes (id, name, type, placeholder, aria-label) - most reliable
	2. Value pattern matching (email, phone, date formats) - fallback

	Returns:
		Dictionary mapping variable names to DetectedVariable objects
	"""
	found: dict[str, DetectedVariable] = {}
	seen_values: set[str] = set()  # values already mapped to a variable

	for item in history.history:
		if not item.model_output:
			continue

		interacted = item.state.interacted_element if item.state and item.state.interacted_element else None

		for idx, action in enumerate(item.model_output.action):
			# Normalize the action into a plain dict regardless of its type
			if hasattr(action, 'model_dump'):
				action_dict = action.model_dump()
			elif isinstance(action, dict):
				action_dict = action
			else:
				# SimpleNamespace or similar plain objects
				action_dict = vars(action)

			# Pair the action with its interacted element when one was recorded
			element = interacted[idx] if interacted is not None and len(interacted) > idx else None

			_detect_in_action(action_dict, element, found, seen_values)

	return found
def _detect_in_action(
	action_dict: dict,
	element: DOMInteractedElement | None,
	detected: dict[str, DetectedVariable],
	detected_values: set[str],
) -> None:
	"""Detect variables in a single action using element context"""
	for params in action_dict.values():
		if not isinstance(params, dict):
			continue

		# Only these parameter fields commonly carry user-supplied values
		for field in ('text', 'query'):
			value = params.get(field)
			if not isinstance(value, str) or not value.strip():
				continue
			if value in detected_values:
				continue  # exact value already captured earlier

			# Classify the value, preferring element context over raw patterns
			detection = _detect_variable_type(value, element)
			if detection is None:
				continue

			var_name, var_format = detection
			var_name = _ensure_unique_name(var_name, detected)
			detected[var_name] = DetectedVariable(
				name=var_name,
				original_value=value,
				type='string',
				format=var_format,
			)
			detected_values.add(value)
def _detect_variable_type(
	value: str,
	element: DOMInteractedElement | None = None,
) -> tuple[str, str | None] | None:
	"""
	Detect if a value looks like a variable, using element context when available.

	Priority:
	1. Element attributes (id, name, type, placeholder, aria-label) - most reliable
	2. Value pattern matching (email, phone, date formats) - fallback

	Returns:
		(variable_name, format) or None if not detected
	"""
	# Element attributes beat guessing from the raw value, so try them first
	if element is not None and element.attributes:
		from_attrs = _detect_from_attributes(element.attributes)
		if from_attrs is not None:
			return from_attrs
	# Fall back to shape-of-the-value heuristics
	return _detect_from_value_pattern(value)
def _detect_from_attributes(attributes: dict[str, str]) -> tuple[str, str | None] | None:
"""
Detect variable from element attributes.
Check attributes in priority order:
1. type attribute (HTML5 input types - most specific)
2. id, name, placeholder, aria-label (semantic hints)
"""
# Check 'type' attribute first (HTML5 input types)
input_type = attributes.get('type', '').lower()
if input_type == 'email':
return ('email', 'email')
elif input_type == 'tel':
return ('phone', 'phone')
elif input_type == 'date':
return ('date', 'date')
elif input_type == 'number':
return ('number', 'number')
elif input_type == 'url':
return ('url', 'url')
# Combine semantic attributes for keyword matching
semantic_attrs = [
attributes.get('id', ''),
attributes.get('name', ''),
attributes.get('placeholder', ''),
attributes.get('aria-label', ''),
]
combined_text = ' '.join(semantic_attrs).lower()
# Address detection
if any(keyword in combined_text for keyword in ['address', 'street', 'addr']):
if 'billing' in combined_text:
return ('billing_address', None)
elif 'shipping' in combined_text:
return ('shipping_address', None)
else:
return ('address', None)
# Comment/Note detection
if any(keyword in combined_text for keyword in ['comment', 'note', 'message', 'description']):
return ('comment', None)
# Email detection
if 'email' in combined_text or 'e-mail' in combined_text:
return ('email', 'email')
# Phone detection
if any(keyword in combined_text for keyword in ['phone', 'tel', 'mobile', 'cell']):
return ('phone', 'phone')
# Name detection (order matters - check specific before general)
if 'first' in combined_text and 'name' in combined_text:
return ('first_name', None)
elif 'last' in combined_text and 'name' in combined_text:
return ('last_name', None)
elif 'full' in combined_text and 'name' in combined_text:
return ('full_name', None)
elif 'name' in combined_text:
return ('name', None)
# Date detection
if any(keyword in combined_text for keyword in ['date', 'dob', 'birth']):
return ('date', 'date')
# City detection
if 'city' in combined_text:
return ('city', None)
# State/Province detection
if 'state' in combined_text or 'province' in combined_text:
return ('state', None)
# Country detection
if 'country' in combined_text:
return ('country', None)
# Zip code detection
if any(keyword in combined_text for keyword in ['zip', 'postal', 'postcode']):
return ('zip_code', 'postal_code')
# Company detection
if 'company' in combined_text or 'organization' in combined_text:
return ('company', None)
return None
def _detect_from_value_pattern(value: str) -> tuple[str, str | None] | None:
"""
Detect variable type from value pattern (fallback when no element context).
Patterns:
- Email: contains @ and . with valid format
- Phone: digits with separators, 10+ chars
- Date: YYYY-MM-DD format
- Name: Capitalized word(s), 2-30 chars, letters only
- Number: Pure digits, 1-9 chars
"""
# Email detection - most specific first
if '@' in value and '.' in value:
# Basic email validation
if re.match(r'^[\w\.-]+@[\w\.-]+\.\w+$', value):
return ('email', 'email')
# Phone detection (digits with separators, 10+ chars)
if re.match(r'^[\d\s\-\(\)\+]+$', value):
# Remove separators and check length
digits_only = re.sub(r'[\s\-\(\)\+]', '', value)
if len(digits_only) >= 10:
return ('phone', 'phone')
# Date detection (YYYY-MM-DD or similar)
if re.match(r'^\d{4}-\d{2}-\d{2}$', value):
return ('date', 'date')
# Name detection (capitalized, only letters/spaces, 2-30 chars)
if value and value[0].isupper() and value.replace(' ', '').replace('-', '').isalpha() and 2 <= len(value) <= 30:
words = value.split()
if len(words) == 1:
return ('first_name', None)
elif len(words) == 2:
return ('full_name', None)
else:
return ('name', None)
# Number detection (pure digits, not phone length)
if value.isdigit() and 1 <= len(value) <= 9:
return ('number', 'number')
return None
def _ensure_unique_name(base_name: str, existing: dict[str, DetectedVariable]) -> str:
	"""
	Ensure variable name is unique by adding suffix if needed.

	Examples:
		first_name → first_name
		first_name (exists) → first_name_2
		first_name_2 (exists) → first_name_3
	"""
	if base_name not in existing:
		return base_name

	# Probe numeric suffixes until a free slot is found
	suffix = 2
	candidate = f'{base_name}_{suffix}'
	while candidate in existing:
		suffix += 1
		candidate = f'{base_name}_{suffix}'
	return candidate
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/agent/variable_detector.py",
"license": "MIT License",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/cloud/views.py | from typing import Literal
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
# Proxy location type. NOTE(review): the `| str` union makes the type accept
# ANY string — the Literal values only drive editor autocomplete for the
# common choices; confirm whether server-side validation exists.
ProxyCountryCode = (
	Literal[
		'us',  # United States
		'uk',  # United Kingdom
		'fr',  # France
		'it',  # Italy
		'jp',  # Japan
		'au',  # Australia
		'de',  # Germany
		'fi',  # Finland
		'ca',  # Canada
		'in',  # India
	]
	| str
)

# Browser session timeout limits (in minutes)
MAX_FREE_USER_SESSION_TIMEOUT = 15  # Free users limited to 15 minutes
MAX_PAID_USER_SESSION_TIMEOUT = 240  # Paid users can go up to 4 hours
# Requests
class CreateBrowserRequest(BaseModel):
	"""Request to create a cloud browser instance.

	Fields are exposed to callers under their `cloud_`-prefixed aliases;
	populate_by_name=True means either the alias or the plain field name
	can be used when constructing the model.

	Args:
		cloud_profile_id: The ID of the profile to use for the session
		cloud_proxy_country_code: Country code for proxy location
		cloud_timeout: The timeout for the session in minutes
	"""

	# extra='forbid': unknown keys raise a validation error instead of being dropped
	model_config = ConfigDict(extra='forbid', populate_by_name=True)

	profile_id: UUID | str | None = Field(
		default=None,
		alias='cloud_profile_id',
		description='The ID of the profile to use for the session. Can be a UUID or a string of UUID.',
		title='Cloud Profile ID',
	)
	proxy_country_code: ProxyCountryCode | None = Field(
		default=None,
		alias='cloud_proxy_country_code',
		description='Country code for proxy location.',
		title='Cloud Proxy Country Code',
	)
	# ge/le bound the request shape only; the free-tier cap is enforced server-side
	timeout: int | None = Field(
		ge=1,
		le=MAX_PAID_USER_SESSION_TIMEOUT,
		default=None,
		alias='cloud_timeout',
		description=f'The timeout for the session in minutes. Free users are limited to {MAX_FREE_USER_SESSION_TIMEOUT} minutes, paid users can use up to {MAX_PAID_USER_SESSION_TIMEOUT} minutes ({MAX_PAID_USER_SESSION_TIMEOUT // 60} hours).',
		title='Cloud Timeout',
	)


CloudBrowserParams = CreateBrowserRequest  # alias for easier readability
# Responses
class CloudBrowserResponse(BaseModel):
	"""Response from cloud browser API.

	NOTE(review): the camelCase fields carry aliases identical to their own
	names (e.g. liveUrl -> 'liveUrl'), which is redundant; presumably kept to
	make the wire format explicit — confirm against the cloud API payload.
	"""

	id: str
	status: str
	liveUrl: str = Field(alias='liveUrl')
	cdpUrl: str = Field(alias='cdpUrl')
	timeoutAt: str = Field(alias='timeoutAt')
	startedAt: str = Field(alias='startedAt')
	# Only populated once the session has ended
	finishedAt: str | None = Field(alias='finishedAt', default=None)
# Errors
class CloudBrowserError(Exception):
	"""Exception raised when cloud browser operations fail."""
class CloudBrowserAuthError(CloudBrowserError):
	"""Exception raised when cloud browser authentication fails."""
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/cloud/views.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/browser/session_manager.py | """Event-driven CDP session management.
Manages CDP sessions by listening to Target.attachedToTarget and Target.detachedFromTarget
events, ensuring the session pool always reflects the current browser state.
"""
import asyncio
from typing import TYPE_CHECKING
from cdp_use.cdp.target import AttachedToTargetEvent, DetachedFromTargetEvent, SessionID, TargetID
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession, CDPSession, Target
class SessionManager:
"""Event-driven CDP session manager.
Automatically synchronizes the CDP session pool with browser state via CDP events.
Key features:
- Sessions added/removed automatically via Target attach/detach events
- Multiple sessions can attach to the same target
- Targets only removed when ALL sessions detach
- No stale sessions - pool always reflects browser reality
SessionManager is the SINGLE SOURCE OF TRUTH for all targets and sessions.
"""
	def __init__(self, browser_session: 'BrowserSession'):
		"""Bind the manager to its owning BrowserSession and start with empty pools."""
		self.browser_session = browser_session
		self.logger = browser_session.logger
		# All targets (entities: pages, iframes, workers)
		self._targets: dict[TargetID, 'Target'] = {}
		# All sessions (communication channels)
		self._sessions: dict[SessionID, 'CDPSession'] = {}
		# Mapping: target -> sessions attached to it
		self._target_sessions: dict[TargetID, set[SessionID]] = {}
		# Reverse mapping: session -> target it belongs to
		self._session_to_target: dict[SessionID, TargetID] = {}
		# _lock guards the four pool mappings above; _recovery_lock serializes
		# focus-recovery attempts
		self._lock = asyncio.Lock()
		self._recovery_lock = asyncio.Lock()
		# Focus recovery coordination - event-driven instead of polling
		self._recovery_in_progress: bool = False
		self._recovery_complete_event: asyncio.Event | None = None
		self._recovery_task: asyncio.Task | None = None
	async def start_monitoring(self) -> None:
		"""Start monitoring Target attach/detach events.

		Registers CDP event handlers to keep the session pool synchronized with browser state.
		Also discovers and initializes all existing targets on startup.

		Raises:
			RuntimeError: if the root CDP client has not been initialized yet.
		"""
		if not self.browser_session._cdp_client_root:
			raise RuntimeError('CDP client not initialized')
		# Capture cdp_client_root in closure to avoid type errors
		cdp_client = self.browser_session._cdp_client_root
		# Enable target discovery to receive targetInfoChanged events automatically
		# This eliminates the need for getTargetInfo() polling calls
		await cdp_client.send.Target.setDiscoverTargets(
			params={'discover': True, 'filter': [{'type': 'page'}, {'type': 'iframe'}]}
		)

		# Register synchronous event handlers (CDP requirement). Each handler
		# hands off to a background task so the CDP dispatch loop is never
		# blocked by pool bookkeeping.
		def on_attached(event: AttachedToTargetEvent, session_id: SessionID | None = None):
			# _handle_target_attached() handles:
			# - setAutoAttach for children
			# - Create CDPSession
			# - Enable monitoring (for pages/tabs)
			# - Add to pool
			create_task_with_error_handling(
				self._handle_target_attached(event),
				name='handle_target_attached',
				logger_instance=self.logger,
				suppress_exceptions=True,
			)

		def on_detached(event: DetachedFromTargetEvent, session_id: SessionID | None = None):
			create_task_with_error_handling(
				self._handle_target_detached(event),
				name='handle_target_detached',
				logger_instance=self.logger,
				suppress_exceptions=True,
			)

		def on_target_info_changed(event, session_id: SessionID | None = None):
			# Update session info from targetInfoChanged events (no polling needed!)
			create_task_with_error_handling(
				self._handle_target_info_changed(event),
				name='handle_target_info_changed',
				logger_instance=self.logger,
				suppress_exceptions=True,
			)

		cdp_client.register.Target.attachedToTarget(on_attached)
		cdp_client.register.Target.detachedFromTarget(on_detached)
		cdp_client.register.Target.targetInfoChanged(on_target_info_changed)
		self.logger.debug('[SessionManager] Event monitoring started')

		# Discover and initialize ALL existing targets
		await self._initialize_existing_targets()
	def _get_session_for_target(self, target_id: TargetID) -> 'CDPSession | None':
		"""Internal: Get ANY valid session for a target (picks first available).

		⚠️ INTERNAL API - Use browser_session.get_or_create_cdp_session() instead!
		This method has no validation, no focus management, no recovery.

		Args:
			target_id: Target ID to get session for

		Returns:
			CDPSession if exists, None if target has detached
		"""
		session_ids = self._target_sessions.get(target_id, set())
		if not session_ids:
			# Check if this is the focused target - indicates stale focus that needs cleanup
			if self.browser_session.agent_focus_target_id == target_id:
				self.logger.warning(
					f'[SessionManager] ⚠️ Attempted to get session for stale focused target {target_id[:8]}... '
					f'Clearing stale focus and triggering recovery.'
				)
				# Clear stale focus immediately (defense in depth)
				self.browser_session.agent_focus_target_id = None
				# Trigger recovery if not already in progress
				if not self._recovery_in_progress:
					self.logger.warning('[SessionManager] Recovery was not in progress! Triggering now.')
					# suppress_exceptions=False: recovery failures should surface, not be swallowed
					self._recovery_task = create_task_with_error_handling(
						self._recover_agent_focus(target_id),
						name='recover_agent_focus_from_stale_get',
						logger_instance=self.logger,
						suppress_exceptions=False,
					)
			return None
		# Arbitrary pick: any live session on the target can carry commands
		return self._sessions.get(next(iter(session_ids)))
def get_all_page_targets(self) -> list:
	"""Get all page/tab targets using owned data.

	Returns:
		List of Target objects for all page/tab targets
	"""
	# Filter the owned targets down to top-level page-like ones only
	# (iframes, workers, etc. are excluded).
	return [target for target in self._targets.values() if target.target_type in ('page', 'tab')]
async def validate_session(self, target_id: TargetID) -> bool:
	"""Check if a target still has active sessions.

	Args:
		target_id: Target ID to validate

	Returns:
		True if target has active sessions, False if it should be removed
	"""
	# A target is valid only while at least one session remains attached to it;
	# a missing key and an empty set are both treated as invalid.
	return bool(self._target_sessions.get(target_id))
async def clear(self) -> None:
	"""Clear all owned data structures for cleanup."""
	async with self._lock:
		# Wipe every owned mapping in one pass (these are the single source of truth).
		for owned in (self._targets, self._sessions, self._target_sessions, self._session_to_target):
			owned.clear()
		self.logger.info('[SessionManager] Cleared all owned data (targets, sessions, mappings)')
async def is_target_valid(self, target_id: TargetID) -> bool:
	"""Check if a target is still valid and has active sessions.

	Args:
		target_id: Target ID to validate

	Returns:
		True if target is valid and has active sessions, False otherwise
	"""
	# Same semantics as validate_session(): valid means at least one
	# attached session remains for the target.
	attached = self._target_sessions.get(target_id)
	return attached is not None and len(attached) > 0
def get_target_id_from_session_id(self, session_id: SessionID) -> TargetID | None:
	"""Look up which target a session belongs to.

	Args:
		session_id: The session ID to look up

	Returns:
		Target ID if found, None otherwise
	"""
	# Reverse mapping is maintained by the attach/detach event handlers.
	reverse_map = self._session_to_target
	return reverse_map.get(session_id)
def get_target(self, target_id: TargetID) -> 'Target | None':
	"""Get target from owned data.

	Args:
		target_id: Target ID to get

	Returns:
		Target object if found, None otherwise
	"""
	# Plain dictionary lookup against the owned target registry.
	owned_targets = self._targets
	return owned_targets.get(target_id)
def get_all_targets(self) -> dict[TargetID, 'Target']:
	"""Get all targets (read-only access to owned data).

	Note: returns the live internal dict (not a copy) - callers must treat
	it as read-only.

	Returns:
		Dict mapping target_id to Target objects
	"""
	return self._targets
def get_all_target_ids(self) -> list[TargetID]:
	"""Get all target IDs from owned data.

	Returns:
		List of all target IDs
	"""
	# Unpack the dict's keys into a fresh list (insertion order preserved).
	return [*self._targets]
def get_all_sessions(self) -> dict[SessionID, 'CDPSession']:
	"""Get all sessions (read-only access to owned data).

	Note: returns the live internal dict (not a copy) - callers must treat
	it as read-only.

	Returns:
		Dict mapping session_id to CDPSession objects
	"""
	return self._sessions
def get_session(self, session_id: SessionID) -> 'CDPSession | None':
	"""Get session from owned data.

	Args:
		session_id: Session ID to get

	Returns:
		CDPSession object if found, None otherwise
	"""
	# Plain dictionary lookup against the owned session pool.
	pool = self._sessions
	return pool.get(session_id)
def get_all_sessions_for_target(self, target_id: TargetID) -> list['CDPSession']:
	"""Get ALL sessions attached to a target from owned data.

	Args:
		target_id: Target ID to get sessions for

	Returns:
		List of all CDPSession objects for this target
	"""
	sessions = []
	# Resolve each tracked session id; skip ids whose session object has
	# already been removed from the pool.
	for sid in self._target_sessions.get(target_id, set()):
		if sid in self._sessions:
			sessions.append(self._sessions[sid])
	return sessions
def get_target_sessions_mapping(self) -> dict[TargetID, set[SessionID]]:
	"""Get target->sessions mapping (read-only access).

	Note: returns the live internal dict (not a copy) - callers must treat
	it as read-only.

	Returns:
		Dict mapping target_id to set of session_ids
	"""
	return self._target_sessions
def get_focused_target(self) -> 'Target | None':
	"""Get the target that currently has agent focus.

	Convenience method that uses browser_session.agent_focus_target_id.

	Returns:
		Target object if agent has focus, None otherwise
	"""
	focus_id = self.browser_session.agent_focus_target_id
	# No recorded focus -> nothing to look up.
	return self.get_target(focus_id) if focus_id else None
async def ensure_valid_focus(self, timeout: float = 3.0) -> bool:
	"""Ensure agent_focus_target_id points to a valid, attached CDP session.

	If the focus target is stale (detached), this method waits for automatic recovery.
	Uses event-driven coordination (waiting on _recovery_complete_event) instead of
	polling for efficiency.

	Args:
		timeout: Maximum time to wait for recovery in seconds (default: 3.0)

	Returns:
		True if focus is valid or successfully recovered, False if no focus or recovery failed
	"""
	if not self.browser_session.agent_focus_target_id:
		# No focus at all - might be initial state or complete failure
		if self._recovery_in_progress and self._recovery_complete_event:
			# Recovery is happening, wait for it
			try:
				await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=timeout)
				# Check again after recovery - simple existence check
				focus_id = self.browser_session.agent_focus_target_id
				return bool(focus_id and self._get_session_for_target(focus_id))
			except TimeoutError:
				self.logger.error(f'[SessionManager] ❌ Timed out waiting for recovery after {timeout}s')
				return False
		return False

	# Simple existence check - does the focused target have a session?
	cdp_session = self._get_session_for_target(self.browser_session.agent_focus_target_id)
	if cdp_session:
		# Session exists - validate it's still active
		is_valid = await self.validate_session(self.browser_session.agent_focus_target_id)
		if is_valid:
			return True
	# NOTE: no session (or failed validation) deliberately falls through to the
	# stale-focus handling below instead of returning early.

	# Focus is stale - wait for recovery using event instead of polling
	stale_target_id = self.browser_session.agent_focus_target_id
	self.logger.warning(
		f'[SessionManager] ⚠️ Stale agent_focus detected (target {stale_target_id[:8] if stale_target_id else "None"}... detached), '
		f'waiting for recovery...'
	)

	# Check if recovery is already in progress
	if not self._recovery_in_progress:
		self.logger.warning(
			'[SessionManager] ⚠️ Recovery not in progress for stale focus! '
			'This indicates a bug - recovery should have been triggered.'
		)
		return False

	# Wait for recovery complete event (event-driven, not polling!)
	if self._recovery_complete_event:
		try:
			start_time = asyncio.get_event_loop().time()
			await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=timeout)
			elapsed = asyncio.get_event_loop().time() - start_time

			# Verify recovery succeeded - simple existence check
			focus_id = self.browser_session.agent_focus_target_id
			if focus_id and self._get_session_for_target(focus_id):
				self.logger.info(
					f'[SessionManager] ✅ Agent focus recovered to {self.browser_session.agent_focus_target_id[:8]}... '
					f'after {elapsed * 1000:.0f}ms'
				)
				return True
			else:
				self.logger.error(
					f'[SessionManager] ❌ Recovery completed but focus still invalid after {elapsed * 1000:.0f}ms'
				)
				return False
		except TimeoutError:
			self.logger.error(
				f'[SessionManager] ❌ Recovery timed out after {timeout}s '
				f'(was: {stale_target_id[:8] if stale_target_id else "None"}..., '
				f'now: {self.browser_session.agent_focus_target_id[:8] if self.browser_session.agent_focus_target_id else "None"})'
			)
			return False
	else:
		self.logger.error('[SessionManager] ❌ Recovery event not initialized')
		return False
async def _handle_target_attached(self, event: AttachedToTargetEvent) -> None:
	"""Handle Target.attachedToTarget event.

	Called automatically by Chrome when a new target/session is created.
	This is the ONLY place where sessions are added to the pool.

	Flow: enable auto-attach for the new session's children (outside the lock),
	then under the lock record session<->target mappings, create/update the
	Target entity and CDPSession channel, optionally enable proxy auth and page
	monitoring, and finally resume the target if it was paused on attach.

	Args:
		event: CDP Target.attachedToTarget payload (targetInfo, sessionId, waitingForDebugger)
	"""
	target_id = event['targetInfo']['targetId']
	session_id = event['sessionId']
	target_type = event['targetInfo']['type']
	target_info = event['targetInfo']
	waiting_for_debugger = event.get('waitingForDebugger', False)

	self.logger.debug(
		f'[SessionManager] Target attached: {target_id[:8]}... (session={session_id[:8]}..., '
		f'type={target_type}, waitingForDebugger={waiting_for_debugger})'
	)

	# Defensive check: browser may be shutting down and _cdp_client_root could be None
	if self.browser_session._cdp_client_root is None:
		self.logger.debug(
			f'[SessionManager] Skipping target attach for {target_id[:8]}... - browser shutting down (no CDP client)'
		)
		return

	# Enable auto-attach for this session's children (do this FIRST, outside lock)
	try:
		await self.browser_session._cdp_client_root.send.Target.setAutoAttach(
			params={'autoAttach': True, 'waitForDebuggerOnStart': False, 'flatten': True}, session_id=session_id
		)
	except Exception as e:
		error_str = str(e)
		# Expected for short-lived targets (workers, temp iframes) that detach before this executes
		if '-32001' not in error_str and 'Session with given id not found' not in error_str:
			self.logger.debug(f'[SessionManager] Auto-attach failed for {target_type}: {e}')

	async with self._lock:
		# Track this session for the target
		if target_id not in self._target_sessions:
			self._target_sessions[target_id] = set()
		self._target_sessions[target_id].add(session_id)
		self._session_to_target[session_id] = target_id

		# Create or update Target (source of truth for url/title)
		if target_id not in self._targets:
			from browser_use.browser.session import Target

			target = Target(
				target_id=target_id,
				target_type=target_type,
				url=target_info.get('url', 'about:blank'),
				title=target_info.get('title', 'Unknown title'),
			)
			self._targets[target_id] = target
			self.logger.debug(f'[SessionManager] Created target {target_id[:8]}... (type={target_type})')
		else:
			# Update existing target info (keep previous values when event omits fields)
			existing_target = self._targets[target_id]
			existing_target.url = target_info.get('url', existing_target.url)
			existing_target.title = target_info.get('title', existing_target.title)

		# Create CDPSession (communication channel)
		from browser_use.browser.session import CDPSession

		assert self.browser_session._cdp_client_root is not None, 'Root CDP client required'
		cdp_session = CDPSession(
			cdp_client=self.browser_session._cdp_client_root,
			target_id=target_id,
			session_id=session_id,
		)

		# Add to sessions dict
		self._sessions[session_id] = cdp_session

		# If proxy auth is configured, enable Fetch auth handling on this session
		# Avoids overwriting Target.attachedToTarget handlers elsewhere
		try:
			proxy_cfg = self.browser_session.browser_profile.proxy
			username = proxy_cfg.username if proxy_cfg else None
			password = proxy_cfg.password if proxy_cfg else None
			if username and password:
				await cdp_session.cdp_client.send.Fetch.enable(
					params={'handleAuthRequests': True},
					session_id=cdp_session.session_id,
				)
				self.logger.debug(f'[SessionManager] Fetch.enable(handleAuthRequests=True) on session {session_id[:8]}...')
		except Exception as e:
			# Best-effort: proxy auth setup failure must not break the attach flow
			self.logger.debug(f'[SessionManager] Fetch.enable on attached session failed: {type(e).__name__}: {e}')

		self.logger.debug(
			f'[SessionManager] Created session {session_id[:8]}... for target {target_id[:8]}... '
			f'(total sessions: {len(self._sessions)})'
		)

		# Enable lifecycle events and network monitoring for page targets
		if target_type in ('page', 'tab'):
			await self._enable_page_monitoring(cdp_session)

	# Resume execution if waiting for debugger
	if waiting_for_debugger:
		try:
			assert self.browser_session._cdp_client_root is not None
			await self.browser_session._cdp_client_root.send.Runtime.runIfWaitingForDebugger(session_id=session_id)
		except Exception as e:
			self.logger.warning(f'[SessionManager] Failed to resume execution: {e}')
async def _handle_target_info_changed(self, event: dict) -> None:
"""Handle Target.targetInfoChanged event.
Updates target title/URL without polling getTargetInfo().
Chrome fires this automatically when title or URL changes.
"""
target_info = event.get('targetInfo', {})
target_id = target_info.get('targetId')
if not target_id:
return
async with self._lock:
# Update target if it exists (source of truth for url/title)
if target_id in self._targets:
target = self._targets[target_id]
target.title = target_info.get('title', target.title)
target.url = target_info.get('url', target.url)
async def _handle_target_detached(self, event: DetachedFromTargetEvent) -> None:
	"""Handle Target.detachedFromTarget event.

	Called automatically by Chrome when a target/session is destroyed.
	This is the ONLY place where sessions are removed from the pool.

	Under the lock: remove the session (and, when it was the last one, the
	target entity) from the owned data. After releasing the lock: dispatch
	TabClosedEvent for fully-removed page targets and schedule agent-focus
	recovery when the detached target was the focused one.

	Args:
		event: CDP Target.detachedFromTarget payload (sessionId, optional targetId)
	"""
	session_id = event['sessionId']
	target_id = event.get('targetId')  # May be empty

	# If targetId not in event, look it up via session mapping
	if not target_id:
		async with self._lock:
			target_id = self._session_to_target.get(session_id)
		if not target_id:
			self.logger.warning(f'[SessionManager] Session detached but target unknown (session={session_id[:8]}...)')
			return

	# Pre-declare results computed under the lock so the post-lock code
	# (event dispatch, focus recovery) can use them.
	agent_focus_lost = False
	target_fully_removed = False
	target_type = None

	async with self._lock:
		# Remove this session from target's session set
		if target_id in self._target_sessions:
			self._target_sessions[target_id].discard(session_id)
			remaining_sessions = len(self._target_sessions[target_id])

			self.logger.debug(
				f'[SessionManager] Session detached: target={target_id[:8]}... '
				f'session={session_id[:8]}... (remaining={remaining_sessions})'
			)

			# Only remove target when NO sessions remain
			if remaining_sessions == 0:
				self.logger.debug(f'[SessionManager] No sessions remain for target {target_id[:8]}..., removing target')
				target_fully_removed = True

				# Check if agent_focus points to this target
				agent_focus_lost = self.browser_session.agent_focus_target_id == target_id

				# Immediately clear stale focus to prevent operations on detached target
				if agent_focus_lost:
					self.logger.debug(
						f'[SessionManager] Clearing stale agent_focus_target_id {target_id[:8]}... '
						f'to prevent operations on detached target'
					)
					self.browser_session.agent_focus_target_id = None

				# Get target type before removing (needed for TabClosedEvent dispatch)
				target = self._targets.get(target_id)
				target_type = target.target_type if target else None

				# Remove target (entity) from owned data
				if target_id in self._targets:
					self._targets.pop(target_id)
					self.logger.debug(
						f'[SessionManager] Removed target {target_id[:8]}... (remaining targets: {len(self._targets)})'
					)

				# Clean up tracking
				del self._target_sessions[target_id]
		else:
			# Target not tracked - already removed or never attached
			self.logger.debug(
				f'[SessionManager] Session detached from untracked target: target={target_id[:8]}... '
				f'session={session_id[:8]}... (target was already removed or attach event was missed)'
			)

		# Remove session from owned sessions dict (runs for tracked and untracked targets alike)
		if session_id in self._sessions:
			self._sessions.pop(session_id)
			self.logger.debug(
				f'[SessionManager] Removed session {session_id[:8]}... (remaining sessions: {len(self._sessions)})'
			)

		# Remove from reverse mapping
		if session_id in self._session_to_target:
			del self._session_to_target[session_id]

	# Dispatch TabClosedEvent only for page/tab targets that are fully removed (not iframes/workers or partial detaches)
	if target_fully_removed:
		if target_type in ('page', 'tab'):
			from browser_use.browser.events import TabClosedEvent

			self.browser_session.event_bus.dispatch(TabClosedEvent(target_id=target_id))
			self.logger.debug(f'[SessionManager] Dispatched TabClosedEvent for page target {target_id[:8]}...')
		elif target_type:
			self.logger.debug(
				f'[SessionManager] Target {target_id[:8]}... fully removed (type={target_type}) - not dispatching TabClosedEvent'
			)

	# Auto-recover agent_focus outside the lock to avoid blocking other operations
	if agent_focus_lost:
		# Create recovery task instead of awaiting directly - allows concurrent operations to wait on same recovery
		if not self._recovery_in_progress:
			self._recovery_task = create_task_with_error_handling(
				self._recover_agent_focus(target_id),
				name='recover_agent_focus',
				logger_instance=self.logger,
				suppress_exceptions=False,
			)
async def _recover_agent_focus(self, crashed_target_id: TargetID) -> None:
	"""Auto-recover agent_focus when the focused target crashes/detaches.

	Uses recovery lock to prevent concurrent recovery attempts from creating multiple emergency tabs.
	Coordinates with ensure_valid_focus() via events for efficient waiting.

	Recovery strategy, in order: reuse the most recent surviving tab; otherwise
	create a fresh tab; if the session for that tab never appears, create one
	emergency fallback tab. The finally block always signals completion so that
	concurrent waiters are released whether or not recovery succeeded.

	Args:
		crashed_target_id: The target ID that was lost
	"""
	try:
		# Prevent concurrent recovery attempts
		async with self._recovery_lock:
			# Set recovery state INSIDE lock to prevent race conditions
			if self._recovery_in_progress:
				self.logger.debug('[SessionManager] Recovery already in progress, waiting for it to complete')
				# Wait for ongoing recovery instead of starting a new one
				if self._recovery_complete_event:
					try:
						await asyncio.wait_for(self._recovery_complete_event.wait(), timeout=5.0)
					except TimeoutError:
						self.logger.error('[SessionManager] Timed out waiting for ongoing recovery')
				return

			# Set recovery state
			self._recovery_in_progress = True
			self._recovery_complete_event = asyncio.Event()

			if self.browser_session._cdp_client_root is None:
				self.logger.debug('[SessionManager] Skipping focus recovery - browser shutting down (no CDP client)')
				return

			# Check if another recovery already fixed agent_focus
			if self.browser_session.agent_focus_target_id and self.browser_session.agent_focus_target_id != crashed_target_id:
				self.logger.debug(
					f'[SessionManager] Agent focus already recovered by concurrent operation '
					f'(now: {self.browser_session.agent_focus_target_id[:8]}...), skipping recovery'
				)
				return

			# Note: agent_focus_target_id may already be None (cleared in _handle_target_detached)
			current_focus_desc = (
				f'{self.browser_session.agent_focus_target_id[:8]}...'
				if self.browser_session.agent_focus_target_id
				else 'None (already cleared)'
			)
			self.logger.warning(
				f'[SessionManager] Agent focus target {crashed_target_id[:8]}... detached! '
				f'Current focus: {current_focus_desc}. Auto-recovering by switching to another target...'
			)

		# Perform recovery (outside lock to allow concurrent operations)
		# Try to find another valid page target
		page_targets = self.get_all_page_targets()
		new_target_id = None
		is_existing_tab = False

		if page_targets:
			# Switch to most recent page that's not the crashed one
			new_target_id = page_targets[-1].target_id
			is_existing_tab = True
			self.logger.info(f'[SessionManager] Switching agent_focus to existing tab {new_target_id[:8]}...')
		else:
			# No pages exist - create a new one
			self.logger.warning('[SessionManager] No tabs remain! Creating new tab for agent...')
			new_target_id = await self.browser_session._cdp_create_new_page('about:blank')
			self.logger.info(f'[SessionManager] Created new tab {new_target_id[:8]}... for agent')

			# Dispatch TabCreatedEvent so watchdogs can initialize
			from browser_use.browser.events import TabCreatedEvent

			self.browser_session.event_bus.dispatch(TabCreatedEvent(url='about:blank', target_id=new_target_id))

		# Wait for CDP attach event to create session
		# Note: This polling is necessary - waiting for external Chrome CDP event
		# _handle_target_attached will add session to pool when Chrome fires attachedToTarget
		new_session = None
		for attempt in range(20):  # Wait up to 2 seconds
			await asyncio.sleep(0.1)
			new_session = self._get_session_for_target(new_target_id)
			if new_session:
				break

		if new_session:
			self.browser_session.agent_focus_target_id = new_target_id
			self.logger.info(f'[SessionManager] ✅ Agent focus recovered: {new_target_id[:8]}...')

			# Visually activate the tab in browser (only for existing tabs)
			if is_existing_tab:
				try:
					assert self.browser_session._cdp_client_root is not None
					await self.browser_session._cdp_client_root.send.Target.activateTarget(params={'targetId': new_target_id})
					self.logger.debug(f'[SessionManager] Activated tab {new_target_id[:8]}... in browser UI')
				except Exception as e:
					# Best-effort: visual activation failure does not affect focus recovery
					self.logger.debug(f'[SessionManager] Failed to activate tab visually: {e}')

			# Get target to access url (from owned data)
			target = self.get_target(new_target_id)
			target_url = target.url if target else 'about:blank'

			# Dispatch focus changed event
			from browser_use.browser.events import AgentFocusChangedEvent

			self.browser_session.event_bus.dispatch(AgentFocusChangedEvent(target_id=new_target_id, url=target_url))
			return

		# Recovery failed - create emergency fallback tab
		self.logger.error(
			f'[SessionManager] ❌ Failed to get session for {new_target_id[:8]}... after 2s, creating emergency fallback tab'
		)
		fallback_target_id = await self.browser_session._cdp_create_new_page('about:blank')
		self.logger.warning(f'[SessionManager] Created emergency fallback tab {fallback_target_id[:8]}...')

		# Try one more time with fallback
		# Note: This polling is necessary - waiting for external Chrome CDP event
		for _ in range(20):
			await asyncio.sleep(0.1)
			fallback_session = self._get_session_for_target(fallback_target_id)
			if fallback_session:
				self.browser_session.agent_focus_target_id = fallback_target_id
				self.logger.warning(f'[SessionManager] ⚠️ Agent focus set to emergency fallback: {fallback_target_id[:8]}...')

				from browser_use.browser.events import AgentFocusChangedEvent, TabCreatedEvent

				self.browser_session.event_bus.dispatch(TabCreatedEvent(url='about:blank', target_id=fallback_target_id))
				self.browser_session.event_bus.dispatch(
					AgentFocusChangedEvent(target_id=fallback_target_id, url='about:blank')
				)
				return

		# Complete failure - this should never happen
		self.logger.critical(
			'[SessionManager] 🚨 CRITICAL: Failed to recover agent_focus even with fallback! Agent may be in broken state.'
		)
	except Exception as e:
		self.logger.error(f'[SessionManager] ❌ Error during agent_focus recovery: {type(e).__name__}: {e}')
	finally:
		# Always signal completion and reset recovery state
		# This allows all waiting operations to proceed (success or failure)
		if self._recovery_complete_event:
			self._recovery_complete_event.set()
		self._recovery_in_progress = False
		self._recovery_task = None
		self.logger.debug('[SessionManager] Recovery state reset')
async def _initialize_existing_targets(self) -> None:
	"""Discover and initialize all existing targets at startup.

	Attaches to each target and initializes it SYNCHRONOUSLY.
	Chrome will also fire attachedToTarget events, but _handle_target_attached() is
	idempotent (checks if target already in pool), so duplicate handling is safe.
	This eliminates race conditions - monitoring is guaranteed ready before navigation.
	"""
	cdp_client = self.browser_session._cdp_client_root
	assert cdp_client is not None

	# Get all existing targets
	targets_result = await cdp_client.send.Target.getTargets()
	existing_targets = targets_result.get('targetInfos', [])

	self.logger.debug(f'[SessionManager] Discovered {len(existing_targets)} existing targets')

	# Track target IDs for verification
	target_ids_to_wait_for = []

	# Just attach to ALL existing targets - Chrome fires attachedToTarget events
	# The on_attached handler (via create_task) does ALL the work
	for target in existing_targets:
		target_id = target['targetId']
		target_type = target.get('type', 'unknown')
		try:
			# Just attach - event handler does everything
			await cdp_client.send.Target.attachToTarget(params={'targetId': target_id, 'flatten': True})
			target_ids_to_wait_for.append(target_id)
		except Exception as e:
			# Short-lived targets may disappear between discovery and attach
			self.logger.debug(
				f'[SessionManager] Failed to attach to existing target {target_id[:8]}... (type={target_type}): {e}'
			)

	def count_ready_sessions() -> int:
		"""Count awaited targets whose session exists (pages must also have lifecycle monitoring enabled)."""
		ready_count = 0
		for tid in target_ids_to_wait_for:
			session = self._get_session_for_target(tid)
			if not session:
				continue
			target = self._targets.get(tid)
			target_type = target.target_type if target else 'unknown'
			if target_type in ('page', 'tab'):
				# For pages, verify monitoring is enabled (_enable_page_monitoring has run)
				if getattr(session, '_lifecycle_events', None) is not None:
					ready_count += 1
			else:
				# Non-page targets don't need monitoring
				ready_count += 1
		return ready_count

	# Wait for event handlers to complete their work (they run via create_task)
	# Use event-driven approach instead of polling for better performance
	ready_event = asyncio.Event()

	async def check_all_ready():
		"""Check if all sessions are ready and signal completion."""
		while True:
			if count_ready_sessions() == len(target_ids_to_wait_for):
				ready_event.set()
				return
			await asyncio.sleep(0.05)

	# Start checking in background
	check_task = create_task_with_error_handling(
		check_all_ready(), name='check_all_targets_ready', logger_instance=self.logger
	)

	try:
		# Wait for completion with timeout
		await asyncio.wait_for(ready_event.wait(), timeout=2.0)
	except TimeoutError:
		# Timeout - report how many sessions managed to become ready
		self.logger.warning(
			f'[SessionManager] Initialization timeout after 2.0s: {count_ready_sessions()}/{len(target_ids_to_wait_for)} sessions ready'
		)
	finally:
		check_task.cancel()
		try:
			await check_task
		except asyncio.CancelledError:
			pass
async def _enable_page_monitoring(self, cdp_session: 'CDPSession') -> None:
	"""Enable lifecycle events and network monitoring for a page target.

	This is called once per page when it's created, avoiding handler accumulation.
	Registers a SINGLE lifecycle handler per session that stores events for navigations to consume.

	Args:
		cdp_session: The CDP session to enable monitoring on
	"""
	try:
		# Enable Page domain first (required for lifecycle events)
		await cdp_session.cdp_client.send.Page.enable(session_id=cdp_session.session_id)

		# Enable lifecycle events (load, DOMContentLoaded, networkIdle, etc.)
		await cdp_session.cdp_client.send.Page.setLifecycleEventsEnabled(
			params={'enabled': True}, session_id=cdp_session.session_id
		)

		# Enable network monitoring for networkIdle detection
		await cdp_session.cdp_client.send.Network.enable(session_id=cdp_session.session_id)

		# Initialize lifecycle event storage for this session (thread-safe)
		from collections import deque

		cdp_session._lifecycle_events = deque(maxlen=50)  # Keep last 50 events
		cdp_session._lifecycle_lock = asyncio.Lock()

		# Register ONE handler per session that stores events
		def on_lifecycle_event(event, session_id=None):
			# The handler is registered on the shared CDP client, so it may fire
			# for other sessions too; events are filtered to this target below.
			event_name = event.get('name', 'unknown')
			event_loader_id = event.get('loaderId', 'none')

			# Find which target this session belongs to
			target_id_from_event = None
			if session_id:
				target_id_from_event = self.get_target_id_from_session_id(session_id)

			# Check if this event is for our target
			if target_id_from_event == cdp_session.target_id:
				# Store event for navigations to consume
				event_data = {
					'name': event_name,
					'loaderId': event_loader_id,
					'timestamp': asyncio.get_event_loop().time(),
				}
				# Append is atomic in CPython
				try:
					cdp_session._lifecycle_events.append(event_data)
				except Exception as e:
					# Only log errors, not every event
					self.logger.error(f'[SessionManager] Failed to store lifecycle event: {e}')

		# Register the handler ONCE (this is the only place we register)
		cdp_session.cdp_client.register.Page.lifecycleEvent(on_lifecycle_event)
	except Exception as e:
		# Don't fail - target might be short-lived or already detached
		error_str = str(e)
		if '-32001' in error_str or 'Session with given id not found' in error_str:
			self.logger.debug(
				f'[SessionManager] Target {cdp_session.target_id[:8]}... detached before monitoring could be enabled (normal for short-lived targets)'
			)
		else:
			self.logger.warning(
				f'[SessionManager] Failed to enable monitoring for target {cdp_session.target_id[:8]}...: {e}'
			)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/session_manager.py",
"license": "MIT License",
"lines": 740,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/code_use/formatting.py | """Browser state formatting helpers for code-use agent."""
import logging
from typing import Any
from browser_use.browser.session import BrowserSession
from browser_use.browser.views import BrowserStateSummary
logger = logging.getLogger(__name__)
async def format_browser_state_for_llm(
	state: BrowserStateSummary,
	namespace: dict[str, Any],
	browser_session: BrowserSession,
) -> str:
	"""
	Format browser state summary for LLM consumption in code-use mode.

	Builds a markdown-style report with, in order: URL/title header, tab list
	(only when multiple tabs are open), scroll position, pending network
	requests, available namespace variables, and the (possibly truncated) DOM.

	Args:
		state: Browser state summary from browser_session.get_browser_state_summary()
		namespace: The code execution namespace (for showing available variables)
		browser_session: Browser session for additional checks (currently unused
			by this function; kept for interface stability)

	Returns:
		Formatted browser state text for LLM
	"""
	assert state.dom_state is not None
	dom_state = state.dom_state

	# Use eval_representation (compact serializer for code agents)
	dom_html = dom_state.eval_representation()
	if dom_html == '':
		dom_html = 'Empty DOM tree (you might have to wait for the page to load)'

	# Format with URL and title header
	lines = ['## Browser State']
	lines.append(f'**URL:** {state.url}')
	lines.append(f'**Title:** {state.title}')
	lines.append('')

	# Add tabs info if multiple tabs exist
	if len(state.tabs) > 1:
		lines.append('**Tabs:**')
		current_target_candidates = []

		# Find tabs that match current URL and title; only an unambiguous
		# single match gets marked as "(current)".
		for tab in state.tabs:
			if tab.url == state.url and tab.title == state.title:
				current_target_candidates.append(tab.target_id)

		current_target_id = current_target_candidates[0] if len(current_target_candidates) == 1 else None

		for tab in state.tabs:
			is_current = ' (current)' if tab.target_id == current_target_id else ''
			lines.append(f' - Tab {tab.target_id[-4:]}: {tab.url} - {tab.title[:30]}{is_current}')
		lines.append('')

	# Add page scroll info if available
	if state.page_info:
		pi = state.page_info
		pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
		pages_below = pi.pixels_below / pi.viewport_height if pi.viewport_height > 0 else 0
		total_pages = pi.page_height / pi.viewport_height if pi.viewport_height > 0 else 0
		scroll_info = f'**Page:** {pages_above:.1f} pages above, {pages_below:.1f} pages below'
		if total_pages > 1.2:  # Only mention total if significantly > 1 page
			scroll_info += f', {total_pages:.1f} total pages'
		lines.append(scroll_info)
		lines.append('')

	# Add network loading info if there are pending requests
	if state.pending_network_requests:
		# Remove duplicates by URL (keep first occurrence with earliest duration)
		seen_urls = set()
		unique_requests = []
		for req in state.pending_network_requests:
			if req.url not in seen_urls:
				seen_urls.add(req.url)
				unique_requests.append(req)

		lines.append(f'**⏳ Loading:** {len(unique_requests)} network requests still loading')
		# Show up to 20 unique requests with truncated URLs (30 chars max)
		for req in unique_requests[:20]:
			duration_sec = req.loading_duration_ms / 1000
			url_display = req.url if len(req.url) <= 30 else req.url[:27] + '...'
			# Demoted from logger.info: emitting one info-level line per pending
			# request on every state refresh spammed the log with text that is
			# already included in the LLM output below; debug keeps it available
			# for troubleshooting.
			logger.debug(f' - [{duration_sec:.1f}s] {url_display}')
			lines.append(f' - [{duration_sec:.1f}s] {url_display}')
		if len(unique_requests) > 20:
			lines.append(f' - ... and {len(unique_requests) - 20} more')
		lines.append('**Tip:** Content may still be loading. Consider waiting with `await asyncio.sleep(1)` if data is missing.')
		lines.append('')

	# Add available variables and functions BEFORE DOM structure
	# Show useful utilities (json, asyncio, etc.) and user-defined vars, but hide system objects
	skip_vars = {
		'browser',
		'file_system',  # System objects
		'np',
		'pd',
		'plt',
		'numpy',
		'pandas',
		'matplotlib',
		'requests',
		'BeautifulSoup',
		'bs4',
		'pypdf',
		'PdfReader',
		'wait',
	}

	# Highlight code block variables separately from regular variables
	code_block_vars = []
	regular_vars = []
	tracked_code_blocks = namespace.get('_code_block_vars', set())

	for name in namespace.keys():
		# Skip private vars and system objects/actions
		if not name.startswith('_') and name not in skip_vars:
			if name in tracked_code_blocks:
				code_block_vars.append(name)
			else:
				regular_vars.append(name)

	# Sort for consistent display
	available_vars_sorted = sorted(regular_vars)
	code_block_vars_sorted = sorted(code_block_vars)

	# Build available line with code blocks and variables
	parts = []
	if code_block_vars_sorted:
		# Show detailed info for code block variables
		code_block_details = []
		for var_name in code_block_vars_sorted:
			value = namespace.get(var_name)
			if value is not None:
				type_name = type(value).__name__
				value_str = str(value) if not isinstance(value, str) else value
				# Check if it's a function (starts with "(function" or "(async function")
				is_function = value_str.strip().startswith('(function') or value_str.strip().startswith('(async function')
				if is_function:
					# For functions, only show name and type
					detail = f'{var_name}({type_name})'
				else:
					# For non-functions, show first and last 20 chars
					first_20 = value_str[:20].replace('\n', '\\n').replace('\t', '\\t')
					last_20 = value_str[-20:].replace('\n', '\\n').replace('\t', '\\t') if len(value_str) > 20 else ''
					if last_20 and first_20 != last_20:
						detail = f'{var_name}({type_name}): "{first_20}...{last_20}"'
					else:
						detail = f'{var_name}({type_name}): "{first_20}"'
				code_block_details.append(detail)
		parts.append(f'**Code block variables:** {" | ".join(code_block_details)}')
	if available_vars_sorted:
		parts.append(f'**Variables:** {", ".join(available_vars_sorted)}')
	lines.append(f'**Available:** {" | ".join(parts)}')
	lines.append('')

	# Add DOM structure
	lines.append('**DOM Structure:**')

	# Add scroll position hints for DOM
	if state.page_info:
		pi = state.page_info
		pages_above = pi.pixels_above / pi.viewport_height if pi.viewport_height > 0 else 0
		pages_below = pi.pixels_below / pi.viewport_height if pi.viewport_height > 0 else 0

		if pages_above > 0:
			dom_html = f'... {pages_above:.1f} pages above \n{dom_html}'
		else:
			dom_html = '[Start of page]\n' + dom_html

		if pages_below <= 0:
			dom_html += '\n[End of page]'

	# Truncate DOM if too long and notify LLM
	max_dom_length = 60000
	if len(dom_html) > max_dom_length:
		lines.append(dom_html[:max_dom_length])
		lines.append(
			f'\n[DOM truncated after {max_dom_length} characters. Full page contains {len(dom_html)} characters total. Use evaluate to explore more.]'
		)
	else:
		lines.append(dom_html)

	browser_state_text = '\n'.join(lines)
	return browser_state_text
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/code_use/formatting.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/code_use/namespace.py | """Namespace initialization for code-use mode.
This module creates a namespace with all browser tools available as functions,
similar to a Jupyter notebook environment.
"""
import asyncio
import csv
import datetime
import json
import logging
import re
from pathlib import Path
from typing import Any
import requests
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.tools.service import CodeAgentTools, Tools
logger = logging.getLogger(__name__)
# Try to import optional data science libraries
try:
import numpy as np # type: ignore
NUMPY_AVAILABLE = True
except ImportError:
NUMPY_AVAILABLE = False
try:
import pandas as pd # type: ignore
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
try:
import matplotlib.pyplot as plt # type: ignore
MATPLOTLIB_AVAILABLE = True
except ImportError:
MATPLOTLIB_AVAILABLE = False
try:
from bs4 import BeautifulSoup # type: ignore
BS4_AVAILABLE = True
except ImportError:
BS4_AVAILABLE = False
try:
from pypdf import PdfReader # type: ignore
PYPDF_AVAILABLE = True
except ImportError:
PYPDF_AVAILABLE = False
try:
from tabulate import tabulate # type: ignore
TABULATE_AVAILABLE = True
except ImportError:
TABULATE_AVAILABLE = False
def _strip_js_comments(js_code: str) -> str:
"""
Remove JavaScript comments before CDP evaluation.
CDP's Runtime.evaluate doesn't handle comments in all contexts.
Args:
js_code: JavaScript code potentially containing comments
Returns:
JavaScript code with comments stripped
"""
# Remove multi-line comments (/* ... */)
js_code = re.sub(r'/\*.*?\*/', '', js_code, flags=re.DOTALL)
# Remove single-line comments - only lines that START with // (after whitespace)
# This avoids breaking XPath strings, URLs, regex patterns, etc.
js_code = re.sub(r'^\s*//.*$', '', js_code, flags=re.MULTILINE)
return js_code
class EvaluateError(Exception):
	"""Raised by evaluate() when JavaScript execution fails.

	Propagated unchanged through the call stack so that the surrounding
	Python cell stops immediately instead of continuing with a bad result.
	"""

	pass
async def validate_task_completion(
	task: str,
	output: str | None,
	llm: BaseChatModel,
) -> tuple[bool, str]:
	"""Ask the LLM — with no system prompt and no history — whether the task is done.

	Args:
		task: The original task description
		output: The output from the done() call
		llm: The LLM to use for validation

	Returns:
		Tuple of (is_complete, reasoning)
	"""
	from browser_use.llm.messages import UserMessage

	# Build validation prompt (output is capped to keep the request bounded)
	validation_prompt = f"""You are a task completion validator. Analyze if the agent has truly completed the user's task.
**Original Task:**
{task}
**Agent's Output:**
{output[:100000] if output else '(No output provided)'}
**Your Task:**
Determine if the agent has successfully completed the user's task. Consider:
1. Has the agent delivered what the user requested?
2. If data extraction was requested, is there actual data?
3. If the task is impossible (e.g., localhost website, login required but no credentials), is it truly impossible?
4. Could the agent continue and make meaningful progress?
**Response Format:**
Reasoning: [Your analysis of whether the task is complete]
Verdict: [YES or NO]
YES = Task is complete OR truly impossible to complete
NO = Agent should continue working"""
	try:
		# Single user message — deliberately no system prompt or prior history.
		llm_response = await llm.ainvoke([UserMessage(content=validation_prompt)])
		answer = llm_response.completion

		reasoning = ''
		verdict = 'NO'
		# Scan each response line for the expected "Reasoning:" / "Verdict:" prefixes.
		for raw_line in answer.split('\n'):
			lowered = raw_line.strip().lower()
			if lowered.startswith('reasoning:'):
				reasoning = raw_line.split(':', 1)[1].strip()
			elif lowered.startswith('verdict:'):
				verdict_text = raw_line.split(':', 1)[1].strip().upper()
				if 'YES' in verdict_text:
					verdict = 'YES'
				elif 'NO' in verdict_text:
					verdict = 'NO'

		# Fall back to the raw response when no explicit reasoning line was found.
		if not reasoning:
			reasoning = answer

		is_complete = verdict == 'YES'
		logger.info(f'Task validation: {verdict}')
		logger.debug(f'Validation reasoning: {reasoning}')
		return is_complete, reasoning
	except Exception as e:
		logger.warning(f'Failed to validate task completion: {e}')
		# On validation failure, give the agent the benefit of the doubt.
		return True, f'Validation failed: {e}'
async def evaluate(code: str, browser_session: BrowserSession) -> Any:
	"""Execute JavaScript in the browser via CDP and return the deserialized result.

	Args:
		code: JavaScript code to execute (must be wrapped in IIFE)
		browser_session: Session providing the CDP connection

	Returns:
		The result of the JavaScript execution (primitives, dicts and lists
		come back already deserialized by returnByValue)

	Raises:
		EvaluateError: If JavaScript execution fails. This stops Python execution immediately.
	"""
	# CDP's Runtime.evaluate doesn't handle comments in all contexts — strip them first.
	code = _strip_js_comments(code)
	cdp_session = await browser_session.get_or_create_cdp_session()
	try:
		response = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': code, 'returnByValue': True, 'awaitPromise': True},
			session_id=cdp_session.session_id,
		)

		# A JS-side failure surfaces as exceptionDetails rather than a transport error.
		exception = response.get('exceptionDetails')
		if exception:
			error_text = exception.get('text', 'Unknown error')
			# Pull whatever extra detail CDP attached to the exception object.
			details: list[str] = []
			if 'exception' in exception:
				exc_obj = exception['exception']
				if 'description' in exc_obj:
					details.append(exc_obj['description'])
				elif 'value' in exc_obj:
					details.append(str(exc_obj['value']))
			error_msg = f'JavaScript execution error: {error_text}'
			if details:
				error_msg += f'\nDetails: {" | ".join(details)}'
			# Special exception: stops the surrounding Python cell immediately.
			raise EvaluateError(error_msg)

		result_data = response.get('result', {})
		value = result_data.get('value')
		if value is not None:
			# Dicts/lists were deserialized by returnByValue; primitives pass through.
			return value
		# Distinguish an explicit JS null (key present) from undefined (key absent).
		return None if 'value' in result_data else 'undefined'
	except EvaluateError:
		# Already our error type — propagate unchanged.
		raise
	except Exception as e:
		# Anything else (transport, protocol, session) is wrapped for the caller.
		raise EvaluateError(f'Failed to execute JavaScript: {type(e).__name__}: {e}') from e
def create_namespace(
	browser_session: BrowserSession,
	tools: Tools | None = None,
	page_extraction_llm: BaseChatModel | None = None,
	file_system: FileSystem | None = None,
	available_file_paths: list[str] | None = None,
	sensitive_data: dict[str, str | dict[str, str]] | None = None,
) -> dict[str, Any]:
	"""
	Create a namespace with all browser tools available as functions.

	This function creates a dictionary of functions that can be used to interact
	with the browser, similar to a Jupyter notebook environment. Standard-library
	modules are always injected; optional data-science libraries are injected only
	when their import succeeded at module load time.

	Args:
		browser_session: The browser session to use
		tools: Optional Tools instance (will create default if not provided)
		page_extraction_llm: Optional LLM for page extraction
		file_system: Optional file system for file operations
		available_file_paths: Optional list of available file paths
		sensitive_data: Optional sensitive data dictionary (currently unused here)

	Returns:
		Dictionary containing all available functions and objects

	Example:
		namespace = create_namespace(browser_session)
		await namespace['navigate'](url='https://google.com')
		result = await namespace['evaluate']('document.title')
	"""
	if tools is None:
		# Use CodeAgentTools with default exclusions optimized for code-use mode
		# For code-use, we keep: navigate, evaluate, wait, done
		# and exclude: most browser interaction, file system actions (use Python instead)
		tools = CodeAgentTools()
	if available_file_paths is None:
		available_file_paths = []
	namespace: dict[str, Any] = {
		# Core objects
		'browser': browser_session,
		'file_system': file_system,
		# Standard library modules (always available)
		'json': json,
		'asyncio': asyncio,
		'Path': Path,
		'csv': csv,
		're': re,
		'datetime': datetime,
		'requests': requests,
	}
	# Add optional data science libraries if available (both short and long aliases)
	if NUMPY_AVAILABLE:
		namespace['np'] = np
		namespace['numpy'] = np
	if PANDAS_AVAILABLE:
		namespace['pd'] = pd
		namespace['pandas'] = pd
	if MATPLOTLIB_AVAILABLE:
		namespace['plt'] = plt
		namespace['matplotlib'] = plt
	if BS4_AVAILABLE:
		namespace['BeautifulSoup'] = BeautifulSoup
		namespace['bs4'] = BeautifulSoup
	if PYPDF_AVAILABLE:
		namespace['PdfReader'] = PdfReader
		namespace['pypdf'] = PdfReader
	if TABULATE_AVAILABLE:
		namespace['tabulate'] = tabulate
	# Track failed evaluate() calls to detect repeated failed approaches
	# NOTE(review): the namespace dict was just built above, so this guard is always true
	if '_evaluate_failures' not in namespace:
		namespace['_evaluate_failures'] = []

	# Add custom evaluate function that returns values directly
	async def evaluate_wrapper(
		code: str | None = None, variables: dict[str, Any] | None = None, *_args: Any, **kwargs: Any
	) -> Any:
		"""Run JavaScript in the browser, auto-wrapping in an IIFE and injecting `params` from `variables`."""
		# Handle both positional and keyword argument styles
		if code is None:
			# Check if code was passed as keyword arg
			code = kwargs.get('code', kwargs.get('js_code', kwargs.get('expression', '')))
		# Extract variables if passed as kwarg
		if variables is None:
			variables = kwargs.get('variables')
		if not code:
			raise ValueError('No JavaScript code provided to evaluate()')
		# Inject variables if provided
		if variables:
			vars_json = json.dumps(variables)
			stripped = code.strip()
			# Check if code is already a function expression expecting params
			# Pattern: (function(params) { ... }) or (async function(params) { ... })
			if re.match(r'\((?:async\s+)?function\s*\(\s*\w+\s*\)', stripped):
				# Already expects params, wrap to call it with our variables
				code = f'(function(){{ const params = {vars_json}; return {stripped}(params); }})()'
			else:
				# Not a parameterized function, inject params in scope
				# Check if already wrapped in IIFE (including arrow function IIFEs)
				is_wrapped = (
					(stripped.startswith('(function()') and '})()' in stripped[-10:])
					or (stripped.startswith('(async function()') and '})()' in stripped[-10:])
					or (stripped.startswith('(() =>') and ')()' in stripped[-10:])
					or (stripped.startswith('(async () =>') and ')()' in stripped[-10:])
				)
				if is_wrapped:
					# Already wrapped, inject params at the start
					# Try to match regular function IIFE
					match = re.match(r'(\((?:async\s+)?function\s*\(\s*\)\s*\{)', stripped)
					if match:
						prefix = match.group(1)
						rest = stripped[len(prefix) :]
						code = f'{prefix} const params = {vars_json}; {rest}'
					else:
						# Try to match arrow function IIFE
						# Patterns: (() => expr)() or (() => { ... })() or (async () => ...)()
						arrow_match = re.match(r'(\((?:async\s+)?\(\s*\)\s*=>\s*\{)', stripped)
						if arrow_match:
							# Arrow function with block body: (() => { ... })()
							prefix = arrow_match.group(1)
							rest = stripped[len(prefix) :]
							code = f'{prefix} const params = {vars_json}; {rest}'
						else:
							# Arrow function with expression body or fallback: wrap in outer function
							code = f'(function(){{ const params = {vars_json}; return {stripped}; }})()'
				else:
					# Not wrapped, wrap with params
					code = f'(function(){{ const params = {vars_json}; {code} }})()'
			# Skip auto-wrap below
			return await evaluate(code, browser_session)
		# Auto-wrap in IIFE if not already wrapped (and no variables were injected)
		if not variables:
			stripped = code.strip()
			# Check for regular function IIFEs, async function IIFEs, and arrow function IIFEs
			is_wrapped = (
				(stripped.startswith('(function()') and '})()' in stripped[-10:])
				or (stripped.startswith('(async function()') and '})()' in stripped[-10:])
				or (stripped.startswith('(() =>') and ')()' in stripped[-10:])
				or (stripped.startswith('(async () =>') and ')()' in stripped[-10:])
			)
			if not is_wrapped:
				code = f'(function(){{{code}}})()'
		# Execute and track failures
		try:
			result = await evaluate(code, browser_session)
			# Print result structure for debugging (preview only — values truncated)
			if isinstance(result, list) and result and isinstance(result[0], dict):
				result_preview = f'list of dicts - len={len(result)}, example 1:\n'
				sample_result = result[0]
				for key, value in list(sample_result.items())[:10]:
					value_str = str(value)[:10] if not isinstance(value, (int, float, bool, type(None))) else str(value)
					result_preview += f' {key}: {value_str}...\n'
				if len(sample_result) > 10:
					result_preview += f' ... {len(sample_result) - 10} more keys'
				print(result_preview)
			elif isinstance(result, list):
				if len(result) == 0:
					print('type=list, len=0')
				else:
					result_preview = str(result)[:100]
					print(f'type=list, len={len(result)}, preview={result_preview}...')
			elif isinstance(result, dict):
				result_preview = f'type=dict, len={len(result)}, sample keys:\n'
				for key, value in list(result.items())[:10]:
					value_str = str(value)[:10] if not isinstance(value, (int, float, bool, type(None))) else str(value)
					result_preview += f' {key}: {value_str}...\n'
				if len(result) > 10:
					result_preview += f' ... {len(result) - 10} more keys'
				print(result_preview)
			else:
				print(f'type={type(result).__name__}, value={repr(result)[:50]}')
			return result
		except Exception as e:
			# Track errors for pattern detection
			namespace['_evaluate_failures'].append({'error': str(e), 'type': 'exception'})
			raise

	namespace['evaluate'] = evaluate_wrapper

	# Add get_selector_from_index helper for code_use mode
	async def get_selector_from_index_wrapper(index: int) -> str:
		"""
		Get the CSS selector for an element by its interactive index.

		This allows you to use the element's index from the browser state to get
		its CSS selector for use in JavaScript evaluate() calls.

		Args:
			index: The interactive index from the browser state (e.g., [123])

		Returns:
			str: CSS selector that can be used in JavaScript

		Example:
			selector = await get_selector_from_index(123)
			await evaluate(f'''
				(function(){{
					const el = document.querySelector({json.dumps(selector)});
					if (el) el.click();
				}})()
			''')
		"""
		from browser_use.dom.utils import generate_css_selector_for_element

		# Get element by index from browser session
		node = await browser_session.get_element_by_index(index)
		if node is None:
			msg = f'Element index {index} not available - page may have changed. Try refreshing browser state.'
			logger.warning(f'⚠️ {msg}')
			raise RuntimeError(msg)
		# Check if element is in shadow DOM (walk ancestors collecting shadow hosts)
		shadow_hosts = []
		current = node.parent_node
		while current:
			if current.shadow_root_type is not None:
				# This is a shadow host
				host_tag = current.tag_name.lower()
				host_id = current.attributes.get('id', '') if current.attributes else ''
				host_desc = f'{host_tag}#{host_id}' if host_id else host_tag
				shadow_hosts.insert(0, host_desc)
			current = current.parent_node
		# Check if in iframe (a plain querySelector from the top document won't reach it)
		in_iframe = False
		current = node.parent_node
		while current:
			if current.tag_name.lower() == 'iframe':
				in_iframe = True
				break
			current = current.parent_node
		# Use the robust selector generation function (now handles special chars in IDs)
		selector = generate_css_selector_for_element(node)
		# Log shadow DOM/iframe info if detected
		if shadow_hosts:
			shadow_path = ' > '.join(shadow_hosts)
			logger.info(f'Element [{index}] is inside Shadow DOM. Path: {shadow_path}')
			logger.info(f' Selector: {selector}')
			logger.info(
				f' To access: document.querySelector("{shadow_hosts[0].split("#")[0]}").shadowRoot.querySelector("{selector}")'
			)
		if in_iframe:
			logger.info(f"Element [{index}] is inside an iframe. Regular querySelector won't work.")
		if selector:
			return selector
		# Fallback: just use tag name if available
		if node.tag_name:
			return node.tag_name.lower()
		raise ValueError(f'Could not generate selector for element index {index}')

	namespace['get_selector_from_index'] = get_selector_from_index_wrapper

	# Inject all tools as functions into the namespace
	# Skip 'evaluate' since we have a custom implementation above
	for action_name, action in tools.registry.registry.actions.items():
		if action_name == 'evaluate':
			continue  # Skip - use custom evaluate that returns Python objects directly
		param_model = action.param_model
		action_function = action.function

		# Create a closure to capture the current action_name, param_model, and action_function
		def make_action_wrapper(act_name, par_model, act_func):
			async def action_wrapper(*args, **kwargs):
				# Convert positional args to kwargs based on param model fields
				if args:
					# Get the field names from the pydantic model
					field_names = list(par_model.model_fields.keys())
					for i, arg in enumerate(args):
						if i < len(field_names):
							kwargs[field_names[i]] = arg
				# Create params from kwargs
				try:
					params = par_model(**kwargs)
				except Exception as e:
					raise ValueError(f'Invalid parameters for {act_name}: {e}') from e
				# Special validation for done() - enforce minimal code cell
				if act_name == 'done':
					consecutive_failures = namespace.get('_consecutive_errors')
					if consecutive_failures and consecutive_failures > 3:
						# After repeated failures, let done() through without restrictions
						pass
					else:
						# Check if there are multiple Python blocks in this response
						all_blocks = namespace.get('_all_code_blocks', {})
						python_blocks = [k for k in sorted(all_blocks.keys()) if k.startswith('python_')]
						if len(python_blocks) > 1:
							msg = (
								'done() should be the ONLY code block in the response.\n'
								'You have multiple Python blocks in this response. Consider calling done() in a separate response '
								'Now verify the last output and if it satisfies the task, call done(), else continue working.'
							)
							print(msg)
						# Get the current cell code from namespace (injected by service.py before execution)
						current_code = namespace.get('_current_cell_code')
						if current_code and isinstance(current_code, str):
							# Count non-empty, non-comment lines
							lines = [line.strip() for line in current_code.strip().split('\n')]
							code_lines = [line for line in lines if line and not line.startswith('#')]
							# Check if the line above await done() contains an if block
							done_line_index = -1
							for i, line in enumerate(reversed(code_lines)):
								if 'await done()' in line or 'await done(' in line:
									done_line_index = len(code_lines) - 1 - i
									break
							has_if_above = False
							has_else_above = False
							has_elif_above = False
							if done_line_index > 0:
								line_above = code_lines[done_line_index - 1]
								has_if_above = line_above.strip().startswith('if ') and line_above.strip().endswith(':')
								has_else_above = line_above.strip().startswith('else:')
								has_elif_above = line_above.strip().startswith('elif ')
							if has_if_above or has_else_above or has_elif_above:
								msg = (
									'done() should be called individually after verifying the result from any logic.\n'
									'Consider validating your output first, THEN call done() in a final step without if/else/elif blocks only if the task is truly complete.'
								)
								logger.error(msg)
								print(msg)
								raise RuntimeError(msg)
				# Build special context
				special_context = {
					'browser_session': browser_session,
					'page_extraction_llm': page_extraction_llm,
					'available_file_paths': available_file_paths,
					'has_sensitive_data': False,  # Can be handled separately if needed
					'file_system': file_system,
				}
				# Execute the action
				result = await act_func(params=params, **special_context)
				# For code-use mode, we want to return the result directly
				# not wrapped in ActionResult
				if hasattr(result, 'extracted_content'):
					# Special handling for done action - mark task as complete
					if act_name == 'done' and hasattr(result, 'is_done') and result.is_done:
						namespace['_task_done'] = True
						# Store the extracted content as the final result
						if result.extracted_content:
							namespace['_task_result'] = result.extracted_content
						# Store the self-reported success status
						if hasattr(result, 'success'):
							namespace['_task_success'] = result.success
					# If there's extracted content, return it
					if result.extracted_content:
						return result.extracted_content
					# If there's an error, raise it
					if result.error:
						raise RuntimeError(result.error)
					# Otherwise return None
					return None
				return result

			return action_wrapper

		# Rename 'input' to 'input_text' to avoid shadowing Python's built-in input()
		namespace_action_name = 'input_text' if action_name == 'input' else action_name
		# Add the wrapper to the namespace
		namespace[namespace_action_name] = make_action_wrapper(action_name, param_model, action_function)
	return namespace
def get_namespace_documentation(namespace: dict[str, Any]) -> str:
	"""Build markdown documentation for the public callables in a namespace.

	Args:
		namespace: The namespace dictionary (e.g. from create_namespace())

	Returns:
		Markdown-formatted documentation string
	"""
	sections = ['# Available Functions\n']
	for name in sorted(namespace):
		obj = namespace[name]
		# Only document public callables; private names and plain values are skipped.
		if name.startswith('_') or not callable(obj):
			continue
		doc = getattr(obj, '__doc__', None)
		if doc:
			sections.append(f'## {name}\n')
			sections.append(f'{doc}\n')
	return '\n'.join(sections)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/code_use/namespace.py",
"license": "MIT License",
"lines": 547,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/code_use/notebook_export.py | """Export code-use session to Jupyter notebook format."""
import json
import re
from pathlib import Path
from browser_use.code_use.service import CodeAgent
from .views import CellType, NotebookExport
def export_to_ipynb(agent: CodeAgent, output_path: str | Path) -> Path:
	"""
	Export a CodeAgent's notebook session to a Jupyter notebook (.ipynb) file.

	A setup cell is emitted first, then any JavaScript code blocks stored in the
	agent's namespace (as string-variable cells), then the recorded session cells.

	Args:
		agent: The CodeAgent whose session and namespace are exported
		output_path: Path where to save the notebook file

	Returns:
		Path to the saved notebook file

	Example:
		```python
		session = await agent.run()
		notebook_path = export_to_ipynb(agent, 'my_automation.ipynb')
		print(f'Notebook saved to {notebook_path}')
		```
	"""
	output_path = Path(output_path)
	# Create notebook structure
	notebook = NotebookExport(
		metadata={
			'kernelspec': {'display_name': 'Python 3', 'language': 'python', 'name': 'python3'},
			'language_info': {
				'name': 'python',
				'version': '3.11.0',
				'mimetype': 'text/x-python',
				'codemirror_mode': {'name': 'ipython', 'version': 3},
				'pygments_lexer': 'ipython3',
				'nbconvert_exporter': 'python',
				'file_extension': '.py',
			},
		}
	)
	# Add setup cell at the beginning with proper type hints
	setup_code = """import asyncio
import json
from typing import Any
from browser_use import BrowserSession
from browser_use.code_use import create_namespace
# Initialize browser and namespace
browser = BrowserSession()
await browser.start()
# Create namespace with all browser control functions
namespace: dict[str, Any] = create_namespace(browser)
# Import all functions into the current namespace
globals().update(namespace)
# Type hints for better IDE support (these are now available globally)
# navigate, click, input, evaluate, search, extract, scroll, done, etc.
print("Browser-use environment initialized!")
print("Available functions: navigate, click, input, evaluate, search, extract, done, etc.")"""
	setup_cell = {
		'cell_type': 'code',
		'metadata': {},
		'source': setup_code.split('\n'),
		'execution_count': None,
		'outputs': [],
	}
	notebook.cells.append(setup_cell)
	# Add JavaScript code blocks as variables FIRST
	if hasattr(agent, 'namespace') and agent.namespace:
		# Look for JavaScript variables in the namespace
		code_block_vars = agent.namespace.get('_code_block_vars', set())
		for var_name in sorted(code_block_vars):
			var_value = agent.namespace.get(var_name)
			if isinstance(var_value, str) and var_value.strip():
				# Check if this looks like JavaScript code
				# Look for common JS patterns
				js_patterns = [
					r'function\s+\w+\s*\(',
					r'\(\s*function\s*\(\)',
					r'=>\s*{',
					r'document\.',
					r'Array\.from\(',
					r'\.querySelector',
					r'\.textContent',
					r'\.innerHTML',
					r'return\s+',
					r'console\.log',
					r'window\.',
					r'\.map\(',
					r'\.filter\(',
					r'\.forEach\(',
				]
				is_js = any(re.search(pattern, var_value, re.IGNORECASE) for pattern in js_patterns)
				if is_js:
					# Create a code cell with the JavaScript variable
					js_cell = {
						'cell_type': 'code',
						'metadata': {},
						'source': [f'# JavaScript Code Block: {var_name}\n', f'{var_name} = """{var_value}"""'],
						'execution_count': None,
						'outputs': [],
					}
					notebook.cells.append(js_cell)
	# Convert cells
	# NOTE(review): python_cell_count is incremented below but never read
	python_cell_count = 0
	for cell in agent.session.cells:
		notebook_cell: dict = {
			'cell_type': cell.cell_type.value,
			'metadata': {},
			'source': cell.source.splitlines(keepends=True),
		}
		if cell.cell_type == CellType.CODE:
			python_cell_count += 1
			notebook_cell['execution_count'] = cell.execution_count
			notebook_cell['outputs'] = []
			# Add output if available
			if cell.output:
				notebook_cell['outputs'].append(
					{
						'output_type': 'stream',
						'name': 'stdout',
						'text': cell.output.split('\n'),
					}
				)
			# Add error if available
			if cell.error:
				notebook_cell['outputs'].append(
					{
						'output_type': 'error',
						'ename': 'Error',
						'evalue': cell.error.split('\n')[0] if cell.error else '',
						'traceback': cell.error.split('\n') if cell.error else [],
					}
				)
			# Add browser state as a separate output
			if cell.browser_state:
				notebook_cell['outputs'].append(
					{
						'output_type': 'stream',
						'name': 'stdout',
						'text': [f'Browser State:\n{cell.browser_state}'],
					}
				)
		notebook.cells.append(notebook_cell)
	# Write to file
	output_path.parent.mkdir(parents=True, exist_ok=True)
	with open(output_path, 'w', encoding='utf-8') as f:
		json.dump(notebook.model_dump(), f, indent=2, ensure_ascii=False)
	return output_path
def session_to_python_script(agent: CodeAgent) -> str:
	"""Convert a CodeAgent session to a runnable Python script.

	JavaScript code blocks stored in the agent's namespace are emitted as
	string variables before the recorded cells.

	Args:
		agent: The CodeAgent instance to convert

	Returns:
		Python script as a string

	Example:
		```python
		await agent.run()
		script = session_to_python_script(agent)
		print(script)
		```
	"""
	# Header: imports plus the async main() scaffold.
	script_parts: list[str] = [
		'# Generated from browser-use code-use session\n',
		'import asyncio\n',
		'import json\n',
		'from browser_use import BrowserSession\n',
		'from browser_use.code_use import create_namespace\n\n',
		'async def main():\n',
		'\t# Initialize browser and namespace\n',
		'\tbrowser = BrowserSession()\n',
		'\tawait browser.start()\n\n',
		'\t# Create namespace with all browser control functions\n',
		'\tnamespace = create_namespace(browser)\n\n',
		'\t# Extract functions from namespace for direct access\n',
	]
	# Bind each namespace function to a local variable in the generated script.
	for local_name, ns_key in (
		('navigate', 'navigate'),
		('click', 'click'),
		('input_text', 'input'),
		('evaluate', 'evaluate'),
		('search', 'search'),
		('extract', 'extract'),
		('scroll', 'scroll'),
		('done', 'done'),
		('go_back', 'go_back'),
		('wait', 'wait'),
		('screenshot', 'screenshot'),
		('find_text', 'find_text'),
		('switch_tab', 'switch'),
		('close_tab', 'close'),
		('dropdown_options', 'dropdown_options'),
		('select_dropdown', 'select_dropdown'),
		('upload_file', 'upload_file'),
	):
		script_parts.append(f'\t{local_name} = namespace["{ns_key}"]\n')
	script_parts.append('\tsend_keys = namespace["send_keys"]\n\n')
	# Emit JavaScript code blocks from the namespace as string variables FIRST.
	if hasattr(agent, 'namespace') and agent.namespace:
		code_block_vars = agent.namespace.get('_code_block_vars', set())
		for var_name in sorted(code_block_vars):
			var_value = agent.namespace.get(var_name)
			if not (isinstance(var_value, str) and var_value.strip()):
				continue
			# Heuristic: treat the stored string as JavaScript if it matches common JS syntax.
			js_patterns = (
				r'function\s+\w+\s*\(',
				r'\(\s*function\s*\(\)',
				r'=>\s*{',
				r'document\.',
				r'Array\.from\(',
				r'\.querySelector',
				r'\.textContent',
				r'\.innerHTML',
				r'return\s+',
				r'console\.log',
				r'window\.',
				r'\.map\(',
				r'\.filter\(',
				r'\.forEach\(',
			)
			if any(re.search(pattern, var_value, re.IGNORECASE) for pattern in js_patterns):
				script_parts.append(f'\t# JavaScript Code Block: {var_name}\n')
				script_parts.append(f'\t{var_name} = """{var_value}"""\n\n')
	# Emit each recorded code cell, indented into main(); empty lines are dropped.
	for i, cell in enumerate(agent.session.cells):
		if cell.cell_type != CellType.CODE:
			continue
		script_parts.append(f'\t# Cell {i + 1}\n')
		for source_line in cell.source.split('\n'):
			if source_line.strip():
				script_parts.append(f'\t{source_line}\n')
		script_parts.append('\n')
	# Footer: shut down the browser and add the script entry point.
	script_parts.append('\tawait browser.stop()\n\n')
	script_parts.append("if __name__ == '__main__':\n")
	script_parts.append('\tasyncio.run(main())\n')
	return ''.join(script_parts)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/code_use/notebook_export.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/code_use/service.py | """Code-use agent service - Jupyter notebook-like code execution for browser automation."""
import asyncio
import datetime
import html
import json
import logging
import re
import tempfile
import traceback
from pathlib import Path
from typing import Any
from uuid_extensions import uuid7str
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.dom.service import DomService
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
ImageURL,
UserMessage,
)
from browser_use.screenshots.service import ScreenshotService
from browser_use.telemetry.service import ProductTelemetry
from browser_use.telemetry.views import AgentTelemetryEvent
from browser_use.tokens.service import TokenCost
from browser_use.tokens.views import UsageSummary
from browser_use.tools.service import CodeAgentTools, Tools
from browser_use.utils import get_browser_use_version
from .formatting import format_browser_state_for_llm
from .namespace import EvaluateError, create_namespace
from .utils import detect_token_limit_issue, extract_code_blocks, extract_url_from_task, truncate_message_content
from .views import (
CellType,
CodeAgentHistory,
CodeAgentHistoryList,
CodeAgentModelOutput,
CodeAgentResult,
CodeAgentState,
CodeAgentStepMetadata,
ExecutionStatus,
NotebookSession,
)
logger = logging.getLogger(__name__)
class CodeAgent:
"""
Agent that executes Python code in a notebook-like environment for browser automation.
This agent provides a Jupyter notebook-like interface where the LLM writes Python code
that gets executed in a persistent namespace with browser control functions available.
"""
def __init__(
self,
task: str,
# Optional parameters
llm: BaseChatModel | None = None,
browser_session: BrowserSession | None = None,
browser: BrowserSession | None = None, # Alias for browser_session
tools: Tools | None = None,
controller: Tools | None = None, # Alias for tools
# Agent settings
page_extraction_llm: BaseChatModel | None = None,
file_system: FileSystem | None = None,
available_file_paths: list[str] | None = None,
sensitive_data: dict[str, str | dict[str, str]] | None = None,
max_steps: int = 100,
max_failures: int = 8,
max_validations: int = 0,
use_vision: bool = True,
calculate_cost: bool = False,
demo_mode: bool | None = None,
**kwargs,
):
"""
Initialize the code-use agent.
Args:
task: The task description for the agent
browser_session: Optional browser session (will be created if not provided) [DEPRECATED: use browser]
browser: Optional browser session (cleaner API)
tools: Optional Tools instance (will create default if not provided)
controller: Optional Tools instance
page_extraction_llm: Optional LLM for page extraction
file_system: Optional file system for file operations
available_file_paths: Optional list of available file paths
sensitive_data: Optional sensitive data dictionary
max_steps: Maximum number of execution steps
max_failures: Maximum consecutive errors before termination (default: 8)
max_validations: Maximum number of times to run the validator agent (default: 0)
use_vision: Whether to include screenshots in LLM messages (default: True)
calculate_cost: Whether to calculate token costs (default: False)
demo_mode: Enable the in-browser demo panel for live logging (default: False)
llm: Optional ChatBrowserUse LLM instance (will create default if not provided)
**kwargs: Additional keyword arguments for compatibility (ignored)
"""
# Log and ignore unknown kwargs for compatibility
if kwargs:
logger.debug(f'Ignoring additional kwargs for CodeAgent compatibility: {list(kwargs.keys())}')
if llm is None:
try:
from browser_use import ChatBrowserUse
llm = ChatBrowserUse()
logger.debug('CodeAgent using ChatBrowserUse')
except Exception as e:
raise RuntimeError(f'Failed to initialize CodeAgent LLM: {e}')
if 'ChatBrowserUse' not in llm.__class__.__name__:
raise ValueError('This agent works only with ChatBrowserUse.')
# Handle browser vs browser_session parameter (browser takes precedence)
if browser and browser_session:
raise ValueError('Cannot specify both "browser" and "browser_session" parameters. Use "browser" for the cleaner API.')
browser_session = browser or browser_session
# Handle controller vs tools parameter (controller takes precedence)
if controller and tools:
raise ValueError('Cannot specify both "controller" and "tools" parameters. Use "controller" for the cleaner API.')
tools = controller or tools
# Store browser_profile for creating browser session if needed
self._demo_mode_enabled = False
if browser_session is None:
profile_kwargs: dict[str, Any] = {}
if demo_mode is not None:
profile_kwargs['demo_mode'] = demo_mode
self._browser_profile_for_init = BrowserProfile(**profile_kwargs)
else:
self._browser_profile_for_init = None
self.task = task
self.llm = llm
self.browser_session = browser_session
if self.browser_session:
if demo_mode is not None and self.browser_session.browser_profile.demo_mode != demo_mode:
self.browser_session.browser_profile = self.browser_session.browser_profile.model_copy(
update={'demo_mode': demo_mode}
)
self._demo_mode_enabled = bool(self.browser_session.browser_profile.demo_mode)
self.tools = tools or CodeAgentTools()
self.page_extraction_llm = page_extraction_llm
self.file_system = file_system if file_system is not None else FileSystem(base_dir='./')
self.available_file_paths = available_file_paths or []
self.sensitive_data = sensitive_data
self.max_steps = max_steps
self.max_failures = max_failures
self.max_validations = max_validations
self.use_vision = use_vision
self.session = NotebookSession()
self.namespace: dict[str, Any] = {}
self._llm_messages: list[BaseMessage] = [] # Internal LLM conversation history
self.complete_history: list[CodeAgentHistory] = [] # Type-safe history with model_output and result
self.dom_service: DomService | None = None
self._last_browser_state_text: str | None = None # Track last browser state text
self._last_screenshot: str | None = None # Track last screenshot (base64)
self._consecutive_errors = 0 # Track consecutive errors for auto-termination
self._validation_count = 0 # Track number of validator runs
self._last_llm_usage: Any | None = None # Track last LLM call usage stats
self._step_start_time = 0.0 # Track step start time for duration calculation
self.usage_summary: UsageSummary | None = None # Track usage summary across run for history property
self._sample_output_added = False # Track whether preview cell already created
# Initialize screenshot service for eval tracking
self.id = uuid7str()
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
base_tmp = Path(tempfile.gettempdir())
self.agent_directory = base_tmp / f'browser_use_code_agent_{self.id}_{timestamp}'
self.screenshot_service = ScreenshotService(agent_directory=self.agent_directory)
# Initialize token cost service for usage tracking
self.token_cost_service = TokenCost(include_cost=calculate_cost)
self.token_cost_service.register_llm(llm)
if page_extraction_llm:
self.token_cost_service.register_llm(page_extraction_llm)
# Set version and source for telemetry
self.version = get_browser_use_version()
try:
package_root = Path(__file__).parent.parent.parent
repo_files = ['.git', 'README.md', 'docs', 'examples']
if all(Path(package_root / file).exists() for file in repo_files):
self.source = 'git'
else:
self.source = 'pip'
except Exception:
self.source = 'unknown'
# Telemetry
self.telemetry = ProductTelemetry()
	async def run(self, max_steps: int | None = None) -> NotebookSession:
		"""
		Run the agent to complete the task.
		Args:
			max_steps: Optional override for maximum number of steps (uses __init__ value if not provided)
		Returns:
			The notebook session with all executed cells
		"""
		# Use override if provided, otherwise use value from __init__
		steps_to_run = max_steps if max_steps is not None else self.max_steps
		self.max_steps = steps_to_run
		# Start browser if not provided
		if self.browser_session is None:
			assert self._browser_profile_for_init is not None
			self.browser_session = BrowserSession(browser_profile=self._browser_profile_for_init)
			await self.browser_session.start()
		if self.browser_session:
			self._demo_mode_enabled = bool(self.browser_session.browser_profile.demo_mode)
			if self._demo_mode_enabled and getattr(self.browser_session.browser_profile, 'headless', False):
				logger.warning('Demo mode is enabled but the browser is headless=True; set headless=False to view the panel.')
		if self._demo_mode_enabled:
			await self._demo_mode_log(f'Started CodeAgent task: {self.task}', 'info', {'tag': 'task'})
		# Initialize DOM service with cross-origin iframe support enabled
		self.dom_service = DomService(
			browser_session=self.browser_session,
			cross_origin_iframes=True,  # Enable for code-use agent to access forms in iframes
		)
		# Create namespace with all tools
		self.namespace = create_namespace(
			browser_session=self.browser_session,
			tools=self.tools,
			page_extraction_llm=self.page_extraction_llm,
			file_system=self.file_system,
			available_file_paths=self.available_file_paths,
			sensitive_data=self.sensitive_data,
		)
		# Initialize conversation with task
		self._llm_messages.append(UserMessage(content=f'Task: {self.task}'))
		# Track agent run error for telemetry
		agent_run_error: str | None = None
		# NOTE(review): agent_run_error is never reassigned in this method, so telemetry
		# below always receives None — confirm whether failures should be recorded here.
		should_delay_close = False
		# should_delay_close defers browser close so the demo panel stays visible after completion
		# Extract URL from task and navigate if found
		initial_url = extract_url_from_task(self.task)
		if initial_url:
			try:
				logger.info(f'Extracted URL from task, navigating to: {initial_url}')
				# Use the navigate action from namespace
				await self.namespace['navigate'](initial_url)
				# Wait for page load
				await asyncio.sleep(2)
				# Record this navigation as a cell in the notebook
				nav_code = f"await navigate('{initial_url}')"
				cell = self.session.add_cell(source=nav_code)
				cell.status = ExecutionStatus.SUCCESS
				cell.execution_count = self.session.increment_execution_count()
				cell.output = f'Navigated to {initial_url}'
				# Get browser state after navigation for the cell
				if self.dom_service:
					try:
						browser_state_text, _ = await self._get_browser_state()
						cell.browser_state = browser_state_text
					except Exception as state_error:
						logger.debug(f'Failed to capture browser state for initial navigation cell: {state_error}')
			except Exception as e:
				logger.warning(f'Failed to navigate to extracted URL {initial_url}: {e}')
				# Record failed navigation as error cell
				nav_code = f"await navigate('{initial_url}')"
				cell = self.session.add_cell(source=nav_code)
				cell.status = ExecutionStatus.ERROR
				cell.execution_count = self.session.increment_execution_count()
				cell.error = str(e)
		# Get initial browser state before first LLM call
		if self.browser_session and self.dom_service:
			try:
				browser_state_text, screenshot = await self._get_browser_state()
				self._last_browser_state_text = browser_state_text
				self._last_screenshot = screenshot
			except Exception as e:
				logger.warning(f'Failed to get initial browser state: {e}')
		# Main execution loop
		for step in range(self.max_steps):
			logger.info(f'\n\n\n\n\n\n\nStep {step + 1}/{self.max_steps}')
			await self._demo_mode_log(f'Starting step {step + 1}/{self.max_steps}', 'info', {'step': step + 1})
			# Start timing this step
			self._step_start_time = datetime.datetime.now().timestamp()
			# Check if we're approaching the step limit or error limit and inject warning
			steps_remaining = self.max_steps - step - 1
			errors_remaining = self.max_failures - self._consecutive_errors
			should_warn = (
				steps_remaining <= 1  # Last step or next to last
				or errors_remaining <= 1  # One more error will terminate
				or (steps_remaining <= 2 and self._consecutive_errors >= 2)  # Close to both limits
			)
			if should_warn:
				warning_message = (
					f'\n\n⚠️ CRITICAL WARNING: You are approaching execution limits!\n'
					f'- Steps remaining: {steps_remaining + 1}\n'
					f'- Consecutive errors: {self._consecutive_errors}/{self.max_failures}\n\n'
					f'YOU MUST call done() in your NEXT response, even if the task is incomplete:\n'
					f"- Set success=False if you couldn't complete the task\n"
					f'- Return EVERYTHING you found so far (partial data is better than nothing)\n'
					f"- Include any variables you've stored (products, all_data, etc.)\n"
					f"- Explain what worked and what didn't\n\n"
					f'Without done(), the user will receive NOTHING.'
				)
				self._llm_messages.append(UserMessage(content=warning_message))
			try:
				# Fetch fresh browser state right before LLM call (only if not already set)
				if not self._last_browser_state_text and self.browser_session and self.dom_service:
					try:
						logger.debug('🔍 Fetching browser state before LLM call...')
						browser_state_text, screenshot = await self._get_browser_state()
						self._last_browser_state_text = browser_state_text
						self._last_screenshot = screenshot
						# # Log browser state
						# if len(browser_state_text) > 2000:
						# 	logger.info(
						# 		f'Browser state (before LLM):\n{browser_state_text[:2000]}...\n[Truncated, full state {len(browser_state_text)} chars sent to LLM]'
						# 	)
						# else:
						# 	logger.info(f'Browser state (before LLM):\n{browser_state_text}')
					except Exception as e:
						logger.warning(f'Failed to get browser state before LLM call: {e}')
				# Get code from LLM (this also adds to self._llm_messages)
				try:
					code, full_llm_response = await self._get_code_from_llm(step_number=step + 1)
				except Exception as llm_error:
					# LLM call failed - count as consecutive error and retry
					self._consecutive_errors += 1
					logger.warning(
						f'LLM call failed (consecutive errors: {self._consecutive_errors}/{self.max_failures}), retrying: {llm_error}'
					)
					await self._demo_mode_log(
						f'LLM call failed: {llm_error}',
						'error',
						{'step': step + 1},
					)
					# Check if we've hit the consecutive error limit
					if self._consecutive_errors >= self.max_failures:
						logger.error(f'Terminating: {self.max_failures} consecutive LLM failures')
						break
					await asyncio.sleep(1)  # Brief pause before retry
					continue
				if not code or code.strip() == '':
					# If task is already done, empty code is fine (LLM explaining completion)
					if self._is_task_done():
						logger.info('Task already marked as done, LLM provided explanation without code')
						# Add the text response to history as a non-code step
						await self._add_step_to_complete_history(
							model_output_code='',
							full_llm_response=full_llm_response,
							output=full_llm_response,  # Treat the explanation as output
							error=None,
							screenshot_path=await self._capture_screenshot(step + 1),
						)
						break  # Exit the loop since task is done
					logger.warning('LLM returned empty code')
					self._consecutive_errors += 1
					# new state
					if self.browser_session and self.dom_service:
						try:
							browser_state_text, screenshot = await self._get_browser_state()
							self._last_browser_state_text = browser_state_text
							self._last_screenshot = screenshot
						except Exception as e:
							logger.warning(f'Failed to get new browser state: {e}')
					continue
				# Execute code blocks sequentially if multiple python blocks exist
				# This allows JS/bash blocks to be injected into namespace before Python code uses them
				all_blocks = self.namespace.get('_all_code_blocks', {})
				python_blocks = [k for k in sorted(all_blocks.keys()) if k.startswith('python_')]
				if len(python_blocks) > 1:
					# Multiple Python blocks - execute each sequentially
					output = None
					error = None
					for i, block_key in enumerate(python_blocks):
						logger.info(f'Executing Python block {i + 1}/{len(python_blocks)}')
						block_code = all_blocks[block_key]
						block_output, block_error, _ = await self._execute_code(block_code)
						# Accumulate outputs
						if block_output:
							output = (output or '') + block_output
						if block_error:
							error = block_error
							# Stop on first error
							break
				else:
					# Single Python block - execute normally
					output, error, _ = await self._execute_code(code)
				# Track consecutive errors
				if error:
					self._consecutive_errors += 1
					logger.warning(f'Consecutive errors: {self._consecutive_errors}/{self.max_failures}')
					# Check if we've hit the consecutive error limit
					if self._consecutive_errors >= self.max_failures:
						logger.error(
							f'Terminating: {self.max_failures} consecutive errors reached. The agent is unable to make progress.'
						)
						await self._demo_mode_log(
							f'Terminating after {self.max_failures} consecutive errors without progress.',
							'error',
							{'step': step + 1},
						)
						# Add termination message to complete history before breaking
						await self._add_step_to_complete_history(
							model_output_code=code,
							full_llm_response=f'[Terminated after {self.max_failures} consecutive errors]',
							output=None,
							error=f'Auto-terminated: {self.max_failures} consecutive errors without progress',
							screenshot_path=None,
						)
						break
				else:
					# Reset consecutive error counter on success
					self._consecutive_errors = 0
				# Check if task is done - validate completion first if not at limits
				if self._is_task_done():
					# Get the final result from namespace (from done() call)
					final_result: str | None = self.namespace.get('_task_result')  # type: ignore[assignment]
					# Check if we should validate (not at step/error limits and under max validations)
					steps_remaining = self.max_steps - step - 1
					should_validate = (
						self._validation_count < self.max_validations  # Haven't exceeded max validations
						and steps_remaining >= 4  # At least 4 steps away from limit
						and self._consecutive_errors < 3  # Not close to error limit (8 consecutive)
					)
					if should_validate:
						self._validation_count += 1
						logger.info('Validating task completion with LLM...')
						from .namespace import validate_task_completion

						is_complete, reasoning = await validate_task_completion(
							task=self.task,
							output=final_result,
							llm=self.llm,
						)
						if not is_complete:
							# Task not truly complete - inject feedback and continue
							logger.warning('Validator: Task not complete, continuing...')
							validation_feedback = (
								f'\n\n⚠️ VALIDATOR FEEDBACK:\n'
								f'Your done() call was rejected. The task is NOT complete yet.\n\n'
								f'Validation reasoning:\n{reasoning}\n\n'
								f'You must continue working on the task. Analyze what is missing and complete it.\n'
								f'Do NOT call done() again until the task is truly finished.'
							)
							# Clear the done flag so execution continues
							self.namespace['_task_done'] = False
							self.namespace.pop('_task_result', None)
							self.namespace.pop('_task_success', None)
							# Add validation feedback to LLM messages
							self._llm_messages.append(UserMessage(content=validation_feedback))
							# Don't override output - let execution continue normally
						else:
							logger.info('Validator: Task complete')
							# Override output with done message for final step
							if final_result:
								output = final_result
					else:
						# At limits - skip validation and accept done()
						if self._validation_count >= self.max_validations:
							logger.info(
								f'Reached max validations ({self.max_validations}) - skipping validation and accepting done()'
							)
						else:
							logger.info('At step/error limits - skipping validation')
						if final_result:
							output = final_result
				if output:
					# Check if this is the final done() output
					if self._is_task_done():
						# Show done() output more prominently
						logger.info(
							f'✓ Task completed - Final output from done():\n{output[:300] if len(output) > 300 else output}'
						)
						# Also show files_to_display if they exist in namespace
						attachments: list[str] | None = self.namespace.get('_task_attachments')  # type: ignore[assignment]
						if attachments:
							logger.info(f'Files displayed: {", ".join(attachments)}')
					else:
						logger.info(f'Code output:\n{output}')
				# Browser state is now only logged when fetched before LLM call (not after execution)
				# Take screenshot for eval tracking
				screenshot_path = await self._capture_screenshot(step + 1)
				# Add step to complete_history for eval system
				await self._add_step_to_complete_history(
					model_output_code=code,
					full_llm_response=full_llm_response,
					output=output,
					error=error,
					screenshot_path=screenshot_path,
				)
				# Check if task is done (after validation)
				if self._is_task_done():
					# Get the final result from namespace
					final_result: str | None = self.namespace.get('_task_result', output)  # type: ignore[assignment]
					logger.info('Task completed successfully')
					if final_result:
						logger.info(f'Final result: {final_result}')
						self._add_sample_output_cell(final_result)
					if self._demo_mode_enabled:
						await self._demo_mode_log(
							f'Final Result: {final_result or "Task completed"}',
							'success',
							{'tag': 'task'},
						)
						should_delay_close = True
					break
				# If validation rejected done(), continue to next iteration
				# The feedback message has already been added to _llm_messages
				# Add result to LLM messages for next iteration (without browser state)
				result_message = self._format_execution_result(code, output, error, current_step=step + 1)
				truncated_result = truncate_message_content(result_message)
				self._llm_messages.append(UserMessage(content=truncated_result))
			except Exception as e:
				logger.error(f'Error in step {step + 1}: {e}')
				traceback.print_exc()
				break
		else:
			# Loop completed without break - max_steps reached
			logger.warning(f'Maximum steps ({self.max_steps}) reached without task completion')
			await self._demo_mode_log(
				f'Maximum steps ({self.max_steps}) reached without completing the task.',
				'error',
				{'tag': 'task'},
			)
		# If task is not done, capture the last step's output as partial result
		if not self._is_task_done() and self.complete_history:
			# Get the last step's output/error and use it as final extracted_content
			last_step = self.complete_history[-1]
			last_result = last_step.result[0] if last_step.result else None
			last_output = last_result.extracted_content if last_result else None
			last_error = last_result.error if last_result else None
			# Build a partial result message from the last step
			partial_result_parts = []
			partial_result_parts.append(f'Task incomplete - reached step limit ({self.max_steps} steps).')
			partial_result_parts.append('Last step output:')
			if last_output:
				partial_result_parts.append(f'\nOutput: {last_output}')
			if last_error:
				partial_result_parts.append(f'\nError: {last_error}')
			# Add any accumulated variables that might contain useful data
			data_vars = []
			for var_name in sorted(self.namespace.keys()):
				if not var_name.startswith('_') and var_name not in {'json', 'asyncio', 'csv', 're', 'datetime', 'Path'}:
					var_value = self.namespace[var_name]
					# Check if it's a list or dict that might contain collected data
					if isinstance(var_value, (list, dict)) and var_value:
						data_vars.append(f'  - {var_name}: {type(var_value).__name__} with {len(var_value)} items')
			if data_vars:
				partial_result_parts.append('\nVariables in namespace that may contain partial data:')
				partial_result_parts.extend(data_vars)
			partial_result = '\n'.join(partial_result_parts)
			# Update the last step's extracted_content with this partial result
			if last_result:
				last_result.extracted_content = partial_result
				last_result.is_done = False
				last_result.success = False
			logger.info(f'\nPartial result captured from last step:\n{partial_result}')
			if self._demo_mode_enabled:
				await self._demo_mode_log(f'Partial result:\n{partial_result}', 'error', {'tag': 'task'})
		# Log final summary if task was completed
		if self._is_task_done():
			logger.info('\n' + '=' * 60)
			logger.info('TASK COMPLETED SUCCESSFULLY')
			logger.info('=' * 60)
			final_result: str | None = self.namespace.get('_task_result')  # type: ignore[assignment]
			if final_result:
				logger.info(f'\nFinal Output:\n{final_result}')
				self._add_sample_output_cell(final_result)
			attachments: list[str] | None = self.namespace.get('_task_attachments')  # type: ignore[assignment]
			if attachments:
				logger.info(f'\nFiles Attached:\n{chr(10).join(attachments)}')
			logger.info('=' * 60 + '\n')
			if self._demo_mode_enabled and not should_delay_close:
				await self._demo_mode_log(
					f'Final Result: {final_result or "Task completed"}',
					'success',
					{'tag': 'task'},
				)
				should_delay_close = True
		# Auto-close browser if keep_alive is False
		if should_delay_close and self._demo_mode_enabled:
			await asyncio.sleep(30)
		await self.close()
		# Store usage summary for history property
		self.usage_summary = await self.token_cost_service.get_usage_summary()
		# Log token usage summary
		await self.token_cost_service.log_usage_summary()
		# Log telemetry event
		try:
			self._log_agent_event(max_steps=self.max_steps, agent_run_error=agent_run_error)
		except Exception as log_e:
			logger.error(f'Failed to log telemetry event: {log_e}', exc_info=True)
		# Store history data in session for history property
		self.session._complete_history = self.complete_history
		self.session._usage_summary = self.usage_summary
		return self.session
	async def _get_code_from_llm(self, step_number: int | None = None) -> tuple[str, str]:
		"""Get Python code from the LLM.
		Side effects: appends to self._llm_messages, clears the cached browser state,
		injects non-python code blocks into self.namespace, and records usage stats.
		Returns:
			Tuple of (extracted_code, full_llm_response)
		"""
		# Prepare messages for this request
		# Include browser state as separate message if available (not accumulated in history)
		messages_to_send = self._llm_messages.copy()
		if self._last_browser_state_text:
			# Create message with optional screenshot
			if self.use_vision and self._last_screenshot:
				# Build content with text + screenshot
				content_parts: list[ContentPartTextParam | ContentPartImageParam] = [
					ContentPartTextParam(text=self._last_browser_state_text)
				]
				# Add screenshot
				content_parts.append(
					ContentPartImageParam(
						image_url=ImageURL(
							url=f'data:image/png;base64,{self._last_screenshot}',
							media_type='image/png',
							detail='auto',
						),
					)
				)
				messages_to_send.append(UserMessage(content=content_parts))
			else:
				# Text only
				messages_to_send.append(UserMessage(content=self._last_browser_state_text))
			# Clear browser state after including it so it's only in this request
			self._last_browser_state_text = None
			self._last_screenshot = None
		# Call LLM with message history (including temporary browser state message)
		response = await self.llm.ainvoke(messages_to_send)
		# Store usage stats from this LLM call
		self._last_llm_usage = response.usage
		# Log the LLM's raw output for debugging
		logger.info(f'LLM Response:\n{response.completion}')
		await self._demo_mode_log(
			f'LLM Response:\n{response.completion}',
			'thought',
			{'step': step_number} if step_number else None,
		)
		# Check for token limit or repetition issues
		max_tokens = getattr(self.llm, 'max_tokens', None)
		completion_tokens = response.usage.completion_tokens if response.usage else None
		is_problematic, issue_message = detect_token_limit_issue(
			completion=response.completion,
			completion_tokens=completion_tokens,
			max_tokens=max_tokens,
			stop_reason=response.stop_reason,
		)
		if is_problematic:
			logger.warning(f'Token limit issue detected: {issue_message}')
			# Don't add the bad response to history
			# Instead, inject a system message prompting recovery
			recovery_prompt = (
				f'Your previous response hit a token limit or became repetitive: {issue_message}\n\n'
				'Please write a SHORT plan (2 sentences) for what to do next, then execute ONE simple action.'
			)
			self._llm_messages.append(UserMessage(content=recovery_prompt))
			# Return a controlled error message instead of corrupted code
			return '', f'[Token limit error: {issue_message}]'
		# Store the full response
		full_response = response.completion
		# Extract code blocks from response
		# Support multiple code block types: python, js, bash, markdown
		code_blocks = extract_code_blocks(response.completion)
		# Inject non-python blocks into namespace as variables
		# Track which variables are code blocks for browser state display
		if '_code_block_vars' not in self.namespace:
			self.namespace['_code_block_vars'] = set()
		for block_type, block_content in code_blocks.items():
			if not block_type.startswith('python'):
				# Store js, bash, markdown blocks (and named variants) as variables in namespace
				self.namespace[block_type] = block_content
				self.namespace['_code_block_vars'].add(block_type)
				print(f'→ Code block variable: {block_type} (str, {len(block_content)} chars)')
				logger.debug(f'Injected {block_type} block into namespace ({len(block_content)} chars)')
		# Store all code blocks for sequential execution
		self.namespace['_all_code_blocks'] = code_blocks
		# Get Python code if it exists
		# If no python block exists and no other code blocks exist, return empty string to skip execution
		# This prevents treating plain text explanations as code
		# NOTE(review): the fallback below is the raw completion, not '' as the comment above
		# says — a plain-text reply with no code fences would be treated as code; confirm intended.
		code = code_blocks.get('python', response.completion)
		# Add to LLM messages (truncate for history to save context)
		truncated_completion = truncate_message_content(response.completion)
		self._llm_messages.append(AssistantMessage(content=truncated_completion))
		return code, full_response
def _print_variable_info(self, var_name: str, value: Any) -> None:
"""Print compact info about a variable assignment."""
# Skip built-in modules and known imports
skip_names = {
'json',
'asyncio',
'csv',
're',
'datetime',
'Path',
'pd',
'np',
'plt',
'requests',
'BeautifulSoup',
'PdfReader',
'browser',
'file_system',
}
if var_name in skip_names:
return
# Skip code block variables (already printed)
if '_code_block_vars' in self.namespace and var_name in self.namespace.get('_code_block_vars', set()):
return
# Print compact variable info
if isinstance(value, (list, dict)):
preview = str(value)[:100]
print(f'→ Variable: {var_name} ({type(value).__name__}, len={len(value)}, preview={preview}...)')
elif isinstance(value, str) and len(value) > 50:
print(f'→ Variable: {var_name} (str, {len(value)} chars, preview={value[:50]}...)')
elif callable(value):
print(f'→ Variable: {var_name} (function)')
else:
print(f'→ Variable: {var_name} ({type(value).__name__}, value={repr(value)[:50]})')
async def _execute_code(self, code: str) -> tuple[str | None, str | None, str | None]:
"""
Execute Python code in the namespace.
Args:
code: The Python code to execute
Returns:
Tuple of (output, error, browser_state)
"""
# Create new cell
cell = self.session.add_cell(source=code)
cell.status = ExecutionStatus.RUNNING
cell.execution_count = self.session.increment_execution_count()
output = None
error = None
browser_state = None
try:
# Capture output
import ast
import io
import sys
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# Add asyncio to namespace if not already there
if 'asyncio' not in self.namespace:
self.namespace['asyncio'] = asyncio
# Store the current code in namespace for done() validation
self.namespace['_current_cell_code'] = code
# Store consecutive errors count for done() validation
self.namespace['_consecutive_errors'] = self._consecutive_errors
# Check if code contains await expressions - if so, wrap in async function
# This mimics how Jupyter/IPython handles top-level await
try:
tree = ast.parse(code, mode='exec')
has_await = any(isinstance(node, (ast.Await, ast.AsyncWith, ast.AsyncFor)) for node in ast.walk(tree))
except SyntaxError:
# If parse fails, let exec handle the error
has_await = False
if has_await:
# When code has await, we must wrap in async function
# To make variables persist naturally (like Jupyter without needing 'global'):
# 1. Extract all assigned variable names from the code
# 2. Inject 'global' declarations for variables that already exist in namespace
# 3. Extract user's explicit global declarations and pre-define those vars
# 4. Return locals() so we can update namespace with new variables
# Find all variable names being assigned + user's explicit globals
try:
assigned_names = set()
user_global_names = set()
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
for target in node.targets:
if isinstance(target, ast.Name):
assigned_names.add(target.id)
elif isinstance(node, ast.AugAssign) and isinstance(node.target, ast.Name):
assigned_names.add(node.target.id)
elif isinstance(node, (ast.AnnAssign, ast.NamedExpr)):
if hasattr(node, 'target') and isinstance(node.target, ast.Name):
assigned_names.add(node.target.id)
elif isinstance(node, ast.Global):
# Track user's explicit global declarations
user_global_names.update(node.names)
# Pre-define any user-declared globals that don't exist yet
# This prevents NameError when user writes "global foo" before "foo = ..."
for name in user_global_names:
if name not in self.namespace:
self.namespace[name] = None
# Filter to only existing namespace vars (like Jupyter does)
# Include both: assigned vars that exist + user's explicit globals
existing_vars = {name for name in (assigned_names | user_global_names) if name in self.namespace}
except Exception as e:
existing_vars = set()
# Build global declaration if needed
global_decl = ''
has_global_decl = False
if existing_vars:
vars_str = ', '.join(sorted(existing_vars))
global_decl = f' global {vars_str}\n'
has_global_decl = True
indented_code = '\n'.join(' ' + line if line.strip() else line for line in code.split('\n'))
wrapped_code = f"""async def __code_exec__():
{global_decl}{indented_code}
# Return locals so we can update the namespace
return locals()
__code_exec_coro__ = __code_exec__()
"""
# Store whether we added a global declaration (needed for error line mapping)
self.namespace['_has_global_decl'] = has_global_decl
# Compile and execute wrapper at module level
compiled_code = compile(wrapped_code, '<code>', 'exec')
exec(compiled_code, self.namespace, self.namespace)
# Get and await the coroutine, then update namespace with new/modified variables
coro = self.namespace.get('__code_exec_coro__')
if coro:
result_locals = await coro
# Update namespace with all variables from the function's locals
# This makes variable assignments persist across cells
if result_locals:
for key, value in result_locals.items():
if not key.startswith('_'):
self.namespace[key] = value
# Variable info is tracked in "Available" section, no need for verbose inline output
# Clean up temporary variables
self.namespace.pop('__code_exec_coro__', None)
self.namespace.pop('__code_exec__', None)
else:
# No await - execute directly at module level for natural variable scoping
# This means x = x + 10 will work without needing 'global x'
# Track variables before execution
vars_before = set(self.namespace.keys())
compiled_code = compile(code, '<code>', 'exec')
exec(compiled_code, self.namespace, self.namespace)
# Track newly created/modified variables (info shown in "Available" section)
vars_after = set(self.namespace.keys())
new_vars = vars_after - vars_before
# Get output
output_value = sys.stdout.getvalue()
if output_value:
output = output_value
finally:
sys.stdout = old_stdout
# Wait 2 seconds for page to stabilize after code execution
await asyncio.sleep(0.5)
# Note: Browser state is now fetched right before LLM call instead of after each execution
# This reduces unnecessary state fetches for operations that don't affect the browser
cell.status = ExecutionStatus.SUCCESS
cell.output = output
cell.browser_state = None # Will be captured in next iteration before LLM call
except Exception as e:
# Handle EvaluateError specially - JavaScript execution failed
if isinstance(e, EvaluateError):
error = str(e)
cell.status = ExecutionStatus.ERROR
cell.error = error
logger.error(f'Code execution error: {error}')
await asyncio.sleep(1)
# Browser state will be fetched before next LLM call
# Return immediately - do not continue executing code
return output, error, None
# Handle NameError specially - check for code block variable confusion
if isinstance(e, NameError):
error_msg = str(e)
cell.status = ExecutionStatus.ERROR
cell.error = error
# Browser state will be fetched before next LLM call
await asyncio.sleep(0.5)
return output, error, None
# For syntax errors and common parsing errors, show just the error message
# without the full traceback to keep output clean
if isinstance(e, SyntaxError):
error_msg = e.msg if e.msg else str(e)
error = f'{type(e).__name__}: {error_msg}'
# Detect common f-string issues with JSON/JavaScript code
if 'unterminated' in error_msg.lower() and 'string' in error_msg.lower() and code:
# Check if code contains f-strings with potential JSON/JS content
has_fstring = bool(re.search(r'\bf["\']', code))
has_json_pattern = bool(re.search(r'json\.dumps|"[^"]*\{[^"]*\}[^"]*"|\'[^\']*\{[^\']*\}[^\']*\'', code))
has_js_pattern = bool(re.search(r'evaluate\(|await evaluate', code))
if has_fstring and (has_json_pattern or has_js_pattern):
error += (
'\n\n💡 TIP: Detected f-string with JSON/JavaScript code containing {}.\n'
' Use separate ```js or ```markdown blocks instead of f-strings to avoid escaping issues.\n'
' If your code block needs ``` inside it, wrap with 4+ backticks: ````markdown code`\n'
)
# Detect and provide helpful hints for common string literal errors
if 'unterminated' in error_msg.lower() and 'string' in error_msg.lower():
# Detect what type of string literal is unterminated
is_triple = 'triple-quoted' in error_msg.lower()
msg_lower = error_msg.lower()
# Detect prefix type from error message
if 'f-string' in msg_lower and 'raw' in msg_lower:
prefix = 'rf or fr'
desc = 'raw f-string'
elif 'f-string' in msg_lower:
prefix = 'f'
desc = 'f-string'
elif 'raw' in msg_lower and 'bytes' in msg_lower:
prefix = 'rb or br'
desc = 'raw bytes'
elif 'raw' in msg_lower:
prefix = 'r'
desc = 'raw string'
elif 'bytes' in msg_lower:
prefix = 'b'
desc = 'bytes'
else:
prefix = ''
desc = 'string'
# Build hint based on triple-quoted vs single/double quoted
if is_triple:
if prefix:
hint = f"Hint: Unterminated {prefix}'''...''' or {prefix}\"\"\"...\"\" ({desc}). Check for missing closing quotes or unescaped quotes inside."
else:
hint = "Hint: Unterminated '''...''' or \"\"\"...\"\" detected. Check for missing closing quotes or unescaped quotes inside."
hint += '\n If you need ``` inside your string, use a ````markdown varname` code block with 4+ backticks instead.'
else:
if prefix:
hint = f'Hint: Unterminated {prefix}\'...\' or {prefix}"..." ({desc}). Check for missing closing quote or unescaped quotes inside.'
else:
hint = 'Hint: Unterminated \'...\' or "..." detected. Check for missing closing quote or unescaped quotes inside the string.'
error += f'\n{hint}'
# Show the problematic line from the code
if e.text:
error += f'\n{e.text}'
elif e.lineno and code:
# If e.text is empty, extract the line from the code
lines = code.split('\n')
if 0 < e.lineno <= len(lines):
error += f'\n{lines[e.lineno - 1]}'
else:
# For other errors, try to extract useful information
error_str = str(e)
error = f'{type(e).__name__}: {error_str}' if error_str else f'{type(e).__name__} occurred'
# For RuntimeError or other exceptions, try to extract traceback info
# to show which line in the user's code actually failed
if hasattr(e, '__traceback__'):
# Walk the traceback to find the frame with '<code>' filename
tb = e.__traceback__
user_code_lineno = None
while tb is not None:
frame = tb.tb_frame
if frame.f_code.co_filename == '<code>':
# Found the frame executing user code
# Get the line number from the traceback
user_code_lineno = tb.tb_lineno
break
tb = tb.tb_next
cell.status = ExecutionStatus.ERROR
cell.error = error
logger.error(f'Code execution error: {error}')
await asyncio.sleep(1)
# Browser state will be fetched before next LLM call
return output, error, None
async def _get_browser_state(self) -> tuple[str, str | None]:
"""Get the current browser state as text with ultra-minimal DOM structure for code agents.
Returns:
Tuple of (browser_state_text, screenshot_base64)
"""
if not self.browser_session or not self.dom_service:
return 'Browser state not available', None
try:
# Get full browser state including screenshot if use_vision is enabled
include_screenshot = True
state = await self.browser_session.get_browser_state_summary(include_screenshot=include_screenshot)
# Format browser state with namespace context
browser_state_text = await format_browser_state_for_llm(
state=state, namespace=self.namespace, browser_session=self.browser_session
)
screenshot = state.screenshot if include_screenshot else None
return browser_state_text, screenshot
except Exception as e:
logger.error(f'Failed to get browser state: {e}')
return f'Error getting browser state: {e}', None
def _format_execution_result(self, code: str, output: str | None, error: str | None, current_step: int | None = None) -> str:
"""Format the execution result for the LLM (without browser state)."""
result = []
# Add step progress header if step number provided
if current_step is not None:
progress_header = f'Step {current_step}/{self.max_steps} executed'
# Add consecutive failure tracking if there are errors
if error and self._consecutive_errors > 0:
progress_header += f' | Consecutive failures: {self._consecutive_errors}/{self.max_failures}'
result.append(progress_header)
if error:
result.append(f'Error: {error}')
if output:
# Truncate output if too long
if len(output) > 10000:
output = output[:9950] + '\n[Truncated after 10000 characters]'
result.append(f'Output: {output}')
if len(result) == 0:
result.append('Executed')
return '\n'.join(result)
def _is_task_done(self) -> bool:
"""Check if the task is marked as done in the namespace."""
# Check if 'done' was called by looking for a special marker in namespace
return self.namespace.get('_task_done', False)
async def _capture_screenshot(self, step_number: int) -> str | None:
"""Capture and store screenshot for eval tracking."""
if not self.browser_session:
return None
try:
# Get browser state summary which includes screenshot
state = await self.browser_session.get_browser_state_summary(include_screenshot=True)
if state and state.screenshot:
# Store screenshot using screenshot service
screenshot_path = await self.screenshot_service.store_screenshot(state.screenshot, step_number)
return str(screenshot_path) if screenshot_path else None
except Exception as e:
logger.warning(f'Failed to capture screenshot for step {step_number}: {e}')
return None
async def _add_step_to_complete_history(
    self,
    model_output_code: str,
    full_llm_response: str,
    output: str | None,
    error: str | None,
    screenshot_path: str | None,
) -> None:
    """Add a step to complete_history using type-safe models.

    Args:
        model_output_code: Code extracted from the LLM response for this step.
        full_llm_response: The complete raw LLM response (code plus any surrounding text).
        output: Output text produced by executing the code, if any.
        error: Error message from execution, if any.
        screenshot_path: Filesystem path of the screenshot captured for this step, if any.
    """
    # Get current browser URL and title for state
    url: str | None = None
    title: str | None = None
    if self.browser_session:
        try:
            url = await self.browser_session.get_current_page_url()
            # Get title from browser
            cdp_session = await self.browser_session.get_or_create_cdp_session()
            result = await cdp_session.cdp_client.send.Runtime.evaluate(
                params={'expression': 'document.title', 'returnByValue': True},
                session_id=cdp_session.session_id,
            )
            title = result.get('result', {}).get('value')
        except Exception as e:
            # URL/title are best-effort metadata; never fail the step over them.
            logger.debug(f'Failed to get browser URL/title for history: {e}')
    # Check if this is a done result
    is_done = self._is_task_done()
    # Get self-reported success from done() call if task is done
    self_reported_success: bool | None = None
    if is_done:
        task_success = self.namespace.get('_task_success')
        # Only trust an actual boolean; anything else is treated as "unknown".
        self_reported_success = task_success if isinstance(task_success, bool) else None
    # Create result entry using typed model
    result_entry = CodeAgentResult(
        extracted_content=output if output else None,
        error=error if error else None,
        is_done=is_done,
        success=self_reported_success,
    )
    # Create state entry using typed model
    state_entry = CodeAgentState(url=url, title=title, screenshot_path=screenshot_path)
    # Create metadata entry using typed model
    step_end_time = datetime.datetime.now().timestamp()
    metadata_entry = CodeAgentStepMetadata(
        input_tokens=self._last_llm_usage.prompt_tokens if self._last_llm_usage else None,
        output_tokens=self._last_llm_usage.completion_tokens if self._last_llm_usage else None,
        step_start_time=self._step_start_time,
        step_end_time=step_end_time,
    )
    # Create model output entry using typed model (if there's code to track)
    model_output_entry: CodeAgentModelOutput | None = None
    if model_output_code or full_llm_response:
        model_output_entry = CodeAgentModelOutput(
            model_output=model_output_code if model_output_code else '',
            full_response=full_llm_response if full_llm_response else '',
        )
    # Create history entry using typed model
    history_entry = CodeAgentHistory(
        model_output=model_output_entry,
        result=[result_entry],
        state=state_entry,
        metadata=metadata_entry,
        screenshot_path=screenshot_path,  # Keep for backward compatibility
    )
    self.complete_history.append(history_entry)
    await self._demo_mode_log_step(history_entry)
async def _demo_mode_log(self, message: str, level: str = 'info', metadata: dict[str, Any] | None = None) -> None:
if not (self._demo_mode_enabled and message and self.browser_session):
return
try:
await self.browser_session.send_demo_mode_log(
message=message,
level=level,
metadata=metadata or {},
)
except Exception as exc:
logger.debug(f'[DemoMode] Failed to send log: {exc}')
async def _demo_mode_log_step(self, history_entry: CodeAgentHistory) -> None:
if not self._demo_mode_enabled:
return
step_number = len(self.complete_history)
result = history_entry.result[0] if history_entry.result else None
if not result:
return
level = 'error' if result.error else 'success' if result.success else 'info'
message_parts = [f'Step {step_number}:']
if result.error:
message_parts.append(f'Error: {result.error}')
if result.extracted_content:
message_parts.append(result.extracted_content)
elif result.success:
message_parts.append('Marked done.')
else:
message_parts.append('Executed.')
await self._demo_mode_log(
' '.join(message_parts).strip(),
level,
{'step': step_number, 'url': history_entry.state.url if history_entry.state else None},
)
def _add_sample_output_cell(self, final_result: Any | None) -> None:
if self._sample_output_added or final_result is None:
return
sample_content: str | None = None
def _extract_sample(data: Any) -> Any | None:
if isinstance(data, list) and data:
return data[0]
if isinstance(data, dict) and data:
first_key = next(iter(data))
return {first_key: data[first_key]}
return data if isinstance(data, (str, int, float, bool)) else None
data: Any | None = None
if isinstance(final_result, str):
try:
data = json.loads(final_result)
except Exception:
sample_content = final_result.strip()
elif isinstance(final_result, (list, dict)):
data = final_result
if data is not None:
sample = _extract_sample(data)
if isinstance(sample, (dict, list)):
try:
sample_content = json.dumps(sample, indent=2, ensure_ascii=False)
except Exception:
sample_content = str(sample)
elif sample is not None:
sample_content = str(sample)
if not sample_content:
return
sample_cell = self.session.add_cell(source='# Sample output preview')
sample_cell.cell_type = CellType.MARKDOWN
sample_cell.status = ExecutionStatus.SUCCESS
sample_cell.execution_count = None
escaped = html.escape(sample_content)
sample_cell.output = f'<pre>{escaped}</pre>'
self._sample_output_added = True
def _log_agent_event(self, max_steps: int, agent_run_error: str | None = None) -> None:
    """Send the agent event for this run to telemetry.

    Args:
        max_steps: The step budget this run was configured with.
        agent_run_error: Top-level error message if the run aborted, else None.
    """
    from urllib.parse import urlparse

    # Aggregate token usage for the model used in this run.
    token_summary = self.token_cost_service.get_usage_tokens_for_model(self.llm.model)

    # For CodeAgent, we don't have action history like Agent does
    # Instead we track the code execution cells
    action_history_data: list[list[dict[str, Any]] | None] = []
    for step in self.complete_history:
        # Extract code from model_output if available (type-safe access)
        if step.model_output and step.model_output.full_response:
            code = step.model_output.full_response
            # Represent each code cell as a simple action entry
            action_history_data.append([{'llm_response': code}])
        else:
            action_history_data.append(None)

    # Get final result from the last step or namespace (type-safe)
    final_result: Any = self.namespace.get('_task_result')
    final_result_str: str | None = final_result if isinstance(final_result, str) else None

    # Get URLs visited from complete_history (type-safe access)
    urls_visited: list[str] = []
    for step in self.complete_history:
        if step.state.url and step.state.url not in urls_visited:
            urls_visited.append(step.state.url)

    # Get errors from complete_history (type-safe access)
    errors: list[str] = []
    for step in self.complete_history:
        for result in step.result:
            if result.error:
                errors.append(result.error)

    # Determine success from task completion status (type-safe)
    is_done = self._is_task_done()
    task_success: Any = self.namespace.get('_task_success')
    # Done with a non-bool success marker counts as failure; unfinished is unknown (None).
    self_reported_success: bool | None = task_success if isinstance(task_success, bool) else (False if is_done else None)

    self.telemetry.capture(
        AgentTelemetryEvent(
            task=self.task,
            model=self.llm.model,
            model_provider=self.llm.provider,
            max_steps=max_steps,
            max_actions_per_step=1,  # CodeAgent executes one code cell per step
            use_vision=self.use_vision,
            version=self.version,
            source=self.source,
            # Only the hostname of the CDP URL is reported, not the full URL.
            cdp_url=urlparse(self.browser_session.cdp_url).hostname
            if self.browser_session and self.browser_session.cdp_url
            else None,
            agent_type='code',  # CodeAgent identifier
            action_errors=errors,
            action_history=action_history_data,
            urls_visited=urls_visited,
            steps=len(self.complete_history),
            total_input_tokens=token_summary.prompt_tokens,
            total_output_tokens=token_summary.completion_tokens,
            prompt_cached_tokens=token_summary.prompt_cached_tokens,
            total_tokens=token_summary.total_tokens,
            total_duration_seconds=sum(step.metadata.duration_seconds for step in self.complete_history if step.metadata),
            success=self_reported_success,
            final_result_response=final_result_str,
            error_message=agent_run_error,
        )
    )
def screenshot_paths(self, n_last: int | None = None) -> list[str | None]:
"""
Get screenshot paths from complete_history for eval system.
Args:
n_last: Optional number of last screenshots to return
Returns:
List of screenshot file paths (or None for missing screenshots)
"""
paths = [step.screenshot_path for step in self.complete_history]
if n_last is not None:
return paths[-n_last:] if len(paths) > n_last else paths
return paths
@property
def message_manager(self) -> Any:
    """
    Compatibility property for eval system.
    Returns a mock object with last_input_messages attribute.
    """

    class _EvalMessageManager:
        """Minimal stand-in exposing the attribute the eval system reads."""

        def __init__(self, llm_messages: list[BaseMessage]) -> None:
            self.last_input_messages = llm_messages

    return _EvalMessageManager(self._llm_messages)
@property
def history(self) -> CodeAgentHistoryList:
    """
    Compatibility property for eval system.

    Wraps complete_history in a CodeAgentHistoryList, which is what the eval
    system expects from ``agent.history``.
    """
    wrapped = CodeAgentHistoryList(self.complete_history, self.usage_summary)
    return wrapped
async def close(self) -> None:
    """Close the browser session."""
    if not self.browser_session:
        return
    # Honor keep_alive: only kill the browser when the profile allows it.
    if self.browser_session.browser_profile.keep_alive:
        logger.debug('Browser keep_alive is True, not closing browser session')
    else:
        await self.browser_session.kill()
async def __aenter__(self) -> 'CodeAgent':
    """Async context manager entry.

    Returns:
        This agent instance, for use inside the ``async with`` block.
    """
    return self
async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None:
    """Async context manager exit.

    Delegates to close(), which respects the browser profile's keep_alive flag.
    """
    await self.close()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/code_use/service.py",
"license": "MIT License",
"lines": 1232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/code_use/views.py | """Data models for code-use mode."""
from __future__ import annotations
import json
from enum import Enum
from pathlib import Path
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from uuid_extensions import uuid7str
from browser_use.tokens.views import UsageSummary
class CellType(str, Enum):
    """Type of notebook cell."""

    # str-valued so members serialize directly in JSON / pydantic dumps.
    CODE = 'code'
    MARKDOWN = 'markdown'
class ExecutionStatus(str, Enum):
    """Execution status of a cell."""

    # Lifecycle: PENDING -> RUNNING -> SUCCESS or ERROR.
    PENDING = 'pending'
    RUNNING = 'running'
    SUCCESS = 'success'
    ERROR = 'error'
class CodeCell(BaseModel):
    """Represents a code cell in the notebook-like execution.

    Holds the source to run plus the outcome of running it (output, status,
    error) and the browser state observed after execution.
    """

    model_config = ConfigDict(extra='forbid')

    id: str = Field(default_factory=uuid7str)
    cell_type: CellType = CellType.CODE
    source: str = Field(description='The code to execute')
    output: str | None = Field(default=None, description='The output of the code execution')
    execution_count: int | None = Field(default=None, description='The execution count')
    status: ExecutionStatus = Field(default=ExecutionStatus.PENDING)
    error: str | None = Field(default=None, description='Error message if execution failed')
    browser_state: str | None = Field(default=None, description='Browser state after execution')
class NotebookSession(BaseModel):
    """A notebook-style execution session holding an ordered list of code cells."""

    model_config = ConfigDict(extra='forbid')

    id: str = Field(default_factory=uuid7str)
    cells: list[CodeCell] = Field(default_factory=list)
    current_execution_count: int = Field(default=0)
    namespace: dict[str, Any] = Field(default_factory=dict, description='Current namespace state')

    # Private (non-serialized) run history and token usage.
    _complete_history: list[CodeAgentHistory] = PrivateAttr(default_factory=list)
    _usage_summary: UsageSummary | None = PrivateAttr(default=None)

    def add_cell(self, source: str) -> CodeCell:
        """Create a cell from *source*, append it to the session, and return it."""
        new_cell = CodeCell(source=source)
        self.cells.append(new_cell)
        return new_cell

    def get_cell(self, cell_id: str) -> CodeCell | None:
        """Return the cell whose id equals *cell_id*, or None when absent."""
        return next((c for c in self.cells if c.id == cell_id), None)

    def get_latest_cell(self) -> CodeCell | None:
        """Return the most recently added cell, or None for an empty session."""
        return self.cells[-1] if self.cells else None

    def increment_execution_count(self) -> int:
        """Bump the execution counter and return its new value."""
        self.current_execution_count += 1
        return self.current_execution_count

    @property
    def history(self) -> CodeAgentHistoryList:
        """Expose the accumulated history via the AgentHistoryList-compatible wrapper."""
        return CodeAgentHistoryList(self._complete_history, self._usage_summary)
class NotebookExport(BaseModel):
    """Export format for Jupyter notebook (nbformat 4.5 envelope)."""

    model_config = ConfigDict(extra='forbid')

    nbformat: int = Field(default=4)
    nbformat_minor: int = Field(default=5)
    metadata: dict[str, Any] = Field(default_factory=dict)
    # Raw cell dicts in Jupyter's on-disk layout.
    cells: list[dict[str, Any]] = Field(default_factory=list)
class CodeAgentModelOutput(BaseModel):
    """Model output for CodeAgent - contains the code and full LLM response."""

    model_config = ConfigDict(extra='forbid')

    # model_output is the code portion only; full_response keeps the entire reply.
    model_output: str = Field(description='The extracted code from the LLM response')
    full_response: str = Field(description='The complete LLM response including any text/reasoning')
class CodeAgentResult(BaseModel):
    """Result of executing a code cell in CodeAgent."""

    model_config = ConfigDict(extra='forbid')

    extracted_content: str | None = Field(default=None, description='Output from code execution')
    error: str | None = Field(default=None, description='Error message if execution failed')
    is_done: bool = Field(default=False, description='Whether task is marked as done')
    # success is self-reported via done(); None when the task is not done or no boolean was given.
    success: bool | None = Field(default=None, description='Self-reported success from done() call')
class CodeAgentState(BaseModel):
    """Browser state captured for a single CodeAgent step."""

    model_config = ConfigDict(extra='forbid', arbitrary_types_allowed=True)

    url: str | None = Field(default=None, description='Current page URL')
    title: str | None = Field(default=None, description='Current page title')
    screenshot_path: str | None = Field(default=None, description='Path to screenshot file')

    def get_screenshot(self) -> str | None:
        """Read the screenshot file from disk and return it base64-encoded, or None."""
        if not self.screenshot_path:
            return None

        import base64
        from pathlib import Path

        file_path = Path(self.screenshot_path)
        if not file_path.exists():
            return None

        try:
            raw = file_path.read_bytes()
        except Exception:
            # Unreadable file (permissions, race with cleanup, ...): treat as missing.
            return None
        return base64.b64encode(raw).decode('utf-8')
class CodeAgentStepMetadata(BaseModel):
    """Metadata for a single CodeAgent step including timing and token information."""

    model_config = ConfigDict(extra='forbid')

    input_tokens: int | None = Field(default=None, description='Number of input tokens used')
    output_tokens: int | None = Field(default=None, description='Number of output tokens used')
    step_start_time: float = Field(description='Step start timestamp (Unix time)')
    step_end_time: float = Field(description='Step end timestamp (Unix time)')

    @property
    def duration_seconds(self) -> float:
        """Calculate step duration in seconds."""
        return self.step_end_time - self.step_start_time
class CodeAgentHistory(BaseModel):
    """A single CodeAgent step: LLM output, execution results, state and metadata."""

    model_config = ConfigDict(extra='forbid', arbitrary_types_allowed=True)

    model_output: CodeAgentModelOutput | None = Field(default=None, description='LLM output for this step')
    result: list[CodeAgentResult] = Field(default_factory=list, description='Results from code execution')
    state: CodeAgentState = Field(description='Browser state at this step')
    metadata: CodeAgentStepMetadata | None = Field(default=None, description='Step timing and token metadata')
    screenshot_path: str | None = Field(default=None, description='Legacy field for screenshot path')

    def model_dump(self, **kwargs) -> dict[str, Any]:
        """Custom serialization for CodeAgentHistory."""
        serialized_output = None if self.model_output is None else self.model_output.model_dump()
        serialized_meta = None if self.metadata is None else self.metadata.model_dump()
        return {
            'model_output': serialized_output,
            'result': [item.model_dump() for item in self.result],
            'state': self.state.model_dump(),
            'metadata': serialized_meta,
            'screenshot_path': self.screenshot_path,
        }
class CodeAgentHistoryList:
    """Compatibility wrapper exposing an AgentHistoryList-like API over CodeAgent history."""

    def __init__(self, complete_history: list[CodeAgentHistory], usage_summary: UsageSummary | None) -> None:
        """Wrap the raw step history and an optional usage summary."""
        self._complete_history = complete_history
        self._usage_summary = usage_summary

    @property
    def history(self) -> list[CodeAgentHistory]:
        """The raw list of history items."""
        return self._complete_history

    @property
    def usage(self) -> UsageSummary | None:
        """Token/cost usage summary, when available."""
        return self._usage_summary

    def __len__(self) -> int:
        """Number of recorded steps."""
        return len(self._complete_history)

    def __str__(self) -> str:
        """Representation of the CodeAgentHistoryList object."""
        return f'CodeAgentHistoryList(steps={len(self._complete_history)}, action_results={len(self.action_results())})'

    # repr and str are intentionally identical.
    __repr__ = __str__

    def final_result(self) -> None | str:
        """Extracted content of the very last result, if any."""
        if not self._complete_history:
            return None
        last_results = self._complete_history[-1].result
        return last_results[-1].extracted_content if last_results else None

    def is_done(self) -> bool:
        """True when the last recorded result is flagged done."""
        if not self._complete_history:
            return False
        results = self._complete_history[-1].result
        if not results:
            return False
        return results[-1].is_done is True

    def is_successful(self) -> bool | None:
        """Self-reported success of the final step; None when the task is not done."""
        if self._complete_history:
            results = self._complete_history[-1].result
            if results and results[-1].is_done is True:
                return results[-1].success
        return None

    def errors(self) -> list[str | None]:
        """Per-step error (first error of the step) or None for error-free steps."""
        collected: list[str | None] = []
        for item in self._complete_history:
            # Each step can have only one error; take the first non-empty one.
            collected.append(next((r.error for r in item.result if r.error), None))
        return collected

    def has_errors(self) -> bool:
        """Whether any step recorded an error."""
        return any(err is not None for err in self.errors())

    def urls(self) -> list[str | None]:
        """URL recorded for each step (None when missing)."""
        return [item.state.url for item in self._complete_history]

    def screenshot_paths(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Screenshot file paths, optionally limited to the last *n_last* steps."""
        if n_last == 0:
            return []
        items = self._complete_history if n_last is None else self._complete_history[-n_last:]
        if return_none_if_not_screenshot:
            return [item.state.screenshot_path for item in items]
        return [item.state.screenshot_path for item in items if item.state.screenshot_path is not None]

    def screenshots(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Screenshots loaded from disk as base64 strings."""
        if n_last == 0:
            return []
        items = self._complete_history if n_last is None else self._complete_history[-n_last:]
        shots: list[str | None] = []
        for item in items:
            encoded = item.state.get_screenshot()
            if encoded:
                shots.append(encoded)
            elif return_none_if_not_screenshot:
                shots.append(None)
        return shots

    def action_results(self) -> list[CodeAgentResult]:
        """All results across every step, flattened."""
        return [r for item in self._complete_history for r in item.result if r]

    def extracted_content(self) -> list[str]:
        """All non-empty extracted-content strings across steps."""
        return [r.extracted_content for item in self._complete_history for r in item.result if r.extracted_content]

    def number_of_steps(self) -> int:
        """Number of steps in the history."""
        return len(self._complete_history)

    def total_duration_seconds(self) -> float:
        """Sum of per-step durations where metadata exists."""
        return sum((item.metadata.duration_seconds for item in self._complete_history if item.metadata), 0.0)

    def last_action(self) -> None | dict:
        """The last step's code execution as an action dict, if model output exists."""
        if not self._complete_history:
            return None
        tail = self._complete_history[-1].model_output
        if not tail:
            return None
        return {
            'execute_code': {
                'code': tail.model_output,
                'full_response': tail.full_response,
            }
        }

    def action_names(self) -> list[str]:
        """Action name per model action ('execute_code' for each code execution)."""
        return [next(iter(action)) for action in self.model_actions() if action]

    def model_thoughts(self) -> list[Any]:
        """All non-empty model outputs (CodeAgent has no separate 'thoughts')."""
        return [item.model_output for item in self._complete_history if item.model_output]

    def model_outputs(self) -> list[CodeAgentModelOutput]:
        """All non-empty model outputs from history."""
        return [item.model_output for item in self._complete_history if item.model_output]

    def model_actions(self) -> list[dict]:
        """One fresh action dict per executed result, carrying the step's code."""
        collected: list[dict] = []
        for item in self._complete_history:
            if not item.model_output:
                continue
            for _ in item.result:
                collected.append(
                    {
                        'execute_code': {
                            'code': item.model_output.model_output,
                            'full_response': item.model_output.full_response,
                        }
                    }
                )
        return collected

    def action_history(self) -> list[list[dict]]:
        """Truncated action dicts (code + result summary) grouped by step."""
        per_step: list[list[dict]] = []
        for item in self._complete_history:
            step_actions: list[dict] = []
            if item.model_output:
                for res in item.result:
                    step_actions.append(
                        {
                            'execute_code': {
                                'code': item.model_output.model_output,
                            },
                            'result': {
                                'extracted_content': res.extracted_content,
                                'is_done': res.is_done,
                                'success': res.success,
                                'error': res.error,
                            },
                        }
                    )
            per_step.append(step_actions)
        return per_step

    def model_actions_filtered(self, include: list[str] | None = None) -> list[dict]:
        """Filtered model actions - always empty for CodeAgent."""
        return []

    def add_item(self, history_item: CodeAgentHistory) -> None:
        """Append a history item to the underlying list."""
        self._complete_history.append(history_item)

    def model_dump(self, **kwargs) -> dict[str, Any]:
        """Serialize the history plus usage summary to plain dicts."""
        usage = self._usage_summary.model_dump() if self._usage_summary else None
        return {
            'history': [item.model_dump(**kwargs) for item in self._complete_history],
            'usage': usage,
        }

    def save_to_file(self, filepath: str | Path, sensitive_data: dict[str, str | dict[str, str]] | None = None) -> None:
        """Save history to *filepath* as pretty-printed JSON, creating parent dirs."""
        destination = Path(filepath)
        destination.parent.mkdir(parents=True, exist_ok=True)
        payload = self.model_dump()
        with open(destination, 'w', encoding='utf-8') as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/code_use/views.py",
"license": "MIT License",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/markdown_extractor.py | """
Shared markdown extraction utilities for browser content processing.
This module provides a unified interface for extracting clean markdown from browser content,
used by both the tools service and page actor.
"""
import re
from dataclasses import dataclass
from enum import Enum, auto
from typing import TYPE_CHECKING, Any
from browser_use.dom.serializer.html_serializer import HTMLSerializer
from browser_use.dom.service import DomService
from browser_use.dom.views import MarkdownChunk
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession
from browser_use.browser.watchdogs.dom_watchdog import DOMWatchdog
async def extract_clean_markdown(
    browser_session: 'BrowserSession | None' = None,
    dom_service: DomService | None = None,
    target_id: str | None = None,
    extract_links: bool = False,
) -> tuple[str, dict[str, Any]]:
    """Extract clean markdown from browser content using the enhanced DOM tree.

    Works through either a browser session (tools-service path) or a DOM service
    plus target id (page-actor path).

    Args:
        browser_session: Browser session to extract content from (tools service path)
        dom_service: DOM service instance (page actor path)
        target_id: Target ID for the page (required when using dom_service)
        extract_links: Whether to preserve links in markdown

    Returns:
        tuple: (clean_markdown_content, content_statistics)

    Raises:
        ValueError: If neither browser_session nor (dom_service + target_id) are provided,
            or if both paths are specified at once.
    """
    if browser_session is not None:
        if dom_service is not None or target_id is not None:
            raise ValueError('Cannot specify both browser_session and dom_service/target_id')
        # Tools-service path: enhanced DOM tree via the session's DOM watchdog.
        dom_tree = await _get_enhanced_dom_tree_from_browser_session(browser_session)
        page_url = await browser_session.get_current_page_url()
        method = 'enhanced_dom_tree'
    elif dom_service is not None and target_id is not None:
        # Page-actor path; all_frames is fetched lazily inside get_dom_tree when
        # cross-origin iframes require it.
        dom_tree, _ = await dom_service.get_dom_tree(target_id=target_id, all_frames=None)
        page_url = None  # Not available via DOM service
        method = 'dom_service'
    else:
        raise ValueError('Must provide either browser_session or both dom_service and target_id')

    # Serialize the enhanced DOM tree back to HTML for markdown conversion.
    serializer = HTMLSerializer(extract_links=extract_links)
    page_html = serializer.serialize(dom_tree)
    html_chars = len(page_html)

    # markdownify handles the bulk of the HTML -> markdown conversion.
    from markdownify import markdownify as md

    markdown = md(
        page_html,
        heading_style='ATX',  # '#'-style headings
        strip=['script', 'style'],
        bullets='-',
        code_language='',
        escape_asterisks=False,
        escape_underscores=False,
        escape_misc=False,
        autolinks=False,
        default_title=False,
        keep_inline_images_in=[],  # base64 images are already filtered from the HTML
    )
    raw_markdown_chars = len(markdown)

    # Drop leftover percent-encoding artifacts, then compact whitespace/JSON noise.
    markdown = re.sub(r'%[0-9A-Fa-f]{2}', '', markdown)
    markdown, removed_chars = _preprocess_markdown_content(markdown)

    stats: dict[str, Any] = {
        'method': method,
        'original_html_chars': html_chars,
        'initial_markdown_chars': raw_markdown_chars,
        'filtered_chars_removed': removed_chars,
        'final_filtered_chars': len(markdown),
    }
    if page_url:
        stats['url'] = page_url
    return markdown, stats
async def _get_enhanced_dom_tree_from_browser_session(browser_session: 'BrowserSession'):
"""Get enhanced DOM tree from browser session via DOMWatchdog."""
# Get the enhanced DOM tree from DOMWatchdog
# This captures the current state of the page including dynamic content, shadow roots, etc.
dom_watchdog: DOMWatchdog | None = browser_session._dom_watchdog
assert dom_watchdog is not None, 'DOMWatchdog not available'
# Use cached enhanced DOM tree if available, otherwise build it
if dom_watchdog.enhanced_dom_tree is not None:
return dom_watchdog.enhanced_dom_tree
# Build the enhanced DOM tree if not cached
await dom_watchdog._build_dom_tree_without_highlights()
enhanced_dom_tree = dom_watchdog.enhanced_dom_tree
assert enhanced_dom_tree is not None, 'Enhanced DOM tree not available'
return enhanced_dom_tree
# Legacy aliases removed - all code now uses the unified extract_clean_markdown function
def _preprocess_markdown_content(content: str, max_newlines: int = 3) -> tuple[str, int]:
"""
Light preprocessing of markdown output - minimal cleanup with JSON blob removal.
Args:
content: Markdown content to lightly filter
max_newlines: Maximum consecutive newlines to allow
Returns:
tuple: (filtered_content, chars_filtered)
"""
original_length = len(content)
# Remove JSON blobs (common in SPAs like LinkedIn, Facebook, etc.)
# These are often embedded as `{"key":"value",...}` and can be massive
# Match JSON objects/arrays that are at least 100 chars long
# This catches SPA state/config data without removing small inline JSON
content = re.sub(r'`\{["\w].*?\}`', '', content, flags=re.DOTALL) # Remove JSON in code blocks
content = re.sub(r'\{"\$type":[^}]{100,}\}', '', content) # Remove JSON with $type fields (common pattern)
content = re.sub(r'\{"[^"]{5,}":\{[^}]{100,}\}', '', content) # Remove nested JSON objects
# Compress consecutive newlines (4+ newlines become max_newlines)
content = re.sub(r'\n{4,}', '\n' * max_newlines, content)
# Remove lines that are only whitespace
lines = content.split('\n')
filtered_lines = []
for line in lines:
stripped = line.strip()
# Keep all non-empty lines
if stripped:
# Skip lines that look like JSON (start with { or [ and are very long)
if (stripped.startswith('{') or stripped.startswith('[')) and len(stripped) > 100:
continue
filtered_lines.append(line)
content = '\n'.join(filtered_lines)
content = content.strip()
chars_filtered = original_length - len(content)
return content, chars_filtered
# ---------------------------------------------------------------------------
# Structure-aware markdown chunking
# ---------------------------------------------------------------------------
class _BlockType(Enum):
	"""Category of an atomic markdown block produced by `_parse_atomic_blocks`."""

	HEADER = auto()
	CODE_FENCE = auto()
	TABLE = auto()
	LIST_ITEM = auto()
	PARAGRAPH = auto()
	BLANK = auto()
@dataclass(slots=True)
class _AtomicBlock:
	"""A contiguous run of source lines that must not be split across chunks."""

	block_type: _BlockType
	lines: list[str]  # raw source lines, without trailing newlines
	char_start: int  # offset in original content
	char_end: int  # offset in original content (exclusive)
# A markdown table row: a line whose (possibly indented) content starts and ends with '|'.
_TABLE_ROW_RE = re.compile(r'^\s*\|.*\|\s*$')
# A list-item marker: optional indent, then '-', '*', '+' or a number with '.'/')', followed by a space.
_LIST_ITEM_RE = re.compile(r'^(\s*)([-*+]|\d+[.)]) ')
# A list-item continuation line: indented by at least two spaces or one tab.
_LIST_CONTINUATION_RE = re.compile(r'^(\s{2,}|\t)')
def _parse_atomic_blocks(content: str) -> list[_AtomicBlock]:
	"""Phase 1: Walk lines, group into unsplittable blocks.

	Scans `content` line by line and groups lines into `_AtomicBlock` units
	(blank lines, fenced code, headers, table rows, list items, paragraphs),
	tracking each block's character offsets in the original string. Offsets
	assume every line is followed by a newline; the final block's `char_end`
	is corrected afterwards when the content does not end with one.
	"""
	lines = content.split('\n')
	blocks: list[_AtomicBlock] = []
	i = 0
	offset = 0  # char offset tracking
	while i < len(lines):
		line = lines[i]
		line_len = len(line) + 1  # +1 for the newline we split on
		# BLANK
		if not line.strip():
			blocks.append(
				_AtomicBlock(
					block_type=_BlockType.BLANK,
					lines=[line],
					char_start=offset,
					char_end=offset + line_len,
				)
			)
			offset += line_len
			i += 1
			continue
		# CODE FENCE
		if line.strip().startswith('```'):
			fence_lines = [line]
			fence_end = offset + line_len
			i += 1
			# Consume until closing fence or EOF
			while i < len(lines):
				fence_line = lines[i]
				fence_line_len = len(fence_line) + 1
				fence_lines.append(fence_line)
				fence_end += fence_line_len
				i += 1
				# NOTE(review): the len() guard is always true here since the opening
				# fence is already in fence_lines before this loop — kept as-is.
				if fence_line.strip().startswith('```') and len(fence_lines) > 1:
					break
			blocks.append(
				_AtomicBlock(
					block_type=_BlockType.CODE_FENCE,
					lines=fence_lines,
					char_start=offset,
					char_end=fence_end,
				)
			)
			offset = fence_end
			continue
		# HEADER
		if line.lstrip().startswith('#'):
			blocks.append(
				_AtomicBlock(
					block_type=_BlockType.HEADER,
					lines=[line],
					char_start=offset,
					char_end=offset + line_len,
				)
			)
			offset += line_len
			i += 1
			continue
		# TABLE (consecutive |...| lines)
		# Header + separator row stay together; each data row is its own block
		if _TABLE_ROW_RE.match(line):
			# Collect header line
			header_lines = [line]
			header_end = offset + line_len
			i += 1
			# Check if next line is separator (contains ---)
			if i < len(lines) and _TABLE_ROW_RE.match(lines[i]) and '---' in lines[i]:
				sep = lines[i]
				sep_len = len(sep) + 1
				header_lines.append(sep)
				header_end += sep_len
				i += 1
			# Emit header+separator as one atomic block
			blocks.append(
				_AtomicBlock(
					block_type=_BlockType.TABLE,
					lines=header_lines,
					char_start=offset,
					char_end=header_end,
				)
			)
			offset = header_end
			# Each subsequent table row is its own TABLE block (splittable between rows)
			while i < len(lines) and _TABLE_ROW_RE.match(lines[i]):
				row = lines[i]
				row_len = len(row) + 1
				blocks.append(
					_AtomicBlock(
						block_type=_BlockType.TABLE,
						lines=[row],
						char_start=offset,
						char_end=offset + row_len,
					)
				)
				offset += row_len
				i += 1
			continue
		# LIST ITEM (with indented continuations)
		if _LIST_ITEM_RE.match(line):
			list_lines = [line]
			list_end = offset + line_len
			i += 1
			# Consume continuation lines (indented or blank between items)
			while i < len(lines):
				next_line = lines[i]
				next_len = len(next_line) + 1
				# Another list item at same or deeper indent → still part of this block
				if _LIST_ITEM_RE.match(next_line):
					list_lines.append(next_line)
					list_end += next_len
					i += 1
					continue
				# Indented continuation
				if next_line.strip() and _LIST_CONTINUATION_RE.match(next_line):
					list_lines.append(next_line)
					list_end += next_len
					i += 1
					continue
				break
			blocks.append(
				_AtomicBlock(
					block_type=_BlockType.LIST_ITEM,
					lines=list_lines,
					char_start=offset,
					char_end=list_end,
				)
			)
			offset = list_end
			continue
		# PARAGRAPH (everything else, up to next blank line)
		para_lines = [line]
		para_end = offset + line_len
		i += 1
		while i < len(lines) and lines[i].strip():
			# Stop if next line starts a different block type
			nl = lines[i]
			if nl.lstrip().startswith('#') or nl.strip().startswith('```') or _TABLE_ROW_RE.match(nl) or _LIST_ITEM_RE.match(nl):
				break
			nl_len = len(nl) + 1
			para_lines.append(nl)
			para_end += nl_len
			i += 1
		blocks.append(
			_AtomicBlock(
				block_type=_BlockType.PARAGRAPH,
				lines=para_lines,
				char_start=offset,
				char_end=para_end,
			)
		)
		offset = para_end
	# Fix last block char_end: content may not end with \n
	if blocks and content and not content.endswith('\n'):
		blocks[-1] = _AtomicBlock(
			block_type=blocks[-1].block_type,
			lines=blocks[-1].lines,
			char_start=blocks[-1].char_start,
			char_end=len(content),
		)
	return blocks
def _block_text(block: _AtomicBlock) -> str:
	"""Reconstruct a block's source text: its lines rejoined with newlines."""
	source_lines = block.lines
	return '\n'.join(source_lines)
def _get_table_header(block: _AtomicBlock) -> str | None:
	"""Extract table header + separator rows from a TABLE block."""
	assert block.block_type == _BlockType.TABLE
	if len(block.lines) < 2:
		return None
	# Header is line 0; line 1 must look like a markdown separator row.
	separator = block.lines[1]
	is_separator = '---' in separator or '- -' in separator
	return f'{block.lines[0]}\n{separator}' if is_separator else None
def chunk_markdown_by_structure(
	content: str,
	max_chunk_chars: int = 100_000,
	overlap_lines: int = 5,
	start_from_char: int = 0,
) -> list[MarkdownChunk]:
	"""Split markdown into structure-aware chunks.

	Algorithm:
		Phase 1 — Parse atomic blocks (headers, code fences, tables, list items, paragraphs).
		Phase 2 — Greedy chunk assembly: accumulate blocks until exceeding max_chunk_chars.
			A single block exceeding the limit is allowed (soft limit).
		Phase 3 — Build overlap prefixes for context carry between chunks.

	Args:
		content: Full markdown string.
		max_chunk_chars: Target maximum chars per chunk (soft limit for single blocks).
		overlap_lines: Number of trailing lines from previous chunk to prepend.
		start_from_char: Return chunks starting from the chunk that contains this offset.

	Returns:
		List of MarkdownChunk. Empty if start_from_char is past end of content.
	"""
	# Empty input still yields one empty chunk so callers get consistent metadata.
	if not content:
		return [
			MarkdownChunk(
				content='',
				chunk_index=0,
				total_chunks=1,
				char_offset_start=0,
				char_offset_end=0,
				overlap_prefix='',
				has_more=False,
			)
		]
	if start_from_char >= len(content):
		return []
	# Phase 1: parse atomic blocks
	blocks = _parse_atomic_blocks(content)
	if not blocks:
		return []
	# Phase 2: greedy chunk assembly with header-preferred splitting
	raw_chunks: list[list[_AtomicBlock]] = []
	current_chunk: list[_AtomicBlock] = []
	current_size = 0
	for block in blocks:
		block_size = block.char_end - block.char_start
		# If adding this block would exceed limit AND we already have content, emit chunk
		if current_size + block_size > max_chunk_chars and current_chunk:
			# Prefer splitting at a header boundary within the current chunk.
			# Scan backwards for the last HEADER block; if found and it wouldn't
			# create a tiny chunk (< 50% of limit), split right before it so the
			# header starts the next chunk for better semantic coherence.
			best_split = len(current_chunk)
			for j in range(len(current_chunk) - 1, 0, -1):
				if current_chunk[j].block_type == _BlockType.HEADER:
					prefix_size = sum(b.char_end - b.char_start for b in current_chunk[:j])
					if prefix_size >= max_chunk_chars * 0.5:
						best_split = j
					break
			raw_chunks.append(current_chunk[:best_split])
			# Carry remaining blocks (from the header onward) into the next chunk
			current_chunk = current_chunk[best_split:]
			current_size = sum(b.char_end - b.char_start for b in current_chunk)
		current_chunk.append(block)
		current_size += block_size
	if current_chunk:
		raw_chunks.append(current_chunk)
	total_chunks = len(raw_chunks)
	# Phase 3: build MarkdownChunk objects with overlap prefixes
	chunks: list[MarkdownChunk] = []
	# Track table header from previous chunk for table continuations
	prev_chunk_last_table_header: str | None = None
	for idx, chunk_blocks in enumerate(raw_chunks):
		chunk_text = '\n'.join(_block_text(b) for b in chunk_blocks)
		char_start = chunk_blocks[0].char_start
		char_end = chunk_blocks[-1].char_end
		# Build overlap prefix
		overlap = ''
		if idx > 0:
			prev_blocks = raw_chunks[idx - 1]
			prev_text = '\n'.join(_block_text(b) for b in prev_blocks)
			prev_lines = prev_text.split('\n')
			# Check if current chunk starts with a table continuation
			first_block = chunk_blocks[0]
			if first_block.block_type == _BlockType.TABLE and prev_chunk_last_table_header:
				# Always prepend table header for continuation
				trailing = prev_lines[-(overlap_lines):] if overlap_lines > 0 else []
				header_lines = prev_chunk_last_table_header.split('\n')
				# Deduplicate: don't repeat header lines if they're already in trailing
				combined = list(header_lines)
				for tl in trailing:
					if tl not in combined:
						combined.append(tl)
				overlap = '\n'.join(combined)
			elif overlap_lines > 0:
				overlap = '\n'.join(prev_lines[-(overlap_lines):])
		# Track table header from this chunk for next iteration.
		# Only overwrite if this chunk contains a new header+separator block;
		# otherwise preserve the previous header so tables spanning 3+ chunks
		# still get the header carried forward.
		for b in chunk_blocks:
			if b.block_type == _BlockType.TABLE:
				hdr = _get_table_header(b)
				if hdr is not None:
					prev_chunk_last_table_header = hdr
		has_more = idx < total_chunks - 1
		chunks.append(
			MarkdownChunk(
				content=chunk_text,
				chunk_index=idx,
				total_chunks=total_chunks,
				char_offset_start=char_start,
				char_offset_end=char_end,
				overlap_prefix=overlap,
				has_more=has_more,
			)
		)
	# Apply start_from_char filter: return chunks from the one containing that offset
	if start_from_char > 0:
		for i, chunk in enumerate(chunks):
			if chunk.char_offset_end > start_from_char:
				return chunks[i:]
		return []  # offset past all chunks
	return chunks
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/markdown_extractor.py",
"license": "MIT License",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/serializer/code_use_serializer.py | # @file purpose: Ultra-compact serializer optimized for code-use agents
# Focuses on minimal token usage while preserving essential interactive context
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
EnhancedDOMTreeNode,
NodeType,
SimplifiedNode,
)
# Minimal but sufficient attribute list for code agents.
# Order matters: _build_minimal_attributes emits attributes in this order.
CODE_USE_KEY_ATTRIBUTES = [
	'id',  # Essential for element selection
	'name',  # For form inputs
	'type',  # For input types
	'placeholder',  # For empty inputs
	'aria-label',  # For buttons without text
	'value',  # Current values
	'alt',  # For images
	'class',  # Keep top 2 classes for common selectors
]
# Interactive elements agent can use (clicked, typed into, or submitted).
INTERACTIVE_ELEMENTS = {
	'a',
	'button',
	'input',
	'textarea',
	'select',
	'form',
}
# Semantic structure elements - expanded to include more content containers.
# Elements in this set are serialized even when they are not interactive.
SEMANTIC_STRUCTURE = {
	'h1',
	'h2',
	'h3',
	'h4',
	'h5',
	'h6',
	'nav',
	'main',
	'header',
	'footer',
	'article',
	'section',
	'p',  # Paragraphs often contain prices and product info
	'span',  # Spans often contain prices and labels
	'div',  # Divs with useful attributes (id/class) should be shown
	'ul',
	'ol',
	'li',
	'label',
	'img',
}
class DOMCodeAgentSerializer:
	"""Optimized DOM serializer for code-use agents - balances token efficiency with context."""

	@staticmethod
	def serialize_tree(node: SimplifiedNode | None, include_attributes: list[str], depth: int = 0) -> str:
		"""
		Serialize DOM tree with smart token optimization.

		Strategy:
		- Keep top 2 CSS classes for querySelector compatibility
		- Show div/span/p elements with useful attributes or text
		- Show all interactive + semantic elements
		- Inline text up to 40 chars for better context (see _get_inline_text)

		Args:
			node: Root of the simplified tree to serialize (None yields '').
			include_attributes: Accepted for interface parity with other serializers;
				attribute selection here is driven by CODE_USE_KEY_ATTRIBUTES.
			depth: Current indentation depth.

		Returns:
			Compact text representation of the subtree.
		"""
		if not node:
			return ''
		# Skip excluded/hidden nodes
		if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
			return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
		if not node.should_display:
			return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
		formatted_text = []
		depth_str = ' ' * depth  # Space-based indent instead of tabs for compactness
		if node.original_node.node_type == NodeType.ELEMENT_NODE:
			tag = node.original_node.tag_name.lower()
			is_visible = node.original_node.snapshot_node and node.original_node.is_visible
			# Skip invisible (except iframes)
			if not is_visible and tag not in ['iframe', 'frame']:
				return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
			# Special handling for iframes
			if tag in ['iframe', 'frame']:
				return DOMCodeAgentSerializer._serialize_iframe(node, include_attributes, depth)
			# Build minimal attributes
			attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(node.original_node)
			# Decide if element should be shown
			is_interactive = tag in INTERACTIVE_ELEMENTS
			is_semantic = tag in SEMANTIC_STRUCTURE
			has_useful_attrs = bool(attributes_str)
			has_text = DOMCodeAgentSerializer._has_direct_text(node)
			# Skip non-semantic, non-interactive containers without attributes
			if not is_interactive and not is_semantic and not has_useful_attrs and not has_text:
				return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
			# Collapse pointless wrappers (bare single-child div/span)
			if tag in {'div', 'span'} and not has_useful_attrs and not has_text and len(node.children) == 1:
				return DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth)
			# Build element
			line = f'{depth_str}<{tag}'
			if attributes_str:
				line += f' {attributes_str}'
			# Inline text
			inline_text = DOMCodeAgentSerializer._get_inline_text(node)
			if inline_text:
				line += f'>{inline_text}'
			else:
				line += '>'
			formatted_text.append(line)
			# Children (only if no inline text)
			if node.children and not inline_text:
				children_text = DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth + 1)
				if children_text:
					formatted_text.append(children_text)
		elif node.original_node.node_type == NodeType.TEXT_NODE:
			# Handled inline with parent
			pass
		elif node.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
			# Shadow DOM - minimal marker
			if node.children:
				formatted_text.append(f'{depth_str}#shadow')
				children_text = DOMCodeAgentSerializer._serialize_children(node, include_attributes, depth + 1)
				if children_text:
					formatted_text.append(children_text)
		return '\n'.join(formatted_text)

	@staticmethod
	def _serialize_children(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
		"""Serialize children, concatenating non-empty results with newlines."""
		children_output = []
		for child in node.children:
			child_text = DOMCodeAgentSerializer.serialize_tree(child, include_attributes, depth)
			if child_text:
				children_output.append(child_text)
		return '\n'.join(children_output)

	@staticmethod
	def _build_minimal_attributes(node: EnhancedDOMTreeNode) -> str:
		"""Build minimal but useful attributes - keep top 2 classes for selectors."""
		attrs = []
		if node.attributes:
			for attr in CODE_USE_KEY_ATTRIBUTES:
				if attr in node.attributes:
					value = str(node.attributes[attr]).strip()
					if value:
						# Special handling for class - keep only first 2 classes
						if attr == 'class':
							classes = value.split()[:2]
							value = ' '.join(classes)
						# Cap at 25 chars
						value = cap_text_length(value, 25)
						attrs.append(f'{attr}="{value}"')
		return ' '.join(attrs)

	@staticmethod
	def _has_direct_text(node: SimplifiedNode) -> bool:
		"""Check if node has direct text children (single-char text is ignored as noise)."""
		for child in node.children:
			if child.original_node.node_type == NodeType.TEXT_NODE:
				text = child.original_node.node_value.strip() if child.original_node.node_value else ''
				if len(text) > 1:
					return True
		return False

	@staticmethod
	def _get_inline_text(node: SimplifiedNode) -> str:
		"""Get inline text from direct text children (capped at 40 chars)."""
		text_parts = []
		for child in node.children:
			if child.original_node.node_type == NodeType.TEXT_NODE:
				text = child.original_node.node_value.strip() if child.original_node.node_value else ''
				if text and len(text) > 1:
					text_parts.append(text)
		if not text_parts:
			return ''
		combined = ' '.join(text_parts)
		return cap_text_length(combined, 40)

	@staticmethod
	def _serialize_iframe(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
		"""Handle iframe minimally: marker line plus the body of its content document."""
		formatted_text = []
		depth_str = ' ' * depth
		tag = node.original_node.tag_name.lower()
		# Minimal iframe marker
		attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(node.original_node)
		line = f'{depth_str}<{tag}'
		if attributes_str:
			line += f' {attributes_str}'
		line += '>'
		formatted_text.append(line)
		# Iframe content
		if node.original_node.content_document:
			formatted_text.append(f'{depth_str} #iframe-content')
			# Find and serialize body content only
			# NOTE(review): outer iteration uses children_nodes, inner uses children —
			# presumably both are valid views on EnhancedDOMTreeNode; confirm in views.py.
			for child_node in node.original_node.content_document.children_nodes or []:
				if child_node.tag_name.lower() == 'html':
					for html_child in child_node.children:
						if html_child.tag_name.lower() == 'body':
							for body_child in html_child.children:
								DOMCodeAgentSerializer._serialize_document_node(
									body_child, formatted_text, include_attributes, depth + 2
								)
							break
		return '\n'.join(formatted_text)

	@staticmethod
	def _serialize_document_node(
		dom_node: EnhancedDOMTreeNode, output: list[str], include_attributes: list[str], depth: int
	) -> None:
		"""Serialize document node without SimplifiedNode wrapper (appends lines to output)."""
		depth_str = ' ' * depth
		if dom_node.node_type == NodeType.ELEMENT_NODE:
			tag = dom_node.tag_name.lower()
			# Skip invisible
			is_visible = dom_node.snapshot_node and dom_node.is_visible
			if not is_visible:
				return
			# Check if worth showing
			is_interactive = tag in INTERACTIVE_ELEMENTS
			is_semantic = tag in SEMANTIC_STRUCTURE
			attributes_str = DOMCodeAgentSerializer._build_minimal_attributes(dom_node)
			if not is_interactive and not is_semantic and not attributes_str:
				# Skip but process children
				for child in dom_node.children:
					DOMCodeAgentSerializer._serialize_document_node(child, output, include_attributes, depth)
				return
			# Build element
			line = f'{depth_str}<{tag}'
			if attributes_str:
				line += f' {attributes_str}'
			# Get text
			text_parts = []
			for child in dom_node.children:
				if child.node_type == NodeType.TEXT_NODE and child.node_value:
					text = child.node_value.strip()
					if text and len(text) > 1:
						text_parts.append(text)
			if text_parts:
				combined = ' '.join(text_parts)
				line += f'>{cap_text_length(combined, 25)}'
			else:
				line += '>'
			output.append(line)
			# Process non-text children
			for child in dom_node.children:
				if child.node_type != NodeType.TEXT_NODE:
					DOMCodeAgentSerializer._serialize_document_node(child, output, include_attributes, depth + 1)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/code_use_serializer.py",
"license": "MIT License",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/serializer/eval_serializer.py | # @file purpose: Concise evaluation serializer for DOM trees - optimized for LLM query writing
from browser_use.dom.utils import cap_text_length
from browser_use.dom.views import (
EnhancedDOMTreeNode,
NodeType,
SimplifiedNode,
)
# Critical attributes for query writing and form interaction.
# Order matters: _build_compact_attributes emits attributes in this order.
# NOTE(review): an earlier note claimed 'id' and 'class' were removed to force
# structural selectors, but both are present below — confirm intended behavior.
EVAL_KEY_ATTRIBUTES = [
	'id',  # Can contain special chars; structural selectors may be more robust
	'class',  # Can contain special chars like +; structural selectors may be more robust
	'name',
	'type',
	'placeholder',
	'aria-label',
	'role',
	'value',
	# 'href',
	'data-testid',
	'alt',  # for images
	'title',  # useful for tooltips/link context
	# State attributes (critical for form interaction)
	'checked',
	'selected',
	'disabled',
	'required',
	'readonly',
	# ARIA states
	'aria-expanded',
	'aria-pressed',
	'aria-checked',
	'aria-selected',
	'aria-invalid',
	# Validation attributes (help agents avoid brute force)
	'pattern',
	'min',
	'max',
	'minlength',
	'maxlength',
	'step',
	'aria-valuemin',
	'aria-valuemax',
	'aria-valuenow',
]
# Semantic elements that should always be shown (even without useful attributes).
SEMANTIC_ELEMENTS = {
	'html',  # Always show document root
	'body',  # Always show body
	'h1',
	'h2',
	'h3',
	'h4',
	'h5',
	'h6',
	'a',
	'button',
	'input',
	'textarea',
	'select',
	'form',
	'label',
	'nav',
	'header',
	'footer',
	'main',
	'article',
	'section',
	'table',
	'thead',
	'tbody',
	'tr',
	'th',
	'td',
	'ul',
	'ol',
	'li',
	'img',
	'iframe',
	'video',
	'audio',
}
# Container elements that can be collapsed if they only wrap one child.
# NOTE(review): not referenced by the serializer code in this file — possibly vestigial.
COLLAPSIBLE_CONTAINERS = {'div', 'span', 'section', 'article'}
# SVG child elements to skip entirely (decorative only, no interaction value).
SVG_ELEMENTS = {
	'path',
	'rect',
	'g',
	'circle',
	'ellipse',
	'line',
	'polyline',
	'polygon',
	'use',
	'defs',
	'clipPath',
	'mask',
	'pattern',
	'image',
	'text',
	'tspan',
}
class DOMEvalSerializer:
	"""Ultra-concise DOM serializer for quick LLM query writing."""

	@staticmethod
	def serialize_tree(node: SimplifiedNode | None, include_attributes: list[str], depth: int = 0) -> str:
		"""
		Serialize complete DOM tree structure for LLM understanding.

		Strategy:
		- Show ALL elements to preserve DOM structure
		- Non-interactive elements show just tag name
		- Interactive elements show attributes plus an [i_<backend_node_id>] marker
		- Self-closing tags only (no closing tags)
		"""
		if not node:
			return ''
		# Skip excluded nodes but process children
		if hasattr(node, 'excluded_by_parent') and node.excluded_by_parent:
			return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
		# Skip nodes marked as should_display=False
		if not node.should_display:
			return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
		formatted_text = []
		depth_str = depth * '\t'
		if node.original_node.node_type == NodeType.ELEMENT_NODE:
			tag = node.original_node.tag_name.lower()
			is_visible = node.original_node.snapshot_node and node.original_node.is_visible
			# Container elements that should be shown even if invisible (might have visible children)
			container_tags = {'html', 'body', 'div', 'main', 'section', 'article', 'aside', 'header', 'footer', 'nav'}
			# Skip invisible elements UNLESS they're containers or iframes (which might have visible children)
			if not is_visible and tag not in container_tags and tag not in ['iframe', 'frame']:
				return DOMEvalSerializer._serialize_children(node, include_attributes, depth)
			# Special handling for iframes - show them with their content
			if tag in ['iframe', 'frame']:
				return DOMEvalSerializer._serialize_iframe(node, include_attributes, depth)
			# Skip SVG elements entirely - they're just decorative graphics with no interaction value
			# Show the <svg> tag itself to indicate graphics, but don't recurse into children
			if tag == 'svg':
				line = f'{depth_str}'
				# Add [i_X] for interactive SVG elements only
				if node.is_interactive:
					line += f'[i_{node.original_node.backend_node_id}] '
				line += '<svg'
				attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
				if attributes_str:
					line += f' {attributes_str}'
				line += ' /> <!-- SVG content collapsed -->'
				return line
			# Skip SVG child elements entirely (path, rect, g, circle, etc.)
			if tag in SVG_ELEMENTS:
				return ''
			# Build compact attributes string
			attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
			# Decide if this element should be shown
			# NOTE(review): is_semantic, has_useful_attrs and has_text_content are
			# computed but not consulted below — the current flow shows every element.
			is_semantic = tag in SEMANTIC_ELEMENTS
			has_useful_attrs = bool(attributes_str)
			has_text_content = DOMEvalSerializer._has_direct_text(node)
			has_children = len(node.children) > 0
			# Build compact element representation
			line = f'{depth_str}'
			# Add backend node ID notation - [i_X] for interactive elements only
			if node.is_interactive:
				line += f'[i_{node.original_node.backend_node_id}] '
			# Non-interactive elements don't get an index notation
			line += f'<{tag}'
			if attributes_str:
				line += f' {attributes_str}'
			# Add scroll info if element is scrollable
			if node.original_node.should_show_scroll_info:
				scroll_text = node.original_node.get_scroll_info_text()
				if scroll_text:
					line += f' scroll="{scroll_text}"'
			# Add inline text if present (keep it on same line for compactness)
			inline_text = DOMEvalSerializer._get_inline_text(node)
			# For containers (html, body, div, etc.), always show children even if there's inline text
			# For other elements, inline text replaces children (more compact)
			is_container = tag in container_tags
			if inline_text and not is_container:
				line += f'>{inline_text}'
			else:
				line += ' />'
			formatted_text.append(line)
			# Process children (always for containers, only if no inline_text for others)
			if has_children and (is_container or not inline_text):
				children_text = DOMEvalSerializer._serialize_children(node, include_attributes, depth + 1)
				if children_text:
					formatted_text.append(children_text)
		elif node.original_node.node_type == NodeType.TEXT_NODE:
			# Text nodes are handled inline with their parent
			pass
		elif node.original_node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
			# Shadow DOM - just show children directly with minimal marker
			if node.children:
				formatted_text.append(f'{depth_str}#shadow')
				children_text = DOMEvalSerializer._serialize_children(node, include_attributes, depth + 1)
				if children_text:
					formatted_text.append(children_text)
		return '\n'.join(formatted_text)

	@staticmethod
	def _serialize_children(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
		"""Helper to serialize all children of a node.

		Truncates very long lists (>max_list_items <li> children) and long runs
		of consecutive links (>max_consecutive_links) to keep output bounded.
		"""
		children_output = []
		# Check if parent is a list container (ul, ol)
		is_list_container = node.original_node.node_type == NodeType.ELEMENT_NODE and node.original_node.tag_name.lower() in [
			'ul',
			'ol',
		]
		# Track list items and consecutive links
		li_count = 0
		max_list_items = 50
		consecutive_link_count = 0
		max_consecutive_links = 50
		total_links_skipped = 0
		for child in node.children:
			# Get tag name for this child
			current_tag = None
			if child.original_node.node_type == NodeType.ELEMENT_NODE:
				current_tag = child.original_node.tag_name.lower()
			# If we're in a list container and this child is an li element
			if is_list_container and current_tag == 'li':
				li_count += 1
				# Skip li elements beyond max_list_items
				if li_count > max_list_items:
					continue
			# Track consecutive anchor tags (links)
			if current_tag == 'a':
				consecutive_link_count += 1
				# Skip links beyond max_consecutive_links consecutive ones
				if consecutive_link_count > max_consecutive_links:
					total_links_skipped += 1
					continue
			else:
				# Reset counter when we hit a non-link element
				# But first add truncation message if we skipped links
				if total_links_skipped > 0:
					depth_str = depth * '\t'
					children_output.append(f'{depth_str}... ({total_links_skipped} more links in this list)')
					total_links_skipped = 0
				consecutive_link_count = 0
			child_text = DOMEvalSerializer.serialize_tree(child, include_attributes, depth)
			if child_text:
				children_output.append(child_text)
		# Add truncation message if we skipped items at the end
		if is_list_container and li_count > max_list_items:
			depth_str = depth * '\t'
			children_output.append(
				f'{depth_str}... ({li_count - max_list_items} more items in this list (truncated) use evaluate to get more.'
			)
		# Add truncation message for links if we skipped any at the end
		if total_links_skipped > 0:
			depth_str = depth * '\t'
			children_output.append(
				f'{depth_str}... ({total_links_skipped} more links in this list) (truncated) use evaluate to get more.'
			)
		return '\n'.join(children_output)

	@staticmethod
	def _build_compact_attributes(node: EnhancedDOMTreeNode) -> str:
		"""Build ultra-compact attributes string with only key attributes."""
		attrs = []
		# Prioritize attributes that help with query writing
		if node.attributes:
			for attr in EVAL_KEY_ATTRIBUTES:
				if attr in node.attributes:
					value = str(node.attributes[attr]).strip()
					if not value:
						continue
					# Special handling for different attributes
					if attr == 'class':
						# For class, keep only the first 3 classes to save space
						classes = value.split()[:3]
						value = ' '.join(classes)
					elif attr == 'href':
						# For href, cap at 80 chars to save space
						value = cap_text_length(value, 80)
					else:
						# Cap at 80 chars for other attributes
						value = cap_text_length(value, 80)
					attrs.append(f'{attr}="{value}"')
		# Note: We intentionally don't add role from ax_node here because:
		# 1. If role is explicitly set in HTML, it's already captured above via EVAL_KEY_ATTRIBUTES
		# 2. Inferred roles from AX tree (like link, listitem, LineBreak) are redundant with the tag name
		# 3. This reduces noise - <a href="..." role="link"> is redundant, we already know <a> is a link
		return ' '.join(attrs)

	@staticmethod
	def _has_direct_text(node: SimplifiedNode) -> bool:
		"""Check if node has direct text children (not nested in other elements)."""
		for child in node.children:
			if child.original_node.node_type == NodeType.TEXT_NODE:
				text = child.original_node.node_value.strip() if child.original_node.node_value else ''
				if len(text) > 1:
					return True
		return False

	@staticmethod
	def _get_inline_text(node: SimplifiedNode) -> str:
		"""Get text content to display inline (capped at 80 chars)."""
		text_parts = []
		for child in node.children:
			if child.original_node.node_type == NodeType.TEXT_NODE:
				text = child.original_node.node_value.strip() if child.original_node.node_value else ''
				if text and len(text) > 1:
					text_parts.append(text)
		if not text_parts:
			return ''
		combined = ' '.join(text_parts)
		return cap_text_length(combined, 80)

	@staticmethod
	def _serialize_iframe(node: SimplifiedNode, include_attributes: list[str], depth: int) -> str:
		"""Handle iframe serialization with content document."""
		formatted_text = []
		depth_str = depth * '\t'
		tag = node.original_node.tag_name.lower()
		# Build minimal iframe marker with key attributes
		attributes_str = DOMEvalSerializer._build_compact_attributes(node.original_node)
		line = f'{depth_str}<{tag}'
		if attributes_str:
			line += f' {attributes_str}'
		# Add scroll info for iframe content
		if node.original_node.should_show_scroll_info:
			scroll_text = node.original_node.get_scroll_info_text()
			if scroll_text:
				line += f' scroll="{scroll_text}"'
		line += ' />'
		formatted_text.append(line)
		# If iframe has content document, serialize its content
		if node.original_node.content_document:
			# Add marker for iframe content
			formatted_text.append(f'{depth_str}\t#iframe-content')
			# Process content document children
			for child_node in node.original_node.content_document.children_nodes or []:
				# Process html documents
				if child_node.tag_name.lower() == 'html':
					# Find and serialize body content only (skip head)
					for html_child in child_node.children:
						if html_child.tag_name.lower() == 'body':
							for body_child in html_child.children:
								# Recursively process body children (iframe content)
								DOMEvalSerializer._serialize_document_node(
									body_child, formatted_text, include_attributes, depth + 2, is_iframe_content=True
								)
							break  # Stop after processing body
				else:
					# Not an html element - serialize directly
					DOMEvalSerializer._serialize_document_node(
						child_node, formatted_text, include_attributes, depth + 1, is_iframe_content=True
					)
		return '\n'.join(formatted_text)

	@staticmethod
	def _serialize_document_node(
		dom_node: EnhancedDOMTreeNode,
		output: list[str],
		include_attributes: list[str],
		depth: int,
		is_iframe_content: bool = True,
	) -> None:
		"""Helper to serialize a document node without SimplifiedNode wrapper.

		Args:
			is_iframe_content: If True, be more permissive with visibility checks since
				iframe content might not have snapshot data from parent page.
		"""
		depth_str = depth * '\t'
		if dom_node.node_type == NodeType.ELEMENT_NODE:
			tag = dom_node.tag_name.lower()
			# For iframe content, be permissive - show all semantic elements even without snapshot data
			# For regular content, skip invisible elements
			if is_iframe_content:
				# Only skip if we have snapshot data AND it's explicitly invisible
				# If no snapshot data, assume visible (cross-origin iframe content)
				is_visible = (not dom_node.snapshot_node) or dom_node.is_visible
			else:
				# Regular strict visibility check
				is_visible = dom_node.snapshot_node and dom_node.is_visible
			if not is_visible:
				return
			# Check if semantic or has useful attributes
			is_semantic = tag in SEMANTIC_ELEMENTS
			attributes_str = DOMEvalSerializer._build_compact_attributes(dom_node)
			if not is_semantic and not attributes_str:
				# Skip but process children
				for child in dom_node.children:
					DOMEvalSerializer._serialize_document_node(
						child, output, include_attributes, depth, is_iframe_content=is_iframe_content
					)
				return
			# Build element line
			line = f'{depth_str}<{tag}'
			if attributes_str:
				line += f' {attributes_str}'
			# Get direct text content
			text_parts = []
			for child in dom_node.children:
				if child.node_type == NodeType.TEXT_NODE and child.node_value:
					text = child.node_value.strip()
					if text and len(text) > 1:
						text_parts.append(text)
			if text_parts:
				combined = ' '.join(text_parts)
				line += f'>{cap_text_length(combined, 100)}'
			else:
				line += ' />'
			output.append(line)
			# Process non-text children
			for child in dom_node.children:
				if child.node_type != NodeType.TEXT_NODE:
					DOMEvalSerializer._serialize_document_node(
						child, output, include_attributes, depth + 1, is_iframe_content=is_iframe_content
					)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/eval_serializer.py",
"license": "MIT License",
"lines": 407,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/serializer/html_serializer.py | # @file purpose: Serializes enhanced DOM trees to HTML format including shadow roots
from browser_use.dom.views import EnhancedDOMTreeNode, NodeType
class HTMLSerializer:
	"""Serializes enhanced DOM trees back to HTML format.

	This serializer reconstructs HTML from the enhanced DOM tree, including:
	- Shadow DOM content (both open and closed)
	- Iframe content documents
	- All attributes and text nodes
	- Proper HTML structure

	Unlike getOuterHTML which only captures light DOM, this captures the full
	enhanced tree including shadow roots that are crucial for modern SPAs.
	"""

	# Elements whose content never contributes to visible page text.
	_SKIPPED_TAGS = frozenset({'style', 'script', 'head', 'meta', 'link', 'title'})

	# HTML void elements: self-closing, never have children.
	# Hoisted to a class attribute so the set is not rebuilt for every element.
	_VOID_ELEMENTS = frozenset(
		{
			'area',
			'base',
			'br',
			'col',
			'embed',
			'hr',
			'img',
			'input',
			'link',
			'meta',
			'param',
			'source',
			'track',
			'wbr',
		}
	)

	def __init__(self, extract_links: bool = False):
		"""Initialize the HTML serializer.

		Args:
			extract_links: If True, preserves all links. If False, removes href attributes.
		"""
		self.extract_links = extract_links

	def serialize(self, node: EnhancedDOMTreeNode, depth: int = 0) -> str:
		"""Serialize an enhanced DOM tree node to HTML.

		Args:
			node: The enhanced DOM tree node to serialize
			depth: Current depth for indentation (internal use)

		Returns:
			HTML string representation of the node and its descendants
		"""
		if node.node_type == NodeType.DOCUMENT_NODE:
			# Document root has no tag of its own - just serialize all children
			return self._join_serialized(node.children_and_shadow_roots, depth)
		elif node.node_type == NodeType.DOCUMENT_FRAGMENT_NODE:
			# Shadow DOM root - wrap children in a declarative shadow DOM template.
			# NOTE(review): the legacy 'shadowroot' attribute is emitted; the current
			# standard attribute name is 'shadowrootmode' - confirm which the consumer expects.
			shadow_type = node.shadow_root_type or 'open'
			parts = [f'<template shadowroot="{shadow_type.lower()}">']
			parts.append(self._join_serialized(node.children, depth + 1))
			parts.append('</template>')
			return ''.join(parts)
		elif node.node_type == NodeType.ELEMENT_NODE:
			return self._serialize_element(node, depth)
		elif node.node_type == NodeType.TEXT_NODE:
			# Text content with basic HTML escaping
			if node.node_value:
				return self._escape_html(node.node_value)
			return ''
		elif node.node_type == NodeType.COMMENT_NODE:
			# Skip comments to reduce noise
			return ''
		else:
			# Unknown node type - skip
			return ''

	def _serialize_element(self, node: EnhancedDOMTreeNode, depth: int) -> str:
		"""Serialize a single element node (and its subtree) to HTML."""
		tag_name = node.tag_name.lower()
		# Skip non-content elements entirely
		if tag_name in self._SKIPPED_TAGS:
			return ''
		# Skip code tags with display:none - these often contain JSON state for SPAs
		if tag_name == 'code' and node.attributes:
			style = node.attributes.get('style', '')
			# Hidden (display:none) code blocks are likely serialized JSON data
			if 'display:none' in style.replace(' ', '') or 'display: none' in style:
				return ''
			# Also check for bpr-guid IDs (LinkedIn's JSON data pattern)
			element_id = node.attributes.get('id', '')
			if 'bpr-guid' in element_id or 'data' in element_id or 'state' in element_id:
				return ''
		# Skip base64 inline images - these are usually placeholders or tracking pixels
		if tag_name == 'img' and node.attributes:
			src = node.attributes.get('src', '')
			if src.startswith('data:image/'):
				return ''
		# Opening tag with attributes
		parts = [f'<{tag_name}']
		if node.attributes:
			attrs = self._serialize_attributes(node.attributes)
			if attrs:
				parts.append(' ' + attrs)
		# Void elements are self-closing and have no children
		if tag_name in self._VOID_ELEMENTS:
			parts.append(' />')
			return ''.join(parts)
		parts.append('>')
		if tag_name == 'table':
			# Serialize shadow roots first (same as the general path), then
			# normalized table children (ensure thead/tbody for markdownify)
			if node.shadow_roots:
				parts.append(self._join_serialized(node.shadow_roots, depth + 1))
			parts.append(self._serialize_table_children(node, depth))
		elif tag_name in {'iframe', 'frame'} and node.content_document:
			# Inline the iframe's content document
			parts.append(self._join_serialized(node.content_document.children_nodes or [], depth + 1))
		else:
			# Shadow roots FIRST (declarative shadow DOM), then light DOM
			# children (for slot projection)
			if node.shadow_roots:
				parts.append(self._join_serialized(node.shadow_roots, depth + 1))
			parts.append(self._join_serialized(node.children, depth + 1))
		parts.append(f'</{tag_name}>')
		return ''.join(parts)

	def _join_serialized(self, nodes, depth: int) -> str:
		"""Serialize a sequence of nodes and concatenate the non-empty results."""
		return ''.join(html for html in (self.serialize(n, depth) for n in nodes) if html)

	def _serialize_table_children(self, table_node: EnhancedDOMTreeNode, depth: int) -> str:
		"""Normalize table structure to ensure thead/tbody for markdownify.

		When a <table> has no <thead> but a leading <tr> contains <th> cells,
		wrap that row in <thead> and remaining rows in <tbody>.
		"""
		children = table_node.children
		if not children:
			return ''
		# Tag names are lowercased for comparison because the source tree may
		# report them uppercase (serialize() lowercases them for output too).
		child_tags = [c.tag_name.lower() for c in children if c.node_type == NodeType.ELEMENT_NODE]
		has_thead = 'thead' in child_tags
		has_tbody = 'tbody' in child_tags
		if has_thead or not child_tags:
			# Already normalized or empty - serialize normally
			return self._join_serialized(children, depth + 1)
		# Scan for the first <tr> that contains <th> cells.
		# NOTE(review): this keeps scanning past header-less leading rows -
		# confirm whether only the very first <tr> should be considered.
		first_tr = None
		first_tr_idx = -1
		for i, child in enumerate(children):
			if child.node_type == NodeType.ELEMENT_NODE and child.tag_name.lower() == 'tr':
				row_has_th = any(
					c.node_type == NodeType.ELEMENT_NODE and c.tag_name.lower() == 'th' for c in child.children
				)
				if row_has_th:
					first_tr = child
					first_tr_idx = i
					break
		if first_tr is None:
			# No header row detected - serialize normally
			return self._join_serialized(children, depth + 1)
		parts = []
		# Emit any children before the header row (e.g. colgroup, caption)
		parts.append(self._join_serialized(children[:first_tr_idx], depth + 1))
		# Wrap the header row in <thead>
		parts.append('<thead>')
		parts.append(self.serialize(first_tr, depth + 2))
		parts.append('</thead>')
		# Wrap remaining rows in <tbody> unless one already exists
		remaining = children[first_tr_idx + 1 :]
		if remaining and not has_tbody:
			parts.append('<tbody>')
			parts.append(self._join_serialized(remaining, depth + 2))
			parts.append('</tbody>')
		else:
			parts.append(self._join_serialized(remaining, depth + 1))
		return ''.join(parts)

	def _serialize_attributes(self, attributes: dict[str, str]) -> str:
		"""Serialize element attributes to an HTML attribute string.

		Args:
			attributes: Dictionary of attribute names to values

		Returns:
			HTML attribute string (e.g., 'class="foo" id="bar"')
		"""
		parts = []
		for key, value in attributes.items():
			# Skip href if not extracting links
			if key == 'href' and not self.extract_links:
				continue
			# Skip data-* attributes as they often contain JSON payloads
			# used by modern SPAs (React, Vue, Angular) for state management
			if key.startswith('data-'):
				continue
			# Boolean attributes are emitted without a value
			if value == '' or value is None:
				parts.append(key)
			else:
				parts.append(f'{key}="{self._escape_attribute(value)}"')
		return ' '.join(parts)

	def _escape_html(self, text: str) -> str:
		"""Escape HTML special characters in text content.

		'&' is replaced first so the other replacements are not double-escaped.
		"""
		return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')

	def _escape_attribute(self, value: str) -> str:
		"""Escape HTML special characters (including both quote styles) in attribute values."""
		return (
			value.replace('&', '&amp;')
			.replace('<', '&lt;')
			.replace('>', '&gt;')
			.replace('"', '&quot;')
			.replace("'", '&#39;')
		)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/html_serializer.py",
"license": "MIT License",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/init_cmd.py | """
Standalone init command for browser-use template generation.
This module provides a minimal command-line interface for generating
browser-use templates without requiring heavy TUI dependencies.
"""
import json
import shutil
import sys
from pathlib import Path
from typing import Any
from urllib import request
from urllib.error import URLError
import click
from InquirerPy import inquirer
from InquirerPy.base.control import Choice
from InquirerPy.utils import InquirerPyStyle
from rich.console import Console
from rich.panel import Panel
from rich.text import Text
# Rich console for styled output
console = Console()
# GitHub template repository URL (for runtime fetching)
TEMPLATE_REPO_URL = 'https://raw.githubusercontent.com/browser-use/template-library/main'
# Export for backward compatibility with cli.py
# Templates are fetched at runtime via _get_template_list()
INIT_TEMPLATES: dict[str, Any] = {}
def _fetch_template_list() -> dict[str, Any] | None:
	"""
	Fetch template list from GitHub templates.json.

	Returns template dict if successful, None if failed.
	"""
	try:
		url = f'{TEMPLATE_REPO_URL}/templates.json'
		with request.urlopen(url, timeout=5) as response:
			data = response.read().decode('utf-8')
			return json.loads(data)
	except Exception:
		# Best-effort fetch: any failure (network, timeout, malformed JSON)
		# maps to None so the caller decides how to surface the error.
		# (The original listed URLError/TimeoutError/JSONDecodeError alongside
		# Exception, which already subsumes them all.)
		return None
def _get_template_list() -> dict[str, Any]:
	"""
	Get template list from GitHub.

	Raises FileNotFoundError if GitHub fetch fails.
	"""
	fetched = _fetch_template_list()
	if fetched is None:
		# The fetcher swallows all errors and returns None; translate that
		# into a hard failure here.
		raise FileNotFoundError('Could not fetch templates from GitHub. Check your internet connection.')
	return fetched
def _fetch_from_github(file_path: str) -> str | None:
	"""
	Fetch template file from GitHub.

	Returns file content if successful, None if failed.
	"""
	try:
		url = f'{TEMPLATE_REPO_URL}/{file_path}'
		with request.urlopen(url, timeout=5) as response:
			return response.read().decode('utf-8')
	except Exception:
		# Best-effort fetch: any failure maps to None. (The original listed
		# URLError/TimeoutError alongside Exception, which subsumes them.)
		return None
def _fetch_binary_from_github(file_path: str) -> bytes | None:
	"""
	Fetch binary file from GitHub.

	Returns file content if successful, None if failed.
	"""
	try:
		url = f'{TEMPLATE_REPO_URL}/{file_path}'
		with request.urlopen(url, timeout=5) as response:
			return response.read()
	except Exception:
		# Best-effort fetch: any failure maps to None. (The original listed
		# URLError/TimeoutError alongside Exception, which subsumes them.)
		return None
def _get_template_content(file_path: str) -> str:
	"""
	Get template file content from GitHub.

	Raises FileNotFoundError if the fetch fails.
	"""
	fetched = _fetch_from_github(file_path)
	if fetched is None:
		raise FileNotFoundError(f'Could not fetch template from GitHub: {file_path}')
	return fetched
# InquirerPy style for template selection (browser-use orange theme)
inquirer_style = InquirerPyStyle(
{
'pointer': '#fe750e bold',
'highlighted': '#fe750e bold',
'question': 'bold',
'answer': '#fe750e bold',
'questionmark': '#fe750e bold',
}
)
def _get_terminal_width() -> int:
"""Get current terminal width in columns."""
return shutil.get_terminal_size().columns
def _format_choice(name: str, metadata: dict[str, Any], width: int, is_default: bool = False) -> str:
"""
Format a template choice with responsive display based on terminal width.
Styling:
- Featured templates get [FEATURED] prefix
- Author name included when width allows (except for default templates)
- Everything turns orange when highlighted (InquirerPy's built-in behavior)
Args:
name: Template name
metadata: Template metadata (description, featured, author)
width: Terminal width in columns
is_default: Whether this is a default template (default, advanced, tools)
Returns:
Formatted choice string
"""
is_featured = metadata.get('featured', False)
description = metadata.get('description', '')
author_name = metadata.get('author', {}).get('name', '') if isinstance(metadata.get('author'), dict) else ''
# Build the choice string based on terminal width
if width > 100:
# Wide: show everything including author (except for default templates)
if is_featured:
if author_name:
return f'[FEATURED] {name} by {author_name} - {description}'
else:
return f'[FEATURED] {name} - {description}'
else:
# Non-featured templates
if author_name and not is_default:
return f'{name} by {author_name} - {description}'
else:
return f'{name} - {description}'
elif width > 60:
# Medium: show name and description, no author
if is_featured:
return f'[FEATURED] {name} - {description}'
else:
return f'{name} - {description}'
else:
# Narrow: show name only
return name
def _write_init_file(output_path: Path, content: str, force: bool = False) -> bool:
"""Write content to a file, with safety checks."""
# Check if file already exists
if output_path.exists() and not force:
console.print(f'[yellow]⚠[/yellow] File already exists: [cyan]{output_path}[/cyan]')
if not click.confirm('Overwrite?', default=False):
console.print('[red]✗[/red] Cancelled')
return False
# Ensure parent directory exists
output_path.parent.mkdir(parents=True, exist_ok=True)
# Write file
try:
output_path.write_text(content, encoding='utf-8')
return True
except Exception as e:
console.print(f'[red]✗[/red] Error writing file: {e}')
return False
@click.command('browser-use-init')
@click.option(
	'--template',
	'-t',
	type=str,
	help='Template to use',
)
@click.option(
	'--output',
	'-o',
	type=click.Path(),
	help='Output file path (default: browser_use_<template>.py)',
)
@click.option(
	'--force',
	'-f',
	is_flag=True,
	help='Overwrite existing files without asking',
)
@click.option(
	'--list',
	'-l',
	'list_templates',
	is_flag=True,
	help='List available templates',
)
def main(
	template: str | None,
	output: str | None,
	force: bool,
	list_templates: bool,
):
	"""
	Generate a browser-use template file to get started quickly.
	Examples:
	\b
	# Interactive mode - prompts for template selection
	uvx browser-use init
	uvx browser-use init --template
	\b
	# Generate default template
	uvx browser-use init --template default
	\b
	# Generate advanced template with custom filename
	uvx browser-use init --template advanced --output my_script.py
	\b
	# List available templates
	uvx browser-use init --list
	"""
	# Fetch template list at runtime
	# NOTE(review): this binds a function-local INIT_TEMPLATES; the module-level
	# INIT_TEMPLATES dict stays empty - confirm nothing imports the global expecting data.
	try:
		INIT_TEMPLATES = _get_template_list()
	except FileNotFoundError as e:
		console.print(f'[red]✗[/red] {e}')
		sys.exit(1)
	# Handle --list flag: print the catalog and exit without generating anything
	if list_templates:
		console.print('\n[bold]Available templates:[/bold]\n')
		for name, info in INIT_TEMPLATES.items():
			console.print(f'  [#fe750e]{name:12}[/#fe750e] - {info["description"]}')
		console.print()
		return
	# Interactive template selection if not provided
	if not template:
		# Get terminal width for responsive formatting
		width = _get_terminal_width()
		# Separate default and featured templates
		default_template_names = ['default', 'advanced', 'tools']
		featured_templates = [(name, info) for name, info in INIT_TEMPLATES.items() if info.get('featured', False)]
		other_templates = [
			(name, info)
			for name, info in INIT_TEMPLATES.items()
			if name not in default_template_names and not info.get('featured', False)
		]
		# Sort by last_modified_date (most recent first)
		def get_last_modified(item):
			# ISO date strings compare lexicographically; missing dates sort oldest
			name, info = item
			date_str = (
				info.get('author', {}).get('last_modified_date', '1970-01-01')
				if isinstance(info.get('author'), dict)
				else '1970-01-01'
			)
			return date_str
		# Sort default templates by last modified
		default_templates = [(name, INIT_TEMPLATES[name]) for name in default_template_names if name in INIT_TEMPLATES]
		default_templates.sort(key=get_last_modified, reverse=True)
		# Sort featured and other templates by last modified
		featured_templates.sort(key=get_last_modified, reverse=True)
		other_templates.sort(key=get_last_modified, reverse=True)
		# Build choices in order: defaults first, then featured, then others
		choices = []
		# Add default templates
		for i, (name, info) in enumerate(default_templates):
			formatted = _format_choice(name, info, width, is_default=True)
			choices.append(Choice(name=formatted, value=name))
		# Add featured templates
		for i, (name, info) in enumerate(featured_templates):
			formatted = _format_choice(name, info, width, is_default=False)
			choices.append(Choice(name=formatted, value=name))
		# Add other templates (if any)
		for name, info in other_templates:
			formatted = _format_choice(name, info, width, is_default=False)
			choices.append(Choice(name=formatted, value=name))
		# Use fuzzy prompt for search functionality
		# Use getattr to avoid static analysis complaining about non-exported names
		_fuzzy = getattr(inquirer, 'fuzzy')
		template = _fuzzy(
			message='Select a template (type to search):',
			choices=choices,
			style=inquirer_style,
			max_height='70%',
		).execute()
		# Handle user cancellation (Ctrl+C)
		if template is None:
			console.print('\n[red]✗[/red] Cancelled')
			sys.exit(1)
	# Template is guaranteed to be set at this point (either from option or prompt)
	assert template is not None
	# Create template directory (named after the template, under the cwd)
	template_dir = Path.cwd() / template
	if template_dir.exists() and not force:
		console.print(f'[yellow]⚠[/yellow] Directory already exists: [cyan]{template_dir}[/cyan]')
		if not click.confirm('Continue and overwrite files?', default=False):
			console.print('[red]✗[/red] Cancelled')
			sys.exit(1)
	# Create directory
	template_dir.mkdir(parents=True, exist_ok=True)
	# Determine output path
	if output:
		output_path = template_dir / Path(output)
	else:
		output_path = template_dir / 'main.py'
	# Read template file from GitHub
	try:
		template_file = INIT_TEMPLATES[template]['file']
		content = _get_template_content(template_file)
	except Exception as e:
		console.print(f'[red]✗[/red] Error reading template: {e}')
		sys.exit(1)
	# Write file
	if _write_init_file(output_path, content, force):
		console.print(f'\n[green]✓[/green] Created [cyan]{output_path}[/cyan]')
	# Generate additional files if template has a manifest
	if 'files' in INIT_TEMPLATES[template]:
		# Imported lazily: only needed when a manifest requests executable bits
		import stat
		for file_spec in INIT_TEMPLATES[template]['files']:
			source_path = file_spec['source']
			dest_name = file_spec['dest']
			dest_path = output_path.parent / dest_name
			is_binary = file_spec.get('binary', False)
			is_executable = file_spec.get('executable', False)
			# Skip if we already wrote this file (main.py)
			if dest_path == output_path:
				continue
			# Fetch and write file; a failure for one file does not abort the rest
			try:
				if is_binary:
					file_content = _fetch_binary_from_github(source_path)
					if file_content:
						if not dest_path.exists() or force:
							dest_path.write_bytes(file_content)
							console.print(f'[green]✓[/green] Created [cyan]{dest_name}[/cyan]')
					else:
						console.print(f'[yellow]⚠[/yellow] Could not fetch [cyan]{dest_name}[/cyan] from GitHub')
				else:
					file_content = _get_template_content(source_path)
					if _write_init_file(dest_path, file_content, force):
						console.print(f'[green]✓[/green] Created [cyan]{dest_name}[/cyan]')
				# Make executable if needed (chmod is a no-op concept on Windows)
				if is_executable and sys.platform != 'win32':
					dest_path.chmod(dest_path.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
			except Exception as e:
				console.print(f'[yellow]⚠[/yellow] Error generating [cyan]{dest_name}[/cyan]: {e}')
	# Create a nice panel for next steps
	next_steps = Text()
	# Display next steps from manifest if available
	if 'next_steps' in INIT_TEMPLATES[template]:
		steps = INIT_TEMPLATES[template]['next_steps']
		for i, step in enumerate(steps, 1):
			# Handle footer separately (no numbering)
			if 'footer' in step:
				next_steps.append(f'{step["footer"]}\n', style='dim italic')
				continue
			# Step title
			next_steps.append(f'\n{i}. {step["title"]}:\n', style='bold')
			# Step commands
			for cmd in step.get('commands', []):
				# Replace placeholders with the chosen template/output names
				cmd = cmd.replace('{template}', template)
				cmd = cmd.replace('{output}', output_path.name)
				next_steps.append(f'   {cmd}\n', style='dim')
			# Optional note
			if 'note' in step:
				next_steps.append(f'   {step["note"]}\n', style='dim italic')
			next_steps.append('\n')
	else:
		# Default workflow for templates without custom next_steps
		next_steps.append('\n1. Navigate to project directory:\n', style='bold')
		next_steps.append(f'   cd {template}\n\n', style='dim')
		next_steps.append('2. Initialize uv project:\n', style='bold')
		next_steps.append('   uv init\n\n', style='dim')
		next_steps.append('3. Install browser-use:\n', style='bold')
		next_steps.append('   uv add browser-use\n\n', style='dim')
		next_steps.append('4. Set up your API key in .env file or environment:\n', style='bold')
		next_steps.append('   BROWSER_USE_API_KEY=your-key\n', style='dim')
		next_steps.append(
			'   (Get your key at https://cloud.browser-use.com/dashboard/settings?tab=api-keys&new)\n\n',
			style='dim italic',
		)
		next_steps.append('5. Run your script:\n', style='bold')
		next_steps.append(f'   uv run {output_path.name}\n', style='dim')
	console.print(
		Panel(
			next_steps,
			title='[bold]Next steps[/bold]',
			border_style='#fe750e',
			padding=(1, 2),
		)
	)
if __name__ == '__main__':
	main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/init_cmd.py",
"license": "MIT License",
"lines": 370,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/browser_use/chat.py | """
ChatBrowserUse - Client for browser-use cloud API
This wraps the BaseChatModel protocol and sends requests to the browser-use cloud API
for optimized browser automation LLM inference.
"""
import asyncio
import logging
import os
import random
from typing import Any, TypeVar, overload
import httpx
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.observability import observe
T = TypeVar('T', bound=BaseModel)
logger = logging.getLogger(__name__)
# HTTP status codes that should trigger a retry
RETRYABLE_STATUS_CODES = {429, 500, 502, 503, 504}
class ChatBrowserUse(BaseChatModel):
	"""
	Client for browser-use cloud API.
	This sends requests to the browser-use cloud API which uses optimized models
	and prompts for browser automation tasks.
	Usage:
		agent = Agent(
			task="Find the number of stars of the browser-use repo",
			llm=ChatBrowserUse(model='bu-latest'),
		)
	"""
	def __init__(
		self,
		model: str = 'bu-latest',
		api_key: str | None = None,
		base_url: str | None = None,
		timeout: float = 120.0,
		max_retries: int = 5,
		retry_base_delay: float = 1.0,
		retry_max_delay: float = 60.0,
		**kwargs,
	):
		"""
		Initialize ChatBrowserUse client.
		Args:
			model: Model name to use. Options:
				- 'bu-latest' or 'bu-1-0': Default model
				- 'bu-2-0': Latest premium model
				- 'browser-use/bu-30b-a3b-preview': Browser Use Open Source Model
			api_key: API key for browser-use cloud. Defaults to BROWSER_USE_API_KEY env var.
			base_url: Base URL for the API. Defaults to BROWSER_USE_LLM_URL env var or production URL.
			timeout: Request timeout in seconds.
			max_retries: Maximum number of retries for transient errors (default: 5).
			retry_base_delay: Base delay in seconds for exponential backoff (default: 1.0).
			retry_max_delay: Maximum delay in seconds between retries (default: 60.0).
		"""
		# Validate model name - allow bu-* and browser-use/* patterns
		valid_models = ['bu-latest', 'bu-1-0', 'bu-2-0']
		is_valid = model in valid_models or model.startswith('browser-use/')
		if not is_valid:
			raise ValueError(f"Invalid model: '{model}'. Must be one of {valid_models} or start with 'browser-use/'")
		# Normalize bu-latest to bu-1-0 for default models
		if model == 'bu-latest':
			self.model = 'bu-1-0'
		else:
			self.model = model
		# NOTE(review): 'fast' is always False; no constructor argument toggles it - confirm intended.
		self.fast = False
		self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY')
		self.base_url = base_url or os.getenv('BROWSER_USE_LLM_URL', 'https://llm.api.browser-use.com')
		self.timeout = timeout
		self.max_retries = max_retries
		self.retry_base_delay = retry_base_delay
		self.retry_max_delay = retry_max_delay
		# The API key is mandatory - fail fast at construction time
		if not self.api_key:
			raise ValueError(
				'You need to set the BROWSER_USE_API_KEY environment variable. '
				'Get your key at https://cloud.browser-use.com/new-api-key'
			)
	@property
	def provider(self) -> str:
		# Provider identifier used by the BaseChatModel protocol
		return 'browser-use'
	@property
	def name(self) -> str:
		# The model name doubles as the display name
		return self.model
	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, request_type: str = 'browser_agent', **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...
	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T], request_type: str = 'browser_agent', **kwargs: Any
	) -> ChatInvokeCompletion[T]: ...
	@observe(name='chat_browser_use_ainvoke')
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T] | None = None,
		request_type: str = 'browser_agent',
		**kwargs: Any,
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Send request to browser-use cloud API.
		Args:
			messages: List of messages to send
			output_format: Expected output format (Pydantic model)
			request_type: Type of request - 'browser_agent' or 'judge'
			**kwargs: Additional arguments, including:
				- session_id: Session ID for sticky routing (same session → same container)
		Returns:
			ChatInvokeCompletion with structured response and usage info
		"""
		# Get ANONYMIZED_TELEMETRY setting from config
		from browser_use.config import CONFIG
		anonymized_telemetry = CONFIG.ANONYMIZED_TELEMETRY
		# Extract session_id from kwargs for sticky routing
		session_id = kwargs.get('session_id')
		# Prepare request payload
		payload: dict[str, Any] = {
			'model': self.model,
			'messages': [self._serialize_message(msg) for msg in messages],
			'fast': self.fast,
			'request_type': request_type,
			'anonymized_telemetry': anonymized_telemetry,
		}
		# Add session_id for sticky routing if provided
		if session_id:
			payload['session_id'] = session_id
		# Add output format schema if provided
		if output_format is not None:
			payload['output_format'] = output_format.model_json_schema()
		last_error: Exception | None = None
		# Retry loop with exponential backoff.
		# Success breaks out of the loop; the for/else clause below runs only
		# when the loop finishes WITHOUT a break (every attempt failed).
		for attempt in range(self.max_retries):
			try:
				result = await self._make_request(payload)
				break
			except httpx.HTTPStatusError as e:
				last_error = e
				status_code = e.response.status_code
				# Check if this is a retryable error
				if status_code in RETRYABLE_STATUS_CODES and attempt < self.max_retries - 1:
					# Exponential backoff with up to 10% random jitter, capped at retry_max_delay
					delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
					jitter = random.uniform(0, delay * 0.1)
					total_delay = delay + jitter
					logger.warning(
						f'⚠️ Got {status_code} error, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
					)
					await asyncio.sleep(total_delay)
					continue
				# Non-retryable HTTP error or exhausted retries
				self._raise_http_error(e)
			except (httpx.TimeoutException, httpx.ConnectError) as e:
				last_error = e
				# Network errors are retryable
				if attempt < self.max_retries - 1:
					delay = min(self.retry_base_delay * (2**attempt), self.retry_max_delay)
					jitter = random.uniform(0, delay * 0.1)
					total_delay = delay + jitter
					error_type = 'timeout' if isinstance(e, httpx.TimeoutException) else 'connection error'
					logger.warning(
						f'⚠️ Got {error_type}, retrying in {total_delay:.1f}s... (attempt {attempt + 1}/{self.max_retries})'
					)
					await asyncio.sleep(total_delay)
					continue
				# Exhausted retries
				if isinstance(e, httpx.TimeoutException):
					raise ValueError(f'Request timed out after {self.timeout}s (retried {self.max_retries} times)')
				raise ValueError(f'Failed to connect to browser-use API after {self.max_retries} attempts: {e}')
			except Exception as e:
				# Any other failure (e.g. unparseable response body) is not retried
				raise ValueError(f'Failed to connect to browser-use API: {e}')
		else:
			# Loop completed without break (all retries exhausted)
			if last_error is not None:
				if isinstance(last_error, httpx.HTTPStatusError):
					self._raise_http_error(last_error)
				raise ValueError(f'Request failed after {self.max_retries} attempts: {last_error}')
			raise RuntimeError('Retry loop completed without return or exception')
		# Parse response - server returns structured data as dict
		if output_format is not None:
			# Server returns structured data as a dict, validate it
			completion_data = result['completion']
			logger.debug(
				f'📥 Got structured data from service: {list(completion_data.keys()) if isinstance(completion_data, dict) else type(completion_data)}'
			)
			# Convert action dicts to ActionModel instances if needed
			# llm-use returns dicts to avoid validation with empty ActionModel
			if isinstance(completion_data, dict) and 'action' in completion_data:
				actions = completion_data['action']
				if actions and isinstance(actions[0], dict):
					from typing import get_args
					# Get ActionModel type from output_format
					action_model_type = get_args(output_format.model_fields['action'].annotation)[0]
					# Convert dicts to ActionModel instances
					completion_data['action'] = [action_model_type.model_validate(action_dict) for action_dict in actions]
			completion = output_format.model_validate(completion_data)
		else:
			completion = result['completion']
		# Parse usage info
		usage = None
		if 'usage' in result and result['usage'] is not None:
			from browser_use.llm.views import ChatInvokeUsage
			usage = ChatInvokeUsage(**result['usage'])
		return ChatInvokeCompletion(
			completion=completion,
			usage=usage,
		)
	async def _make_request(self, payload: dict) -> dict:
		"""Make a single API request."""
		# A fresh client per request: connections are not pooled across calls
		async with httpx.AsyncClient(timeout=self.timeout) as client:
			response = await client.post(
				f'{self.base_url}/v1/chat/completions',
				json=payload,
				headers={
					'Authorization': f'Bearer {self.api_key}',
					'Content-Type': 'application/json',
				},
			)
			response.raise_for_status()
			return response.json()
	def _raise_http_error(self, e: httpx.HTTPStatusError) -> None:
		"""Raise appropriate ModelProviderError for HTTP errors."""
		error_detail = ''
		try:
			error_data = e.response.json()
			error_detail = error_data.get('detail', str(e))
		except Exception:
			# Response body was not JSON - fall back to the exception text
			error_detail = str(e)
		status_code = e.response.status_code
		if status_code == 401:
			raise ModelProviderError(message=f'Invalid API key. {error_detail}', status_code=401, model=self.name)
		elif status_code == 402:
			raise ModelProviderError(message=f'Insufficient credits. {error_detail}', status_code=402, model=self.name)
		elif status_code == 429:
			raise ModelRateLimitError(message=f'Rate limit exceeded. {error_detail}', status_code=429, model=self.name)
		elif status_code in {500, 502, 503, 504}:
			raise ModelProviderError(message=f'Server error. {error_detail}', status_code=status_code, model=self.name)
		else:
			raise ModelProviderError(message=f'API request failed: {error_detail}', status_code=status_code, model=self.name)
	def _serialize_message(self, message: BaseMessage) -> dict:
		"""Serialize a message to JSON format."""
		# Handle Union types by checking the actual message type.
		# Only 'role' and 'content' are forwarded; any other message fields are dropped.
		msg_dict = message.model_dump()
		return {
			'role': msg_dict['role'],
			'content': msg_dict['content'],
		}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/browser_use/chat.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/cerebras/chat.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import (
APIConnectionError,
APIError,
APIStatusError,
APITimeoutError,
AsyncOpenAI,
RateLimitError,
)
from openai.types.chat import ChatCompletion
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.cerebras.serializer import CerebrasMessageSerializer
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatCerebras(BaseChatModel):
	"""Cerebras inference wrapper (OpenAI-compatible).

	Talks to the Cerebras endpoint through the AsyncOpenAI client, so request
	and response shapes follow the OpenAI chat-completions API.
	"""

	model: str = 'llama3.1-8b'

	# Generation parameters
	max_tokens: int | None = 4096
	temperature: float | None = 0.2
	top_p: float | None = None
	seed: int | None = None

	# Connection parameters
	api_key: str | None = None
	base_url: str | httpx.URL | None = 'https://api.cerebras.ai/v1'
	timeout: float | httpx.Timeout | None = None
	client_params: dict[str, Any] | None = None

	@property
	def provider(self) -> str:
		return 'cerebras'

	def _client(self) -> AsyncOpenAI:
		"""Build a fresh OpenAI-compatible async client for the Cerebras API."""
		return AsyncOpenAI(
			api_key=self.api_key,
			base_url=self.base_url,
			timeout=self.timeout,
			**(self.client_params or {}),
		)

	@property
	def name(self) -> str:
		return self.model

	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
		"""Map OpenAI-style usage stats onto ChatInvokeUsage (None if absent)."""
		if response.usage is None:
			return None
		return ChatInvokeUsage(
			prompt_tokens=response.usage.prompt_tokens,
			prompt_cached_tokens=None,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			completion_tokens=response.usage.completion_tokens,
			total_tokens=response.usage.total_tokens,
		)

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T],
		**kwargs: Any,
	) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T] | None = None,
		**kwargs: Any,
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Cerebras ainvoke supports:
		1. Regular text/multi-turn conversation
		2. JSON Output (response_format)

		Raises:
			ModelRateLimitError: on HTTP 429 from the API.
			ModelProviderError: on any other API or parsing failure.
		"""
		client = self._client()
		cerebras_messages = CerebrasMessageSerializer.serialize_messages(messages)

		# Only forward the sampling parameters that are actually configured.
		common: dict[str, Any] = {}
		if self.temperature is not None:
			common['temperature'] = self.temperature
		if self.max_tokens is not None:
			common['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			common['top_p'] = self.top_p
		if self.seed is not None:
			common['seed'] = self.seed

		# ① Regular multi-turn conversation/text output
		if output_format is None:
			try:
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=cerebras_messages,  # type: ignore
					**common,
				)
				usage = self._get_usage(resp)
				return ChatInvokeCompletion(
					completion=resp.choices[0].message.content or '',
					usage=usage,
				)
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e

		# ② JSON Output path (response_format)
		# output_format is guaranteed non-None here; require a pydantic model.
		if hasattr(output_format, 'model_json_schema'):
			try:
				# For Cerebras, we'll use a simpler approach without response_format:
				# ask the model to return JSON matching the schema, then parse it.
				import json

				# Get the schema to guide the model
				schema = output_format.model_json_schema()
				schema_str = json.dumps(schema, indent=2)

				# Create a prompt that asks for the specific JSON structure
				json_prompt = f"""
Please respond with a JSON object that follows this exact schema:
{schema_str}
Your response must be valid JSON only, no other text.
"""

				# Add or modify the last user message to include the JSON prompt
				if cerebras_messages and cerebras_messages[-1]['role'] == 'user':
					if isinstance(cerebras_messages[-1]['content'], str):
						cerebras_messages[-1]['content'] += json_prompt
					elif isinstance(cerebras_messages[-1]['content'], list):
						cerebras_messages[-1]['content'].append({'type': 'text', 'text': json_prompt})
				else:
					# Add as a new user message
					cerebras_messages.append({'role': 'user', 'content': json_prompt})

				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=cerebras_messages,  # type: ignore
					**common,
				)

				content = resp.choices[0].message.content
				if not content:
					raise ModelProviderError('Empty JSON content in Cerebras response', model=self.name)

				usage = self._get_usage(resp)

				# Try to extract the first JSON object from the (possibly chatty) response
				import re

				json_match = re.search(r'\{.*\}', content, re.DOTALL)
				json_str = json_match.group(0) if json_match else content

				parsed = output_format.model_validate_json(json_str)
				return ChatInvokeCompletion(
					completion=parsed,
					usage=usage,
				)
			except (ModelRateLimitError, ModelProviderError):
				# Bug fix: previously our own ModelProviderError (e.g. the empty
				# content case above) was caught by the generic `except Exception`
				# below and re-wrapped, nesting the error message. Re-raise as-is.
				raise
			except RateLimitError as e:
				raise ModelRateLimitError(str(e), model=self.name) from e
			except (APIError, APIConnectionError, APITimeoutError, APIStatusError) as e:
				raise ModelProviderError(str(e), model=self.name) from e
			except Exception as e:
				raise ModelProviderError(str(e), model=self.name) from e

		raise ModelProviderError('No valid ainvoke execution path for Cerebras LLM', model=self.name)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/cerebras/chat.py",
"license": "MIT License",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/cerebras/serializer.py | from __future__ import annotations
import json
from typing import Any, overload
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
MessageDict = dict[str, Any]


class CerebrasMessageSerializer:
	"""Serializer for converting browser-use messages to Cerebras messages."""

	# -------- Content handling ----------------------------------------------
	@staticmethod
	def _serialize_text_part(part: ContentPartTextParam) -> str:
		"""Return the raw text of a text content part."""
		return part.text

	@staticmethod
	def _serialize_image_part(part: ContentPartImageParam) -> dict[str, Any]:
		"""Serialize an image content part to the OpenAI-style image_url dict.

		Both base64 ``data:`` URLs and remote URLs are forwarded verbatim, so
		no branching on the URL scheme is needed (the previous if/else had two
		byte-identical branches — dead code, removed).
		"""
		return {'type': 'image_url', 'image_url': {'url': part.image_url.url}}

	@staticmethod
	def _serialize_content(content: Any) -> str | list[dict[str, Any]]:
		"""Normalize message content to a string or a list of content-part dicts.

		None becomes an empty string; strings pass through unchanged; lists of
		content parts are converted part-by-part (unknown part types are skipped).
		"""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		serialized: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				serialized.append({'type': 'text', 'text': CerebrasMessageSerializer._serialize_text_part(part)})
			elif part.type == 'image_url':
				serialized.append(CerebrasMessageSerializer._serialize_image_part(part))
			elif part.type == 'refusal':
				serialized.append({'type': 'text', 'text': f'[Refusal] {part.refusal}'})
		return serialized

	# -------- Tool-call handling ---------------------------------------------
	@staticmethod
	def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[dict[str, Any]]:
		"""Convert tool calls, decoding JSON argument strings where possible.

		NOTE(review): decoded arguments are emitted as a dict, unlike the OpenAI
		wire format which keeps them as a JSON string — presumably intentional
		for Cerebras; verify against the Cerebras API docs.
		"""
		cerebras_tool_calls: list[dict[str, Any]] = []
		for tc in tool_calls:
			try:
				arguments = json.loads(tc.function.arguments)
			except json.JSONDecodeError:
				# Not valid JSON — wrap the raw argument string instead.
				arguments = {'arguments': tc.function.arguments}
			cerebras_tool_calls.append(
				{
					'id': tc.id,
					'type': 'function',
					'function': {
						'name': tc.function.name,
						'arguments': arguments,
					},
				}
			)
		return cerebras_tool_calls

	# -------- Single-message serialization ------------------------------------
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> MessageDict: ...

	@staticmethod
	def serialize(message: BaseMessage) -> MessageDict:
		"""Serialize one browser-use message to a Cerebras message dict.

		Raises:
			ValueError: if the message is not a known message type.
		"""
		if isinstance(message, UserMessage):
			return {
				'role': 'user',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, SystemMessage):
			return {
				'role': 'system',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, AssistantMessage):
			msg: MessageDict = {
				'role': 'assistant',
				'content': CerebrasMessageSerializer._serialize_content(message.content),
			}
			# tool_calls are only attached when present, matching the API shape.
			if message.tool_calls:
				msg['tool_calls'] = CerebrasMessageSerializer._serialize_tool_calls(message.tool_calls)
			return msg
		raise ValueError(f'Unknown message type: {type(message)}')

	# -------- List serialization ----------------------------------------------
	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[MessageDict]:
		"""Serialize a list of messages, preserving order."""
		return [CerebrasMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/cerebras/serializer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/oci_raw/chat.py | """
OCI Raw API chat model integration for browser-use.
This module provides direct integration with Oracle Cloud Infrastructure's
Generative AI service using raw API calls without Langchain dependencies.
"""
import asyncio
import json
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import oci
from oci.generative_ai_inference import GenerativeAiInferenceClient
from oci.generative_ai_inference.models import (
BaseChatRequest,
ChatDetails,
CohereChatRequest,
GenericChatRequest,
OnDemandServingMode,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from .serializer import OCIRawMessageSerializer
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOCIRaw(BaseChatModel):
	"""
	A direct OCI Raw API integration for browser-use that bypasses Langchain.

	This class provides a browser-use compatible interface for OCI GenAI models
	using direct API calls to Oracle Cloud Infrastructure.

	Args:
		model_id: The OCI GenAI model OCID
		service_endpoint: The OCI service endpoint URL
		compartment_id: The OCI compartment OCID
		provider: The model provider (e.g., "meta", "cohere", "xai")
		temperature: Temperature for response generation (0.0-2.0) - supported by all providers
		max_tokens: Maximum tokens in response - supported by all providers
		frequency_penalty: Frequency penalty for response generation - supported by Meta and Cohere only
		presence_penalty: Presence penalty for response generation - supported by Meta only
		top_p: Top-p sampling parameter - supported by all providers
		top_k: Top-k sampling parameter - supported by Cohere and xAI only
		auth_type: Authentication type (e.g., "API_KEY")
		auth_profile: Authentication profile name
		timeout: Request timeout in seconds
	"""

	# Model configuration
	model_id: str
	service_endpoint: str
	compartment_id: str
	provider: str = 'meta'

	# Model parameters
	temperature: float | None = 1.0
	max_tokens: int | None = 600
	frequency_penalty: float | None = 0.0
	presence_penalty: float | None = 0.0
	top_p: float | None = 0.75
	top_k: int | None = 0  # Used by Cohere models

	# Authentication
	auth_type: str = 'API_KEY'
	auth_profile: str = 'DEFAULT'

	# Client configuration
	timeout: float = 60.0

	# Static properties
	@property
	def provider_name(self) -> str:
		return 'oci-raw'

	def _short_name(self) -> str:
		"""Telemetry-safe display name (max 100 chars) derived from the model OCID.

		Shared by `name` and `model_name`, which previously duplicated this logic.
		"""
		if len(self.model_id) > 90:
			# Extract the model name from the OCID
			parts = self.model_id.split('.')
			if len(parts) >= 4:
				return f'oci-{self.provider}-{parts[3]}'  # e.g., "oci-meta-us-chicago-1"
			return f'oci-{self.provider}-model'
		return self.model_id

	@property
	def name(self) -> str:
		# Return a shorter name for telemetry (max 100 chars)
		return self._short_name()

	@property
	def model(self) -> str:
		return self.model_id

	@property
	def model_name(self) -> str:
		# Override for telemetry - return shorter name (max 100 chars)
		return self._short_name()

	def _uses_cohere_format(self) -> bool:
		"""Check if the provider uses Cohere chat request format."""
		return self.provider.lower() == 'cohere'

	def _get_supported_parameters(self) -> dict[str, bool]:
		"""Get which parameters are supported by the current provider."""
		provider = self.provider.lower()
		if provider == 'meta':
			return {
				'temperature': True,
				'max_tokens': True,
				'frequency_penalty': True,
				'presence_penalty': True,
				'top_p': True,
				'top_k': False,
			}
		elif provider == 'cohere':
			return {
				'temperature': True,
				'max_tokens': True,
				'frequency_penalty': True,
				'presence_penalty': False,
				'top_p': True,
				'top_k': True,
			}
		elif provider == 'xai':
			return {
				'temperature': True,
				'max_tokens': True,
				'frequency_penalty': False,
				'presence_penalty': False,
				'top_p': True,
				'top_k': True,
			}
		else:
			# Default: assume all parameters are supported
			return {
				'temperature': True,
				'max_tokens': True,
				'frequency_penalty': True,
				'presence_penalty': True,
				'top_p': True,
				'top_k': True,
			}

	def _get_oci_client(self) -> GenerativeAiInferenceClient:
		"""Lazily build and cache the OCI GenerativeAiInferenceClient."""
		if not hasattr(self, '_client'):
			# Configure OCI client based on auth_type
			if self.auth_type == 'INSTANCE_PRINCIPAL':
				config = {}
				signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
				self._client = GenerativeAiInferenceClient(
					config=config,
					signer=signer,
					service_endpoint=self.service_endpoint,
					retry_strategy=oci.retry.NoneRetryStrategy(),
					timeout=(10, 240),
				)
			elif self.auth_type == 'RESOURCE_PRINCIPAL':
				config = {}
				signer = oci.auth.signers.get_resource_principals_signer()
				self._client = GenerativeAiInferenceClient(
					config=config,
					signer=signer,
					service_endpoint=self.service_endpoint,
					retry_strategy=oci.retry.NoneRetryStrategy(),
					timeout=(10, 240),
				)
			else:
				# 'API_KEY' and any unknown auth type both fall back to
				# config-file auth (the two branches were previously duplicated).
				config = oci.config.from_file('~/.oci/config', self.auth_profile)
				self._client = GenerativeAiInferenceClient(
					config=config,
					service_endpoint=self.service_endpoint,
					retry_strategy=oci.retry.NoneRetryStrategy(),
					timeout=(10, 240),
				)
		return self._client

	def _extract_usage(self, response) -> ChatInvokeUsage | None:
		"""Extract usage information from OCI response (best-effort; None on failure)."""
		try:
			# The response is the direct OCI response object, not a dict
			if hasattr(response, 'data') and hasattr(response.data, 'chat_response'):
				chat_response = response.data.chat_response
				if hasattr(chat_response, 'usage'):
					usage = chat_response.usage
					return ChatInvokeUsage(
						prompt_tokens=getattr(usage, 'prompt_tokens', 0),
						prompt_cached_tokens=None,
						prompt_cache_creation_tokens=None,
						prompt_image_tokens=None,
						completion_tokens=getattr(usage, 'completion_tokens', 0),
						total_tokens=getattr(usage, 'total_tokens', 0),
					)
			return None
		except Exception:
			# Usage is optional metadata; never fail the call over it.
			return None

	def _extract_content(self, response) -> str:
		"""Extract text content from OCI response.

		Raises:
			ModelProviderError: if the response shape is unrecognized.
		"""
		try:
			# The response is the direct OCI response object, not a dict
			if not hasattr(response, 'data'):
				raise ModelProviderError(message='Invalid response format: no data attribute', status_code=500, model=self.name)
			chat_response = response.data.chat_response
			# Handle different response types based on provider
			if hasattr(chat_response, 'text'):
				# Cohere response format - has direct text attribute
				return chat_response.text or ''
			elif hasattr(chat_response, 'choices') and chat_response.choices:
				# Generic response format - has choices array (Meta, xAI)
				choice = chat_response.choices[0]
				message = choice.message
				content_parts = message.content
				# Extract text from content parts
				text_parts = [part.text for part in content_parts if hasattr(part, 'text')]
				return '\n'.join(text_parts) if text_parts else ''
			else:
				raise ModelProviderError(
					message=f'Unsupported response format: {type(chat_response).__name__}', status_code=500, model=self.name
				)
		except ModelProviderError:
			# Bug fix: re-raise our own errors unchanged; previously they were
			# swallowed by the generic handler below and re-wrapped, nesting
			# the message.
			raise
		except Exception as e:
			raise ModelProviderError(
				message=f'Failed to extract content from response: {str(e)}', status_code=500, model=self.name
			) from e

	async def _make_request(self, messages: list[BaseMessage]):
		"""Make async request to OCI API using proper OCI SDK models."""
		# Create chat request based on provider type
		if self._uses_cohere_format():
			# Cohere models use CohereChatRequest with single message string
			message_text = OCIRawMessageSerializer.serialize_messages_for_cohere(messages)
			chat_request = CohereChatRequest()
			chat_request.message = message_text
			chat_request.max_tokens = self.max_tokens
			chat_request.temperature = self.temperature
			chat_request.frequency_penalty = self.frequency_penalty
			chat_request.top_p = self.top_p
			chat_request.top_k = self.top_k
		else:
			# Meta, xAI and other models use GenericChatRequest with messages array
			oci_messages = OCIRawMessageSerializer.serialize_messages(messages)
			chat_request = GenericChatRequest()
			chat_request.api_format = BaseChatRequest.API_FORMAT_GENERIC
			chat_request.messages = oci_messages
			chat_request.max_tokens = self.max_tokens
			chat_request.temperature = self.temperature
			chat_request.top_p = self.top_p
			# Provider-specific parameters
			if self.provider.lower() == 'meta':
				# Meta models support frequency_penalty and presence_penalty
				chat_request.frequency_penalty = self.frequency_penalty
				chat_request.presence_penalty = self.presence_penalty
			elif self.provider.lower() == 'xai':
				# xAI models support top_k but not frequency_penalty or presence_penalty
				chat_request.top_k = self.top_k
			else:
				# Default: include all parameters for unknown providers
				chat_request.frequency_penalty = self.frequency_penalty
				chat_request.presence_penalty = self.presence_penalty

		# Create serving mode
		serving_mode = OnDemandServingMode(model_id=self.model_id)

		# Create chat details
		chat_details = ChatDetails()
		chat_details.serving_mode = serving_mode
		chat_details.chat_request = chat_request
		chat_details.compartment_id = self.compartment_id

		# Make the request in a thread to avoid blocking
		def _sync_request():
			try:
				client = self._get_oci_client()
				response = client.chat(chat_details)
				return response  # Return the raw response object
			except Exception as e:
				# Handle OCI-specific exceptions
				status_code = getattr(e, 'status', 500)
				if status_code == 429:
					raise ModelRateLimitError(message=f'Rate limit exceeded: {str(e)}', model=self.name) from e
				else:
					raise ModelProviderError(message=str(e), status_code=status_code, model=self.name) from e

		# Run in thread pool to make it async.
		# Bug fix: asyncio.get_event_loop() is deprecated inside a coroutine
		# (Python 3.10+); get_running_loop() is the correct call here.
		loop = asyncio.get_running_loop()
		return await loop.run_in_executor(None, _sync_request)

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the OCI GenAI model with the given messages using raw API.

		Args:
			messages: List of chat messages
			output_format: Optional Pydantic model class for structured output

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelRateLimitError: on HTTP 429 from OCI.
			ModelProviderError: on any other request or parsing failure.
		"""
		try:
			if output_format is None:
				# Return string response
				response = await self._make_request(messages)
				content = self._extract_content(response)
				usage = self._extract_usage(response)
				return ChatInvokeCompletion(
					completion=content,
					usage=usage,
				)
			else:
				# For structured output, add JSON schema instructions
				optimized_schema = SchemaOptimizer.create_optimized_json_schema(output_format)

				# Add JSON schema instruction to messages
				system_instruction = f"""
You must respond with ONLY a valid JSON object that matches this exact schema:
{json.dumps(optimized_schema, indent=2)}
IMPORTANT:
- Your response must be ONLY the JSON object, no additional text
- The JSON must be valid and parseable
- All required fields must be present
- No extra fields are allowed
- Use proper JSON syntax with double quotes
"""

				# Clone the list and add the system instruction.
				modified_messages = messages.copy()
				from browser_use.llm.messages import SystemMessage

				if modified_messages and hasattr(modified_messages[0], 'role') and modified_messages[0].role == 'system':
					# Bug fix: build a NEW SystemMessage instead of mutating the
					# caller's message in place — messages.copy() is shallow, so
					# the old in-place `.content` assignment leaked the JSON
					# instruction back into the caller's message objects.
					existing_content = modified_messages[0].content
					if isinstance(existing_content, str):
						combined = existing_content + '\n\n' + system_instruction
					else:
						# Handle list content
						combined = str(existing_content) + '\n\n' + system_instruction
					modified_messages[0] = SystemMessage(content=combined)
				else:
					# Insert new system message at the beginning
					modified_messages.insert(0, SystemMessage(content=system_instruction))

				response = await self._make_request(modified_messages)
				response_text = self._extract_content(response)

				# Clean and parse the JSON response
				try:
					# Clean the response text
					cleaned_text = response_text.strip()

					# Remove markdown code blocks if present
					if cleaned_text.startswith('```json'):
						cleaned_text = cleaned_text[7:]
					if cleaned_text.startswith('```'):
						cleaned_text = cleaned_text[3:]
					if cleaned_text.endswith('```'):
						cleaned_text = cleaned_text[:-3]
					cleaned_text = cleaned_text.strip()

					# Try to find JSON object in the response
					if not cleaned_text.startswith('{'):
						start_idx = cleaned_text.find('{')
						end_idx = cleaned_text.rfind('}')
						if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
							cleaned_text = cleaned_text[start_idx : end_idx + 1]

					# Parse the JSON
					parsed_data = json.loads(cleaned_text)
					parsed = output_format.model_validate(parsed_data)

					usage = self._extract_usage(response)
					return ChatInvokeCompletion(
						completion=parsed,
						usage=usage,
					)
				except (json.JSONDecodeError, ValueError) as e:
					raise ModelProviderError(
						message=f'Failed to parse structured output: {str(e)}. Response was: {response_text[:200]}...',
						status_code=500,
						model=self.name,
					) from e

		except ModelRateLimitError:
			# Re-raise rate limit errors as-is
			raise
		except ModelProviderError:
			# Re-raise provider errors as-is
			raise
		except Exception as e:
			# Handle any other exceptions
			raise ModelProviderError(
				message=f'Unexpected error: {str(e)}',
				status_code=500,
				model=self.name,
			) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/oci_raw/chat.py",
"license": "MIT License",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/oci_raw/serializer.py | """
Message serializer for OCI Raw API integration.
This module handles the conversion between browser-use message formats
and the OCI Raw API message format using proper OCI SDK models.
"""
from oci.generative_ai_inference.models import ImageContent, ImageUrl, Message, TextContent
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
SystemMessage,
UserMessage,
)
class OCIRawMessageSerializer:
	"""
	Serializer for converting between browser-use message types and OCI Raw API message formats.

	Uses proper OCI SDK model objects as shown in the working example.

	Supports both:
	- GenericChatRequest (Meta, xAI models) - uses messages array
	- CohereChatRequest (Cohere models) - uses single message string
	"""

	@staticmethod
	def _is_base64_image(url: str) -> bool:
		"""Check if the URL is a base64 encoded image (data:image/... URI)."""
		return url.startswith('data:image/')

	@staticmethod
	def _parse_base64_url(url: str) -> str:
		"""Parse base64 URL and return the base64 data.

		Raises:
			ValueError: if the URL is not a well-formed base64 image data URI.
		"""
		if not OCIRawMessageSerializer._is_base64_image(url):
			raise ValueError(f'Not a base64 image URL: {url}')

		# Extract the base64 data from data:image/png;base64,<data>
		try:
			header, data = url.split(',', 1)
		except ValueError as e:
			# Bug fix: chain the original error so the root cause isn't lost.
			raise ValueError(f'Invalid base64 image URL format: {url}') from e
		return data

	@staticmethod
	def _create_image_content(part: ContentPartImageParam) -> ImageContent:
		"""Convert ContentPartImageParam to OCI ImageContent.

		OCI accepts both base64 ``data:`` URIs and regular URLs verbatim, so the
		URL is wrapped as-is (the previous if/else had two identical branches —
		dead code, removed).
		"""
		return ImageContent(image_url=ImageUrl(url=part.image_url.url))

	@staticmethod
	def _text_content(text: str) -> TextContent:
		"""Build an OCI TextContent holding *text* (dedupes repeated construction)."""
		tc = TextContent()
		tc.text = text
		return tc

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
		"""
		Serialize a list of browser-use messages to OCI Raw API Message objects.

		Args:
			messages: List of browser-use messages

		Returns:
			List of OCI Message objects; messages that end up with no content
			are dropped.
		"""
		oci_messages = []
		for message in messages:
			oci_message = Message()
			if isinstance(message, UserMessage):
				oci_message.role = 'USER'
				content = message.content
				if isinstance(content, str):
					oci_message.content = [OCIRawMessageSerializer._text_content(content)]
				elif isinstance(content, list):
					# Handle content parts - text and images
					contents = []
					for part in content:
						if part.type == 'text':
							contents.append(OCIRawMessageSerializer._text_content(part.text))
						elif part.type == 'image_url':
							contents.append(OCIRawMessageSerializer._create_image_content(part))
					if contents:
						oci_message.content = contents
			elif isinstance(message, SystemMessage):
				oci_message.role = 'SYSTEM'
				content = message.content
				if isinstance(content, str):
					oci_message.content = [OCIRawMessageSerializer._text_content(content)]
				elif isinstance(content, list):
					# Handle content parts - typically just text for system messages
					contents = []
					for part in content:
						if part.type == 'text':
							contents.append(OCIRawMessageSerializer._text_content(part.text))
						elif part.type == 'image_url':
							# System messages can theoretically have images too
							contents.append(OCIRawMessageSerializer._create_image_content(part))
					if contents:
						oci_message.content = contents
			elif isinstance(message, AssistantMessage):
				oci_message.role = 'ASSISTANT'
				content = message.content
				if isinstance(content, str):
					oci_message.content = [OCIRawMessageSerializer._text_content(content)]
				elif isinstance(content, list):
					# Handle content parts - text, images, and refusals
					contents = []
					for part in content:
						if part.type == 'text':
							contents.append(OCIRawMessageSerializer._text_content(part.text))
						elif part.type == 'image_url':
							# Assistant messages can have images in responses
							# Note: This is currently unreachable in browser-use but kept for completeness
							contents.append(OCIRawMessageSerializer._create_image_content(part))
						elif part.type == 'refusal':
							contents.append(OCIRawMessageSerializer._text_content(f'[Refusal] {part.refusal}'))
					if contents:
						oci_message.content = contents
			else:
				# Fallback for any message format issues
				oci_message.role = 'USER'
				oci_message.content = [OCIRawMessageSerializer._text_content(str(message))]

			# Only append messages that have content
			if hasattr(oci_message, 'content') and oci_message.content:
				oci_messages.append(oci_message)

		return oci_messages

	@staticmethod
	def serialize_messages_for_cohere(messages: list[BaseMessage]) -> str:
		"""
		Serialize messages for Cohere models which expect a single message string.

		Cohere models use CohereChatRequest.message (string) instead of messages array.
		We combine all messages into a single conversation string.

		Args:
			messages: List of browser-use messages

		Returns:
			Single string containing the conversation
		"""
		conversation_parts = []

		for message in messages:
			content = ''
			if isinstance(message, UserMessage):
				if isinstance(message.content, str):
					content = message.content
				elif isinstance(message.content, list):
					# Extract text from content parts
					text_parts = []
					for part in message.content:
						if part.type == 'text':
							text_parts.append(part.text)
						elif part.type == 'image_url':
							# Cohere may not support images in all models, use a short placeholder
							# to avoid massive token usage from base64 data URIs
							if part.image_url.url.startswith('data:image/'):
								text_parts.append('[Image: base64_data]')
							else:
								text_parts.append('[Image: external_url]')
					content = ' '.join(text_parts)
				conversation_parts.append(f'User: {content}')
			elif isinstance(message, SystemMessage):
				if isinstance(message.content, str):
					content = message.content
				elif isinstance(message.content, list):
					# Extract text from content parts
					text_parts = []
					for part in message.content:
						if part.type == 'text':
							text_parts.append(part.text)
					content = ' '.join(text_parts)
				conversation_parts.append(f'System: {content}')
			elif isinstance(message, AssistantMessage):
				if isinstance(message.content, str):
					content = message.content
				elif isinstance(message.content, list):
					# Extract text from content parts
					text_parts = []
					for part in message.content:
						if part.type == 'text':
							text_parts.append(part.text)
						elif part.type == 'refusal':
							text_parts.append(f'[Refusal] {part.refusal}')
					content = ' '.join(text_parts)
				conversation_parts.append(f'Assistant: {content}')
			else:
				# Fallback
				conversation_parts.append(f'User: {str(message)}')

		return '\n\n'.join(conversation_parts)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/oci_raw/serializer.py",
"license": "MIT License",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/vercel/chat.py | import json
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeAlias, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage, ContentPartTextParam, SystemMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.vercel.serializer import VercelMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
# Known model identifiers accepted by the Vercel AI Gateway, namespaced as
# "<provider>/<model>". ChatVercel.model is typed `ChatVercelModel | str`, so
# arbitrary strings are still accepted at runtime; this alias exists for
# autocompletion and static type checking of the common models.
ChatVercelModel: TypeAlias = Literal[
	'alibaba/qwen-3-14b',
	'alibaba/qwen-3-235b',
	'alibaba/qwen-3-30b',
	'alibaba/qwen-3-32b',
	'alibaba/qwen3-coder',
	'alibaba/qwen3-coder-30b-a3b',
	'alibaba/qwen3-coder-plus',
	'alibaba/qwen3-max',
	'alibaba/qwen3-max-preview',
	'alibaba/qwen3-next-80b-a3b-instruct',
	'alibaba/qwen3-next-80b-a3b-thinking',
	'alibaba/qwen3-vl-instruct',
	'alibaba/qwen3-vl-thinking',
	'amazon/nova-lite',
	'amazon/nova-micro',
	'amazon/nova-pro',
	'amazon/titan-embed-text-v2',
	'anthropic/claude-3-haiku',
	'anthropic/claude-3-opus',
	'anthropic/claude-3.5-haiku',
	'anthropic/claude-3.5-sonnet',
	'anthropic/claude-3.5-sonnet-20240620',
	'anthropic/claude-3.7-sonnet',
	'anthropic/claude-haiku-4.5',
	'anthropic/claude-opus-4',
	'anthropic/claude-opus-4.1',
	'anthropic/claude-sonnet-4',
	'anthropic/claude-sonnet-4.5',
	'cohere/command-a',
	'cohere/command-r',
	'cohere/command-r-plus',
	'cohere/embed-v4.0',
	'deepseek/deepseek-r1',
	'deepseek/deepseek-r1-distill-llama-70b',
	'deepseek/deepseek-v3',
	'deepseek/deepseek-v3.1',
	'deepseek/deepseek-v3.1-base',
	'deepseek/deepseek-v3.1-terminus',
	'deepseek/deepseek-v3.2-exp',
	'deepseek/deepseek-v3.2-exp-thinking',
	'google/gemini-2.0-flash',
	'google/gemini-2.0-flash-lite',
	'google/gemini-2.5-flash',
	'google/gemini-2.5-flash-image',
	'google/gemini-2.5-flash-image-preview',
	'google/gemini-2.5-flash-lite',
	'google/gemini-2.5-flash-lite-preview-09-2025',
	'google/gemini-2.5-flash-preview-09-2025',
	'google/gemini-2.5-pro',
	'google/gemini-embedding-001',
	'google/gemma-2-9b',
	'google/text-embedding-005',
	'google/text-multilingual-embedding-002',
	'inception/mercury-coder-small',
	'meituan/longcat-flash-chat',
	'meituan/longcat-flash-thinking',
	'meta/llama-3-70b',
	'meta/llama-3-8b',
	'meta/llama-3.1-70b',
	'meta/llama-3.1-8b',
	'meta/llama-3.2-11b',
	'meta/llama-3.2-1b',
	'meta/llama-3.2-3b',
	'meta/llama-3.2-90b',
	'meta/llama-3.3-70b',
	'meta/llama-4-maverick',
	'meta/llama-4-scout',
	'mistral/codestral',
	'mistral/codestral-embed',
	'mistral/devstral-small',
	'mistral/magistral-medium',
	'mistral/magistral-medium-2506',
	'mistral/magistral-small',
	'mistral/magistral-small-2506',
	'mistral/ministral-3b',
	'mistral/ministral-8b',
	'mistral/mistral-embed',
	'mistral/mistral-large',
	'mistral/mistral-medium',
	'mistral/mistral-small',
	'mistral/mixtral-8x22b-instruct',
	'mistral/pixtral-12b',
	'mistral/pixtral-large',
	'moonshotai/kimi-k2',
	'moonshotai/kimi-k2-0905',
	'moonshotai/kimi-k2-turbo',
	'morph/morph-v3-fast',
	'morph/morph-v3-large',
	'openai/gpt-3.5-turbo',
	'openai/gpt-3.5-turbo-instruct',
	'openai/gpt-4-turbo',
	'openai/gpt-4.1',
	'openai/gpt-4.1-mini',
	'openai/gpt-4.1-nano',
	'openai/gpt-4o',
	'openai/gpt-4o-mini',
	'openai/gpt-5',
	'openai/gpt-5-codex',
	'openai/gpt-5-mini',
	'openai/gpt-5-nano',
	'openai/gpt-5-pro',
	'openai/gpt-oss-120b',
	'openai/gpt-oss-20b',
	'openai/o1',
	'openai/o3',
	'openai/o3-mini',
	'openai/o4-mini',
	'openai/text-embedding-3-large',
	'openai/text-embedding-3-small',
	'openai/text-embedding-ada-002',
	'perplexity/sonar',
	'perplexity/sonar-pro',
	'perplexity/sonar-reasoning',
	'perplexity/sonar-reasoning-pro',
	'stealth/sonoma-dusk-alpha',
	'stealth/sonoma-sky-alpha',
	'vercel/v0-1.0-md',
	'vercel/v0-1.5-md',
	'voyage/voyage-3-large',
	'voyage/voyage-3.5',
	'voyage/voyage-3.5-lite',
	'voyage/voyage-code-2',
	'voyage/voyage-code-3',
	'voyage/voyage-finance-2',
	'voyage/voyage-law-2',
	'xai/grok-2',
	'xai/grok-2-vision',
	'xai/grok-3',
	'xai/grok-3-fast',
	'xai/grok-3-mini',
	'xai/grok-3-mini-fast',
	'xai/grok-4',
	'xai/grok-4-fast-non-reasoning',
	'xai/grok-4-fast-reasoning',
	'xai/grok-code-fast-1',
	'zai/glm-4.5',
	'zai/glm-4.5-air',
	'zai/glm-4.5v',
	'zai/glm-4.6',
]
@dataclass
class ChatVercel(BaseChatModel):
    """
    A wrapper around Vercel AI Gateway's API, which provides OpenAI-compatible access
    to various LLM models with features like rate limiting, caching, and monitoring.

    Examples:
    ```python
    from browser_use import Agent, ChatVercel

    llm = ChatVercel(model='openai/gpt-4o', api_key='your_vercel_api_key')
    agent = Agent(task='Your task here', llm=llm)
    ```

    Args:
        model: The model identifier
        api_key: Your Vercel API key
        base_url: The Vercel AI Gateway endpoint (defaults to https://ai-gateway.vercel.sh/v1)
        temperature: Sampling temperature (0-2)
        max_tokens: Maximum tokens to generate
        reasoning_models: List of reasoning model patterns (e.g., 'o1', 'gpt-oss') that need
            prompt-based JSON extraction. Auto-detects common reasoning models by default.
        timeout: Request timeout in seconds
        max_retries: Maximum number of retries for failed requests
        provider_options: Provider routing options for the gateway. Use this to control which
            providers are used and in what order. Example: {'gateway': {'order': ['vertex', 'anthropic']}}
    """

    # Model configuration
    model: ChatVercelModel | str

    # Model params
    temperature: float | None = None
    max_tokens: int | None = None
    top_p: float | None = None
    # Patterns matched case-insensitively against the model name in ainvoke();
    # matching models use prompt-based JSON extraction instead of response_format.
    reasoning_models: list[str] | None = field(
        default_factory=lambda: [
            'o1',
            'o3',
            'o4',
            'gpt-oss',
            'deepseek-r1',
            'qwen3-next-80b-a3b-thinking',
        ]
    )

    # Client initialization parameters
    api_key: str | None = None
    base_url: str | httpx.URL = 'https://ai-gateway.vercel.sh/v1'
    timeout: float | httpx.Timeout | None = None
    max_retries: int = 5
    default_headers: Mapping[str, str] | None = None
    default_query: Mapping[str, object] | None = None
    http_client: httpx.AsyncClient | None = None
    _strict_response_validation: bool = False
    provider_options: dict[str, Any] | None = None

    # Static
    @property
    def provider(self) -> str:
        """Provider identifier string for this chat model wrapper."""
        return 'vercel'

    def _get_client_params(self) -> dict[str, Any]:
        """Prepare client parameters dictionary.

        Drops None-valued entries so the OpenAI client's own defaults apply;
        http_client is handled separately because it must be passed even though
        it participates in the None filter above.
        """
        base_params = {
            'api_key': self.api_key,
            'base_url': self.base_url,
            'timeout': self.timeout,
            'max_retries': self.max_retries,
            'default_headers': self.default_headers,
            'default_query': self.default_query,
            '_strict_response_validation': self._strict_response_validation,
        }
        # Filter out None values so unset options fall back to client defaults.
        client_params = {k: v for k, v in base_params.items() if v is not None}
        if self.http_client is not None:
            client_params['http_client'] = self.http_client
        return client_params

    def get_client(self) -> AsyncOpenAI:
        """
        Returns an AsyncOpenAI client configured for Vercel AI Gateway.

        The client is created lazily on first use and cached on the instance.

        Returns:
            AsyncOpenAI: An instance of the AsyncOpenAI client with Vercel base URL.
        """
        if not hasattr(self, '_client'):
            client_params = self._get_client_params()
            self._client = AsyncOpenAI(**client_params)
        return self._client

    @property
    def name(self) -> str:
        """Model name used in logs and error reporting."""
        return str(self.model)

    def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
        """Extract usage information from the Vercel response.

        Returns None when the response carries no usage block. Cached-token
        counts are read defensively since not all gateway providers report them.
        """
        if response.usage is None:
            return None
        prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
        cached_tokens = prompt_details.cached_tokens if prompt_details else None
        return ChatInvokeUsage(
            prompt_tokens=response.usage.prompt_tokens,
            prompt_cached_tokens=cached_tokens,
            prompt_cache_creation_tokens=None,
            prompt_image_tokens=None,
            completion_tokens=response.usage.completion_tokens,
            total_tokens=response.usage.total_tokens,
        )

    def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
        """
        Convert a Pydantic model to a Gemini-compatible schema.

        This function removes unsupported properties like 'additionalProperties' and resolves
        $ref references that Gemini doesn't support.

        NOTE(review): this helper is not called anywhere in the visible code of this
        class — ainvoke() uses SchemaOptimizer instead; confirm before removing.
        """
        # Handle $defs and $ref resolution
        if '$defs' in schema:
            defs = schema.pop('$defs')

            def resolve_refs(obj: Any) -> Any:
                # Recursively inline every $ref whose target exists in the popped $defs.
                if isinstance(obj, dict):
                    if '$ref' in obj:
                        ref = obj.pop('$ref')
                        ref_name = ref.split('/')[-1]
                        if ref_name in defs:
                            # Replace the reference with the actual definition
                            resolved = defs[ref_name].copy()
                            # Merge any additional properties from the reference
                            for key, value in obj.items():
                                if key != '$ref':
                                    resolved[key] = value
                            return resolve_refs(resolved)
                        return obj
                    else:
                        # Recursively process all dictionary values
                        return {k: resolve_refs(v) for k, v in obj.items()}
                elif isinstance(obj, list):
                    return [resolve_refs(item) for item in obj]
                return obj

            schema = resolve_refs(schema)

        # Remove unsupported properties
        def clean_schema(obj: Any) -> Any:
            # Strips keys Gemini rejects and guarantees OBJECT types never have
            # an empty 'properties' map (Gemini disallows empty OBJECT schemas).
            if isinstance(obj, dict):
                # Remove unsupported properties
                cleaned = {}
                for key, value in obj.items():
                    if key not in ['additionalProperties', 'title', 'default']:
                        cleaned_value = clean_schema(value)
                        # Handle empty object properties - Gemini doesn't allow empty OBJECT types
                        if (
                            key == 'properties'
                            and isinstance(cleaned_value, dict)
                            and len(cleaned_value) == 0
                            and isinstance(obj.get('type', ''), str)
                            and obj.get('type', '').upper() == 'OBJECT'
                        ):
                            # Convert empty object to have at least one property
                            cleaned['properties'] = {'_placeholder': {'type': 'string'}}
                        else:
                            cleaned[key] = cleaned_value
                # If this is an object type with empty properties, add a placeholder
                if (
                    isinstance(cleaned.get('type', ''), str)
                    and cleaned.get('type', '').upper() == 'OBJECT'
                    and 'properties' in cleaned
                    and isinstance(cleaned['properties'], dict)
                    and len(cleaned['properties']) == 0
                ):
                    cleaned['properties'] = {'_placeholder': {'type': 'string'}}
                # Also remove 'title' from the required list if it exists
                if 'required' in cleaned and isinstance(cleaned.get('required'), list):
                    cleaned['required'] = [p for p in cleaned['required'] if p != 'title']
                return cleaned
            elif isinstance(obj, list):
                return [clean_schema(item) for item in obj]
            return obj

        return clean_schema(schema)

    @overload
    async def ainvoke(
        self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[str]: ...

    @overload
    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

    async def ainvoke(
        self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
        """
        Invoke the model with the given messages through Vercel AI Gateway.

        Three paths exist:
        1. No output_format: plain text completion.
        2. Structured output on google/anthropic/reasoning models: the JSON schema is
           appended to the prompt and the reply is parsed (with code-fence stripping),
           because these providers don't reliably support response_format.
        3. Structured output otherwise: native response_format with a strict JSON schema.

        Args:
            messages: List of chat messages
            output_format: Optional Pydantic model class for structured output

        Returns:
            Either a string response or an instance of output_format

        Raises:
            ModelRateLimitError: on provider rate limiting.
            ModelProviderError: on connection/status errors, empty responses, or
                unparseable structured output.
        """
        vercel_messages = VercelMessageSerializer.serialize_messages(messages)
        try:
            # Only forward sampling params the caller actually set.
            model_params: dict[str, Any] = {}
            if self.temperature is not None:
                model_params['temperature'] = self.temperature
            if self.max_tokens is not None:
                model_params['max_tokens'] = self.max_tokens
            if self.top_p is not None:
                model_params['top_p'] = self.top_p
            if self.provider_options:
                model_params['extra_body'] = {'providerOptions': self.provider_options}
            if output_format is None:
                # Return string response
                response = await self.get_client().chat.completions.create(
                    model=self.model,
                    messages=vercel_messages,
                    **model_params,
                )
                usage = self._get_usage(response)
                return ChatInvokeCompletion(
                    completion=response.choices[0].message.content or '',
                    usage=usage,
                    stop_reason=response.choices[0].finish_reason if response.choices else None,
                )
            else:
                is_google_model = self.model.startswith('google/')
                is_anthropic_model = self.model.startswith('anthropic/')
                is_reasoning_model = self.reasoning_models and any(
                    str(pattern).lower() in str(self.model).lower() for pattern in self.reasoning_models
                )
                if is_google_model or is_anthropic_model or is_reasoning_model:
                    # Prompt-based JSON path: embed the schema into the conversation.
                    # Deep-copy so the caller's message objects are never mutated.
                    modified_messages = [m.model_copy(deep=True) for m in messages]
                    schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
                    json_instruction = f'\n\nIMPORTANT: You must respond with ONLY a valid JSON object (no markdown, no code blocks, no explanations) that exactly matches this schema:\n{json.dumps(schema, indent=2)}'
                    # Prefer attaching to the system message, then the last user
                    # message, else prepend a fresh system message.
                    instruction_added = False
                    if modified_messages and modified_messages[0].role == 'system':
                        if isinstance(modified_messages[0].content, str):
                            modified_messages[0].content += json_instruction
                            instruction_added = True
                        elif isinstance(modified_messages[0].content, list):
                            modified_messages[0].content.append(ContentPartTextParam(text=json_instruction))
                            instruction_added = True
                    elif modified_messages and modified_messages[-1].role == 'user':
                        if isinstance(modified_messages[-1].content, str):
                            modified_messages[-1].content += json_instruction
                            instruction_added = True
                        elif isinstance(modified_messages[-1].content, list):
                            modified_messages[-1].content.append(ContentPartTextParam(text=json_instruction))
                            instruction_added = True
                    if not instruction_added:
                        modified_messages.insert(0, SystemMessage(content=json_instruction))
                    vercel_messages = VercelMessageSerializer.serialize_messages(modified_messages)
                    request_params = model_params.copy()
                    if self.provider_options:
                        request_params['extra_body'] = {'providerOptions': self.provider_options}
                    response = await self.get_client().chat.completions.create(
                        model=self.model,
                        messages=vercel_messages,
                        **request_params,
                    )
                    content = response.choices[0].message.content if response.choices else None
                    if not content:
                        raise ModelProviderError(
                            message='No response from model',
                            status_code=500,
                            model=self.name,
                        )
                    try:
                        # Models sometimes wrap JSON in markdown fences despite
                        # the instruction; strip ```json ... ``` / ``` ... ```.
                        text = content.strip()
                        if text.startswith('```json') and text.endswith('```'):
                            text = text[7:-3].strip()
                        elif text.startswith('```') and text.endswith('```'):
                            text = text[3:-3].strip()
                        parsed_data = json.loads(text)
                        parsed = output_format.model_validate(parsed_data)
                        usage = self._get_usage(response)
                        return ChatInvokeCompletion(
                            completion=parsed,
                            usage=usage,
                            stop_reason=response.choices[0].finish_reason if response.choices else None,
                        )
                    except (json.JSONDecodeError, ValueError) as e:
                        raise ModelProviderError(
                            message=f'Failed to parse JSON response: {str(e)}. Raw response: {content[:200]}',
                            status_code=500,
                            model=self.name,
                        ) from e
                else:
                    # Native structured-output path via response_format.
                    schema = SchemaOptimizer.create_optimized_json_schema(output_format)
                    response_format_schema: JSONSchema = {
                        'name': 'agent_output',
                        'strict': True,
                        'schema': schema,
                    }
                    request_params = model_params.copy()
                    if self.provider_options:
                        request_params['extra_body'] = {'providerOptions': self.provider_options}
                    response = await self.get_client().chat.completions.create(
                        model=self.model,
                        messages=vercel_messages,
                        response_format=ResponseFormatJSONSchema(
                            json_schema=response_format_schema,
                            type='json_schema',
                        ),
                        **request_params,
                    )
                    content = response.choices[0].message.content if response.choices else None
                    if not content:
                        raise ModelProviderError(
                            message='Failed to parse structured output from model response - empty or null content',
                            status_code=500,
                            model=self.name,
                        )
                    usage = self._get_usage(response)
                    parsed = output_format.model_validate_json(content)
                    return ChatInvokeCompletion(
                        completion=parsed,
                        usage=usage,
                        stop_reason=response.choices[0].finish_reason if response.choices else None,
                    )
        except RateLimitError as e:
            raise ModelRateLimitError(message=e.message, model=self.name) from e
        except APIConnectionError as e:
            raise ModelProviderError(message=str(e), model=self.name) from e
        except APIStatusError as e:
            raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
        except Exception as e:
            # Catch-all so callers always see a ModelProviderError from this layer.
            raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/vercel/chat.py",
"license": "MIT License",
"lines": 468,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/vercel/serializer.py | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class VercelMessageSerializer:
    """
    Converts browser_use message objects into the wire format expected by
    Vercel AI Gateway.

    The gateway speaks the OpenAI-compatible chat API, so serialization is
    delegated entirely to the existing OpenAI serializer.
    """

    @staticmethod
    def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
        """
        Serialize browser_use messages for Vercel AI Gateway.

        Args:
            messages: browser_use message objects to convert

        Returns:
            Messages in OpenAI chat-completion format, which the gateway accepts unchanged
        """
        # No Vercel-specific transformation is needed; reuse the OpenAI path.
        serialized = OpenAIMessageSerializer.serialize_messages(messages)
        return serialized
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/vercel/serializer.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/sandbox/sandbox.py | import ast
import asyncio
import base64
import dataclasses
import enum
import inspect
import json
import os
import sys
import textwrap
from collections.abc import Callable, Coroutine
from functools import wraps
from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, Union, cast, get_args, get_origin
import cloudpickle
import httpx
from browser_use.sandbox.views import (
BrowserCreatedData,
ErrorData,
LogData,
ResultData,
SandboxError,
SSEEvent,
SSEEventType,
)
if TYPE_CHECKING:
from browser_use.browser import BrowserSession
T = TypeVar('T')
P = ParamSpec('P')
def get_terminal_width() -> int:
    """Return the terminal's column count, falling back to 80 when it cannot be detected."""
    try:
        size = os.get_terminal_size()
    except (AttributeError, OSError):
        # No attached terminal (e.g. piped output) — use a conventional default.
        return 80
    return size.columns
async def _call_callback(callback: Callable[..., Any], *args: Any) -> None:
"""Call a callback that can be either sync or async"""
result = callback(*args)
if asyncio.iscoroutine(result):
await result
def _get_function_source_without_decorator(func: Callable) -> str:
"""Get function source code with decorator removed"""
source = inspect.getsource(func)
source = textwrap.dedent(source)
# Parse and remove decorator
tree = ast.parse(source)
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
node.decorator_list = []
break
return ast.unparse(tree)
def _get_imports_used_in_function(func: Callable) -> str:
    """Extract only imports that are referenced in the function body or type annotations.

    Scans the module that defines *func* and returns, as newline-joined source
    text, the subset of its top-level import statements whose bound names are
    referenced either by the function's code object or by its parameter/return
    annotations. Returns '' when the module file cannot be read or parsed.
    """
    # Get all names referenced in the function
    code = func.__code__
    referenced_names = set(code.co_names)

    # Also get names from type annotations (recursively for complex types like Union, Literal, etc.)
    def extract_type_names(annotation):
        """Recursively extract all type names from annotation"""
        if annotation is None or annotation == inspect.Parameter.empty:
            return
        # Handle Pydantic generics (e.g., AgentHistoryList[MyModel]) - check this FIRST
        # Pydantic generics have __pydantic_generic_metadata__ with 'origin' and 'args'
        pydantic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
        if pydantic_meta and pydantic_meta.get('origin'):
            # Add the origin class name (e.g., 'AgentHistoryList')
            origin_class = pydantic_meta['origin']
            if hasattr(origin_class, '__name__'):
                referenced_names.add(origin_class.__name__)
            # Recursively extract from generic args (e.g., MyModel)
            for arg in pydantic_meta.get('args', ()):
                extract_type_names(arg)
            return
        # Handle simple types with __name__
        if hasattr(annotation, '__name__'):
            referenced_names.add(annotation.__name__)
        # Handle string annotations
        if isinstance(annotation, str):
            referenced_names.add(annotation)
        # Handle generic types like Union[X, Y], Literal['x'], etc.
        origin = get_origin(annotation)
        args = get_args(annotation)
        if origin:
            # Add the origin type name (e.g., 'Union', 'Literal')
            if hasattr(origin, '__name__'):
                referenced_names.add(origin.__name__)
            # Recursively extract from generic args
            if args:
                for arg in args:
                    extract_type_names(arg)

    sig = inspect.signature(func)
    for param in sig.parameters.values():
        if param.annotation != inspect.Parameter.empty:
            extract_type_names(param.annotation)
    # Get return annotation (also extract recursively)
    if 'return' in func.__annotations__:
        extract_type_names(func.__annotations__['return'])

    # Get the module where function is defined
    module = inspect.getmodule(func)
    if not module or not hasattr(module, '__file__') or module.__file__ is None:
        # Built-in / REPL-defined functions have no source file to scan.
        return ''
    try:
        with open(module.__file__) as f:
            module_source = f.read()
        tree = ast.parse(module_source)
        needed_imports: list[str] = []
        # Only module top-level statements are considered (tree.body, not ast.walk),
        # so imports nested inside functions are ignored.
        for node in tree.body:
            if isinstance(node, ast.Import):
                # import X, Y
                for alias in node.names:
                    import_name = alias.asname if alias.asname else alias.name
                    if import_name in referenced_names:
                        # One referenced name is enough to keep the whole statement.
                        needed_imports.append(ast.unparse(node))
                        break
            elif isinstance(node, ast.ImportFrom):
                # from X import Y, Z
                imported_names = []
                for alias in node.names:
                    import_name = alias.asname if alias.asname else alias.name
                    if import_name in referenced_names:
                        imported_names.append(alias)
                if imported_names:
                    # Create filtered import statement containing only the referenced names
                    filtered_import = ast.ImportFrom(module=node.module, names=imported_names, level=node.level)
                    needed_imports.append(ast.unparse(filtered_import))
        return '\n'.join(needed_imports)
    except Exception:
        # Best-effort: unreadable/unparseable module source yields no imports.
        return ''
def _extract_all_params(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
"""Extract all parameters including explicit params and closure variables
Args:
func: The function being decorated
args: Positional arguments passed to the function
kwargs: Keyword arguments passed to the function
Returns:
Dictionary of all parameters {name: value}
"""
sig = inspect.signature(func)
bound_args = sig.bind_partial(*args, **kwargs)
bound_args.apply_defaults()
all_params: dict[str, Any] = {}
# 1. Extract explicit parameters (skip 'browser' and 'self')
for param_name, param_value in bound_args.arguments.items():
if param_name == 'browser':
continue
if param_name == 'self' and hasattr(param_value, '__dict__'):
# Extract self attributes as individual variables
for attr_name, attr_value in param_value.__dict__.items():
all_params[attr_name] = attr_value
else:
all_params[param_name] = param_value
# 2. Extract closure variables
if func.__closure__:
closure_vars = func.__code__.co_freevars
closure_values = [cell.cell_contents for cell in func.__closure__]
for name, value in zip(closure_vars, closure_values):
# Skip if already captured from explicit params
if name in all_params:
continue
# Special handling for 'self' in closures
if name == 'self' and hasattr(value, '__dict__'):
for attr_name, attr_value in value.__dict__.items():
if attr_name not in all_params:
all_params[attr_name] = attr_value
else:
all_params[name] = value
# 3. Extract referenced globals (like logger, module-level vars, etc.)
# Let cloudpickle handle serialization instead of special-casing
for name in func.__code__.co_names:
if name in all_params:
continue
if name in func.__globals__:
all_params[name] = func.__globals__[name]
return all_params
def sandbox(
    BROWSER_USE_API_KEY: str | None = None,
    cloud_profile_id: str | None = None,
    cloud_proxy_country_code: str | None = None,
    cloud_timeout: int | None = None,
    server_url: str | None = None,
    log_level: str = 'INFO',
    quiet: bool = False,
    headers: dict[str, str] | None = None,
    on_browser_created: Callable[[BrowserCreatedData], None]
    | Callable[[BrowserCreatedData], Coroutine[Any, Any, None]]
    | None = None,
    on_instance_ready: Callable[[], None] | Callable[[], Coroutine[Any, Any, None]] | None = None,
    on_log: Callable[[LogData], None] | Callable[[LogData], Coroutine[Any, Any, None]] | None = None,
    on_result: Callable[[ResultData], None] | Callable[[ResultData], Coroutine[Any, Any, None]] | None = None,
    on_error: Callable[[ErrorData], None] | Callable[[ErrorData], Coroutine[Any, Any, None]] | None = None,
    **env_vars: str,
) -> Callable[[Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T]]]:
    """Decorator to execute browser automation code in a sandbox environment.

    The decorated function MUST have 'browser: Browser' as its first parameter.
    The browser parameter will be automatically injected - do NOT pass it when calling the decorated function.
    All other parameters (explicit or from closure) will be captured and sent via cloudpickle.

    Args:
        BROWSER_USE_API_KEY: API key (defaults to BROWSER_USE_API_KEY env var)
        cloud_profile_id: The ID of the profile to use for the browser session
        cloud_proxy_country_code: Country code for proxy location (e.g., 'us', 'uk', 'fr')
        cloud_timeout: The timeout for the browser session in minutes (max 240 = 4 hours)
        server_url: Sandbox server URL (defaults to https://sandbox.api.browser-use.com/sandbox-stream)
        log_level: Logging level (INFO, DEBUG, WARNING, ERROR)
        quiet: Suppress console output
        headers: Additional HTTP headers to send with the request
        on_browser_created: Callback when browser is created
        on_instance_ready: Callback when instance is ready
        on_log: Callback for log events
        on_result: Callback when execution completes
        on_error: Callback for errors
        **env_vars: Additional environment variables

    Example:
        @sandbox()
        async def task(browser: Browser, url: str, max_steps: int) -> str:
            agent = Agent(task=url, browser=browser)
            await agent.run(max_steps=max_steps)
            return "done"

        # Call with:
        result = await task(url="https://example.com", max_steps=10)

        # With cloud parameters:
        @sandbox(cloud_proxy_country_code='us', cloud_timeout=60)
        async def task_with_proxy(browser: Browser) -> str:
            ...
    """

    def decorator(
        func: Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]],
    ) -> Callable[P, Coroutine[Any, Any, T]]:
        # Validate function has browser parameter (fail fast, at decoration time)
        sig = inspect.signature(func)
        if 'browser' not in sig.parameters:
            raise TypeError(f'{func.__name__}() must have a "browser" parameter')
        browser_param = sig.parameters['browser']
        if browser_param.annotation != inspect.Parameter.empty:
            # Loose string check so both `Browser` and `'Browser'` forward refs pass.
            annotation_str = str(browser_param.annotation)
            if 'Browser' not in annotation_str:
                raise TypeError(f'{func.__name__}() browser parameter must be typed as Browser, got {annotation_str}')

        @wraps(func)
        async def wrapper(*args, **kwargs) -> T:
            # 1. Get API key
            api_key = BROWSER_USE_API_KEY or os.getenv('BROWSER_USE_API_KEY')
            if not api_key:
                raise SandboxError('BROWSER_USE_API_KEY is required')

            # 2. Extract all parameters (explicit + closure)
            all_params = _extract_all_params(func, args, kwargs)

            # 3. Get function source without decorator and only needed imports
            func_source = _get_function_source_without_decorator(func)
            needed_imports = _get_imports_used_in_function(func)
            # Always include Browser import since it's required for the function signature
            if needed_imports:
                needed_imports = 'from browser_use import Browser\n' + needed_imports
            else:
                needed_imports = 'from browser_use import Browser'

            # 4. Pickle parameters using cloudpickle for robust serialization
            pickled_params = base64.b64encode(cloudpickle.dumps(all_params)).decode()

            # 5. Determine which params are in the function signature vs closure/globals
            func_param_names = {p.name for p in sig.parameters.values() if p.name != 'browser'}
            non_explicit_params = {k: v for k, v in all_params.items() if k not in func_param_names}
            explicit_params = {k: v for k, v in all_params.items() if k in func_param_names}

            # Inject closure variables and globals as module-level vars
            var_injections = []
            for var_name in non_explicit_params.keys():
                var_injections.append(f"{var_name} = _params['{var_name}']")
            var_injection_code = '\n'.join(var_injections) if var_injections else '# No closure variables or globals'

            # Build function call (explicit params are passed as keyword arguments)
            if explicit_params:
                function_call = (
                    f'await {func.__name__}(browser=browser, **{{k: _params[k] for k in {list(explicit_params.keys())!r}}})'
                )
            else:
                function_call = f'await {func.__name__}(browser=browser)'

            # 6. Create wrapper code that unpickles params and calls function
            execution_code = f"""import cloudpickle
import base64

# Imports used in function
{needed_imports}

# Unpickle all parameters (explicit, closure, and globals)
_pickled_params = base64.b64decode({repr(pickled_params)})
_params = cloudpickle.loads(_pickled_params)

# Inject closure variables and globals into module scope
{var_injection_code}

# Original function (decorator removed)
{func_source}

# Wrapper function that passes explicit params
async def run(browser):
    return {function_call}
"""

            # 9. Send to server
            payload: dict[str, Any] = {'code': base64.b64encode(execution_code.encode()).decode()}
            combined_env: dict[str, str] = env_vars.copy() if env_vars else {}
            combined_env['LOG_LEVEL'] = log_level.upper()
            payload['env'] = combined_env
            # Add cloud parameters if provided
            if cloud_profile_id is not None:
                payload['cloud_profile_id'] = cloud_profile_id
            if cloud_proxy_country_code is not None:
                payload['cloud_proxy_country_code'] = cloud_proxy_country_code
            if cloud_timeout is not None:
                payload['cloud_timeout'] = cloud_timeout
            url = server_url or 'https://sandbox.api.browser-use.com/sandbox-stream'
            request_headers = {'X-API-Key': api_key}
            if headers:
                request_headers.update(headers)

            # 10. Handle SSE streaming
            # Sentinel distinguishes "no result yet" from a legitimate None result.
            _NO_RESULT = object()
            execution_result = _NO_RESULT
            live_url_shown = False
            execution_started = False
            # NOTE(review): received_final_event is set below but never read in
            # this function — confirm whether it can be removed.
            received_final_event = False
            async with httpx.AsyncClient(timeout=1800.0) as client:
                async with client.stream('POST', url, json=payload, headers=request_headers) as response:
                    response.raise_for_status()
                    try:
                        async for line in response.aiter_lines():
                            # SSE data lines are prefixed with 'data: '; skip everything else.
                            if not line or not line.startswith('data: '):
                                continue
                            event_json = line[6:]
                            try:
                                event = SSEEvent.from_json(event_json)
                                if event.type == SSEEventType.BROWSER_CREATED:
                                    assert isinstance(event.data, BrowserCreatedData)
                                    if on_browser_created:
                                        try:
                                            await _call_callback(on_browser_created, event.data)
                                        except Exception as e:
                                            # Callback failures must not kill the stream.
                                            if not quiet:
                                                print(f'⚠️ Error in on_browser_created callback: {e}')
                                    if not quiet and event.data.live_url and not live_url_shown:
                                        width = get_terminal_width()
                                        print('\n' + '━' * width)
                                        print('👁️ LIVE BROWSER VIEW (Click to watch)')
                                        print(f'🔗 {event.data.live_url}')
                                        print('━' * width)
                                        live_url_shown = True
                                elif event.type == SSEEventType.LOG:
                                    assert isinstance(event.data, LogData)
                                    message = event.data.message
                                    level = event.data.level
                                    if on_log:
                                        try:
                                            await _call_callback(on_log, event.data)
                                        except Exception as e:
                                            if not quiet:
                                                print(f'⚠️ Error in on_log callback: {e}')
                                    if level == 'stdout':
                                        if not quiet:
                                            # Print the runtime-output banner once, lazily.
                                            if not execution_started:
                                                width = get_terminal_width()
                                                print('\n' + '─' * width)
                                                print('⚡ Runtime Output')
                                                print('─' * width)
                                                execution_started = True
                                            print(f' {message}', end='')
                                    elif level == 'stderr':
                                        if not quiet:
                                            if not execution_started:
                                                width = get_terminal_width()
                                                print('\n' + '─' * width)
                                                print('⚡ Runtime Output')
                                                print('─' * width)
                                                execution_started = True
                                            print(f'⚠️ {message}', end='', file=sys.stderr)
                                    elif level == 'info':
                                        if not quiet:
                                            # Credit-balance messages get special formatting.
                                            if 'credit' in message.lower():
                                                import re

                                                match = re.search(r'\$[\d,]+\.?\d*', message)
                                                if match:
                                                    print(f'💰 You have {match.group()} credits')
                                            else:
                                                print(f'ℹ️ {message}')
                                    else:
                                        if not quiet:
                                            print(f' {message}')
                                elif event.type == SSEEventType.INSTANCE_READY:
                                    if on_instance_ready:
                                        try:
                                            await _call_callback(on_instance_ready)
                                        except Exception as e:
                                            if not quiet:
                                                print(f'⚠️ Error in on_instance_ready callback: {e}')
                                    if not quiet:
                                        print('✅ Browser ready, starting execution...\n')
                                elif event.type == SSEEventType.RESULT:
                                    assert isinstance(event.data, ResultData)
                                    exec_response = event.data.execution_response
                                    received_final_event = True
                                    if on_result:
                                        try:
                                            await _call_callback(on_result, event.data)
                                        except Exception as e:
                                            if not quiet:
                                                print(f'⚠️ Error in on_result callback: {e}')
                                    if exec_response.success:
                                        execution_result = exec_response.result
                                        if not quiet and execution_started:
                                            width = get_terminal_width()
                                            print('\n' + '─' * width)
                                            print()
                                    else:
                                        error_msg = exec_response.error or 'Unknown error'
                                        raise SandboxError(f'Execution failed: {error_msg}')
                                elif event.type == SSEEventType.ERROR:
                                    assert isinstance(event.data, ErrorData)
                                    received_final_event = True
                                    if on_error:
                                        try:
                                            await _call_callback(on_error, event.data)
                                        except Exception as e:
                                            if not quiet:
                                                print(f'⚠️ Error in on_error callback: {e}')
                                    raise SandboxError(f'Execution failed: {event.data.error}')
                            except (json.JSONDecodeError, ValueError):
                                # Malformed event payloads are skipped, not fatal.
                                continue
                    except (httpx.RemoteProtocolError, httpx.ReadError, httpx.StreamClosed) as e:
                        # With deterministic handshake, these should never happen
                        # If they do, it's a real error
                        raise SandboxError(
                            f'Stream error: {e.__class__.__name__}: {e or "connection closed unexpectedly"}'
                        ) from e

            # 11. Parse result with type annotation
            if execution_result is not _NO_RESULT:
                return_annotation = func.__annotations__.get('return')
                if return_annotation:
                    parsed_result = _parse_with_type_annotation(execution_result, return_annotation)
                    return parsed_result
                return execution_result  # type: ignore[return-value]
            raise SandboxError('No result received from execution')

        # Update wrapper signature to remove browser parameter so introspection
        # (and help()) shows the signature callers actually use.
        wrapper.__annotations__ = func.__annotations__.copy()
        if 'browser' in wrapper.__annotations__:
            del wrapper.__annotations__['browser']
        params = [p for p in sig.parameters.values() if p.name != 'browser']
        wrapper.__signature__ = sig.replace(parameters=params)  # type: ignore[attr-defined]
        return cast(Callable[P, Coroutine[Any, Any, T]], wrapper)

    return decorator
def _parse_with_type_annotation(data: Any, annotation: Any) -> Any:
    """Parse data with type annotation without validation, recursively handling nested types.

    This function reconstructs Pydantic models, dataclasses, and enums from JSON dicts
    without running validation logic. It recursively parses nested fields to ensure
    complete type fidelity.

    Deliberately best-effort: any failure at any level falls back to returning
    the raw data unchanged rather than raising.
    """
    try:
        if data is None:
            return None
        origin = get_origin(annotation)
        args = get_args(annotation)

        # Handle Union types (both typing.Union and PEP 604 `X | Y` UnionType)
        if origin is Union or (hasattr(annotation, '__class__') and annotation.__class__.__name__ == 'UnionType'):
            union_args = args or getattr(annotation, '__args__', [])
            for arg in union_args:
                if arg is type(None) and data is None:
                    return None
                if arg is not type(None):
                    # First member that parses without raising wins.
                    try:
                        return _parse_with_type_annotation(data, arg)
                    except Exception:
                        continue
            return data

        # Handle List types
        if origin is list:
            if not isinstance(data, list):
                return data
            if args:
                return [_parse_with_type_annotation(item, args[0]) for item in data]
            return data

        # Handle Tuple types (JSON serializes tuples as lists)
        if origin is tuple:
            if not isinstance(data, (list, tuple)):
                return data
            if args:
                # Parse each element according to its type annotation
                parsed_items = []
                for i, item in enumerate(data):
                    # Use the corresponding type arg, or the last one if fewer args than items
                    type_arg = args[i] if i < len(args) else args[-1] if args else Any
                    parsed_items.append(_parse_with_type_annotation(item, type_arg))
                return tuple(parsed_items)
            return tuple(data) if isinstance(data, list) else data

        # Handle Dict types
        if origin is dict:
            if not isinstance(data, dict):
                return data
            if len(args) == 2:
                return {_parse_with_type_annotation(k, args[0]): _parse_with_type_annotation(v, args[1]) for k, v in data.items()}
            return data

        # Handle Enum types
        if inspect.isclass(annotation) and issubclass(annotation, enum.Enum):
            if isinstance(data, str):
                try:
                    return annotation[data]  # By name
                except KeyError:
                    return annotation(data)  # By value
            return annotation(data)  # By value

        # Handle Pydantic v2 - use model_construct to skip validation and recursively parse nested fields
        # Get the actual class (unwrap generic if needed)
        # For Pydantic generics, get_origin() returns None, so check __pydantic_generic_metadata__ first
        pydantic_generic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
        if pydantic_generic_meta and pydantic_generic_meta.get('origin'):
            actual_class = pydantic_generic_meta['origin']
            generic_args = pydantic_generic_meta.get('args', ())
        else:
            actual_class = get_origin(annotation) or annotation
            generic_args = get_args(annotation)
        if hasattr(actual_class, 'model_construct'):
            if not isinstance(data, dict):
                return data
            # Recursively parse each field according to its type annotation
            if hasattr(actual_class, 'model_fields'):
                parsed_fields = {}
                for field_name, field_info in actual_class.model_fields.items():
                    if field_name in data:
                        field_annotation = field_info.annotation
                        parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
                result = actual_class.model_construct(**parsed_fields)
                # Special handling for AgentHistoryList: extract and set _output_model_schema from generic type parameter
                if actual_class.__name__ == 'AgentHistoryList' and generic_args:
                    output_model_schema = generic_args[0]
                    # Only set if it's an actual model class, not a TypeVar
                    if inspect.isclass(output_model_schema) and hasattr(output_model_schema, 'model_validate_json'):
                        result._output_model_schema = output_model_schema
                return result
            # Fallback if model_fields not available
            return actual_class.model_construct(**data)

        # Handle Pydantic v1 - use construct to skip validation and recursively parse nested fields
        if hasattr(annotation, 'construct'):
            if not isinstance(data, dict):
                return data
            # Recursively parse each field if __fields__ is available
            if hasattr(annotation, '__fields__'):
                parsed_fields = {}
                for field_name, field_obj in annotation.__fields__.items():
                    if field_name in data:
                        field_annotation = field_obj.outer_type_
                        parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
                return annotation.construct(**parsed_fields)
            # Fallback if __fields__ not available
            return annotation.construct(**data)

        # Handle dataclasses
        if dataclasses.is_dataclass(annotation) and isinstance(data, dict):
            # Get field type annotations
            field_types = {f.name: f.type for f in dataclasses.fields(annotation)}
            # Recursively parse each field
            parsed_fields = {}
            for field_name, field_type in field_types.items():
                if field_name in data:
                    parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_type)
            return cast(type[Any], annotation)(**parsed_fields)

        # Handle regular classes (best-effort keyword construction)
        if inspect.isclass(annotation) and isinstance(data, dict):
            try:
                return annotation(**data)
            except Exception:
                pass
        return data
    except Exception:
        # Never let reconstruction errors escape; raw data is the safe fallback.
        return data
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/sandbox/sandbox.py",
"license": "MIT License",
"lines": 555,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/sandbox/views.py | """Type-safe event models for sandbox execution SSE streaming"""
import json
from enum import Enum
from typing import Any
from pydantic import BaseModel
class SandboxError(Exception):
	"""Exception raised for sandbox execution failures."""

	pass
class SSEEventType(str, Enum):
	"""Event types for Server-Sent Events"""

	BROWSER_CREATED = 'browser_created'  # parsed into BrowserCreatedData by SSEEvent.from_json
	INSTANCE_CREATED = 'instance_created'  # no dedicated model; data stays a raw dict
	INSTANCE_READY = 'instance_ready'  # no dedicated model; data stays a raw dict
	LOG = 'log'  # parsed into LogData
	RESULT = 'result'  # parsed into ResultData
	ERROR = 'error'  # parsed into ErrorData
	STREAM_COMPLETE = 'stream_complete'  # no dedicated model; data stays a raw dict
class BrowserCreatedData(BaseModel):
	"""Data for browser_created event"""

	session_id: str  # identifier of the newly created browser session
	live_url: str  # URL of the live browser view
	status: str  # session status string
class LogData(BaseModel):
	"""Data for log event"""

	message: str  # the log message text
	level: str = 'info'  # stdout, stderr, info, warning, error
class ExecutionResponse(BaseModel):
	"""Execution result from the executor"""

	success: bool  # whether execution completed successfully
	result: Any = None  # arbitrary result payload; shape depends on the executed code
	error: str | None = None  # error message, if any
	traceback: str | None = None  # formatted traceback, if any
class ResultData(BaseModel):
	"""Data for result event"""

	# Wraps the executor's final ExecutionResponse payload.
	execution_response: ExecutionResponse
class ErrorData(BaseModel):
	"""Data for error event"""

	error: str  # human-readable error description
	traceback: str | None = None  # formatted traceback, when available
	status_code: int = 500  # HTTP-style status code; defaults to server error
class SSEEvent(BaseModel):
	"""Type-safe Server-Sent Event envelope.

	Usage:
	    # Parse from JSON
	    event = SSEEvent.from_json(event_json_string)

	    # Type-safe access with type guards
	    if event.is_browser_created():
	        assert isinstance(event.data, BrowserCreatedData)
	        print(event.data.live_url)

	    # Or check event type directly
	    if event.type == SSEEventType.LOG:
	        assert isinstance(event.data, LogData)
	        print(event.data.message)
	"""

	type: SSEEventType
	data: BrowserCreatedData | LogData | ResultData | ErrorData | dict[str, Any]
	timestamp: str | None = None

	@classmethod
	def from_json(cls, event_json: str) -> 'SSEEvent':
		"""Parse an SSE event from a JSON string with proper type discrimination.

		Args:
			event_json: JSON string taken from the SSE stream.

		Returns:
			Typed SSEEvent whose ``data`` is the model matching the event type.

		Raises:
			json.JSONDecodeError: If the JSON is malformed.
			ValueError: If the event type is invalid.
		"""
		payload = json.loads(event_json)
		event_type = SSEEventType(payload.get('type'))
		raw_data = payload.get('data', {})

		# Map each discriminated event type onto its data model; event types
		# without a dedicated model keep the raw dict.
		model_by_type = {
			SSEEventType.BROWSER_CREATED: BrowserCreatedData,
			SSEEventType.LOG: LogData,
			SSEEventType.RESULT: ResultData,
			SSEEventType.ERROR: ErrorData,
		}
		model_cls = model_by_type.get(event_type)
		parsed = model_cls(**raw_data) if model_cls is not None else raw_data

		return cls(type=event_type, data=parsed, timestamp=payload.get('timestamp'))

	def _is_typed(self, event_type: SSEEventType, model: type) -> bool:
		"""Shared guard: event type matches AND data was parsed into *model*."""
		return self.type == event_type and isinstance(self.data, model)

	def is_browser_created(self) -> bool:
		"""Type guard for BrowserCreatedData"""
		return self._is_typed(SSEEventType.BROWSER_CREATED, BrowserCreatedData)

	def is_log(self) -> bool:
		"""Type guard for LogData"""
		return self._is_typed(SSEEventType.LOG, LogData)

	def is_result(self) -> bool:
		"""Type guard for ResultData"""
		return self._is_typed(SSEEventType.RESULT, ResultData)

	def is_error(self) -> bool:
		"""Type guard for ErrorData"""
		return self._is_typed(SSEEventType.ERROR, ErrorData)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/sandbox/views.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tokens/custom_pricing.py | """
Custom model pricing for models not available in LiteLLM's pricing data.
Prices are per token (not per 1M tokens).
"""
from typing import Any
def _per_million(dollars_per_million: float) -> float:
	"""Convert a price quoted in $ per 1M tokens into $ per single token."""
	return dollars_per_million / 1_000_000


def _pricing_entry(input_per_m: float, output_per_m: float, cache_read_per_m: float) -> dict[str, Any]:
	"""Build one pricing record in LiteLLM's model_prices_and_context_window format.

	Context-window limits and cache-creation pricing are not published for
	these models, so those fields stay None.
	"""
	return {
		'input_cost_per_token': _per_million(input_per_m),
		'output_cost_per_token': _per_million(output_per_m),
		'cache_read_input_token_cost': _per_million(cache_read_per_m),
		'cache_creation_input_token_cost': None,  # Not specified
		'max_tokens': None,  # Not specified
		'max_input_tokens': None,  # Not specified
		'max_output_tokens': None,  # Not specified
	}


# Custom model pricing data
# Format matches LiteLLM's model_prices_and_context_window.json structure
CUSTOM_MODEL_PRICING: dict[str, dict[str, Any]] = {
	'bu-1-0': _pricing_entry(0.2, 2.00, 0.02),
	'bu-2-0': _pricing_entry(0.60, 3.50, 0.06),
}

# 'bu-latest' and 'smart' are aliases sharing the bu-1-0 pricing record.
CUSTOM_MODEL_PRICING['bu-latest'] = CUSTOM_MODEL_PRICING['bu-1-0']
CUSTOM_MODEL_PRICING['smart'] = CUSTOM_MODEL_PRICING['bu-1-0']
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tokens/custom_pricing.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/tools/utils.py | """Utility functions for browser tools."""
from browser_use.dom.service import EnhancedDOMTreeNode
def _checked_from_attributes(node: EnhancedDOMTreeNode) -> bool:
	"""Read the checked state from the element's `checked` HTML attribute.

	A bare attribute serializes as an empty string and still means checked.
	"""
	return node.attributes.get('checked', 'false').lower() in ['true', 'checked', '']


def _ax_checked_override(node: EnhancedDOMTreeNode, is_checked: bool) -> bool:
	"""Prefer the accessibility tree's `checked` property over *is_checked* when present."""
	if node.ax_node and node.ax_node.properties:
		for prop in node.ax_node.properties:
			if prop.name == 'checked':
				return prop.value is True or prop.value == 'true'
	return is_checked


def _is_hidden_by_opacity(node: EnhancedDOMTreeNode) -> bool:
	"""True when computed styles mark the element fully transparent."""
	if node.snapshot_node and node.snapshot_node.computed_styles:
		return node.snapshot_node.computed_styles.get('opacity', '1') in ('0', '0.0')
	return False


def get_click_description(node: EnhancedDOMTreeNode) -> str:
	"""Get a brief description of the clicked element for memory.

	The summary is a space-separated string of: tag name, input type,
	checkbox state (attributes first, accessibility tree as override),
	ARIA role, a short excerpt of descendant text (max 30 chars), and key
	identifying attributes (id / name / aria-label, truncated to 20 chars).
	"""
	parts = [node.tag_name]

	# Add type for inputs; checkboxes also report their checked state.
	if node.tag_name == 'input' and node.attributes.get('type'):
		input_type = node.attributes['type']
		parts.append(f'type={input_type}')
		if input_type == 'checkbox':
			checked = _ax_checked_override(node, _checked_from_attributes(node))
			parts.append(f'checkbox-state={"checked" if checked else "unchecked"}')

	# Add role if present; role=checkbox reports its aria-checked state.
	if node.attributes.get('role'):
		role = node.attributes['role']
		parts.append(f'role={role}')
		if role == 'checkbox':
			aria_checked = node.attributes.get('aria-checked', 'false').lower() in ['true', 'checked']
			checked = _ax_checked_override(node, aria_checked)
			parts.append(f'checkbox-state={"checked" if checked else "unchecked"}')

	# Labels/spans/divs may wrap a visually hidden checkbox (custom widgets):
	# surface the hidden checkbox's state instead.
	if node.tag_name in ['label', 'span', 'div'] and 'type=' not in ' '.join(parts):
		for child in node.children:
			if child.tag_name != 'input' or child.attributes.get('type') != 'checkbox':
				continue
			# Only report checkboxes that are hidden (opacity 0) or not visible;
			# a visible checkbox child keeps the original scan going.
			if not (_is_hidden_by_opacity(child) or not child.is_visible):
				continue
			checked = _ax_checked_override(child, _checked_from_attributes(child))
			parts.append(f'checkbox-state={"checked" if checked else "unchecked"}')
			break

	# Add short text content if available.
	text = node.get_all_children_text().strip()
	if text:
		short_text = text[:30] + ('...' if len(text) > 30 else '')
		parts.append(f'"{short_text}"')

	# Add key attributes like id, name, aria-label.
	for attr in ['id', 'name', 'aria-label']:
		if node.attributes.get(attr):
			parts.append(f'{attr}={node.attributes[attr][:20]}')

	return ' '.join(parts)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tools/utils.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/browser/save_cookies.py | """
Export cookies and storage state from your real Chrome browser
This allows you to save your authenticated sessions for later use
without needing to connect to the Chrome profile every time
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Browser
def select_chrome_profile() -> str | None:
	"""Interactively pick one of the locally installed Chrome profiles.

	Returns the selected profile's directory name, or None when no
	profiles were found.
	"""
	available = Browser.list_chrome_profiles()
	if not available:
		return None

	print('Available Chrome profiles:')
	for index, profile in enumerate(available, 1):
		print(f'  {index}. {profile["name"]}')

	total = len(available)
	while True:
		answer = input(f'\nSelect profile (1-{total}): ').strip()
		if answer.isdigit() and 1 <= int(answer) <= total:
			return available[int(answer) - 1]['directory']
		print('Invalid choice, try again.')
async def main():
	"""Export cookies/localStorage from a real Chrome profile to storage_state.json."""
	profile = select_chrome_profile()  # may be None when no profiles were found
	browser = Browser.from_system_chrome(profile_directory=profile)
	await browser.start()
	# Persist the authenticated session so later runs can reuse it.
	await browser.export_storage_state('storage_state.json')
	await browser.stop()
	print('Storage state exported to storage_state.json')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/browser/save_cookies.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/code_agent/extract_products.py | """
Example: Using code-use mode to extract products from multiple pages.
This example demonstrates the new code-use mode, which works like a Jupyter notebook
where the LLM writes Python code that gets executed in a persistent namespace.
The agent can:
- Navigate to pages
- Extract data using JavaScript
- Combine results from multiple pages
- Save data to files
- Export the session as a Jupyter notebook
This solves the problem from the brainstorm where extraction of multiple items
was difficult with the extract tool alone.
"""
import asyncio
from lmnr import Laminar
from browser_use.code_use import CodeAgent
Laminar.initialize()
async def main():
	"""Run a code-use agent that scrapes ~40-50 Flipkart products across categories."""
	# Multi-line scraping prompt; the embedded \n escapes are part of the prompt text.
	task = """
	Go to https://www.flipkart.com. Continue collecting products from Flipkart in the following categories. I need approximately 50 products from:\n\n1. Books & Media (books, stationery) - 15 products\n2. Sports & Fitness (equipment, clothing, accessories) - 15 products \n3. Beauty & Personal Care (cosmetics, skincare, grooming) - 10 products\nAnd 2 other categories you find interesting.\nNavigate to these categories and collect products with:\n- Product URL (working link)\n- Product name/description\n- Actual price (MRP)\n- Deal price (current selling price) \n- Discount percentage\n\nFocus on products with good discounts and clear pricing. Target around 40 products total from these three categories.
	"""

	# Create code-use agent (uses ChatBrowserUse automatically)
	agent = CodeAgent(
		task=task,
		max_steps=30,
	)

	try:
		# Run the agent
		print('Running code-use agent...')
		session = await agent.run()
	finally:
		# Always release browser/session resources, even if the run raises.
		await agent.close()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/code_agent/extract_products.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/code_agent/filter_webvoyager_dataset.py | import asyncio
from browser_use.code_use import CodeAgent
async def main():
	"""Run a code-use agent that downloads and date-filters the WebVoyager dataset."""
	task = """
	Find the WebVoyager dataset, download it and create a new version where you remove all tasks which have older dates than today.
	"""

	# Create code-use agent
	agent = CodeAgent(
		task=task,
		max_steps=25,
	)

	try:
		# Run the agent
		print('Running code-use agent to filter WebVoyager dataset...')
		session = await agent.run()
	finally:
		# Always release browser/session resources, even if the run raises.
		await agent.close()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/code_agent/filter_webvoyager_dataset.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/demo_mode_example.py | import asyncio
from browser_use import Agent, ChatBrowserUse
async def main() -> None:
	"""Run a short demo-mode agent against the browser-use GitHub repo."""
	agent = Agent(
		task='Please find the latest commit on browser-use/browser-use repo and tell me the commit message. Please summarize what it is about.',
		llm=ChatBrowserUse(model='bu-2-0'),
		demo_mode=True,
	)
	await agent.run(max_steps=5)


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/demo_mode_example.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/judge_trace.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm.browser_use.chat import ChatBrowserUse
# task from GAIA
task = """
If Eliud Kipchoge could maintain his record-making marathon pace indefinitely, how many thousand hours would it take him to run the distance between the Earth and the Moon its closest approach?
Please use the minimum perigee value on the Wikipedia page for the Moon when carrying out your calculation.
Round your result to the nearest 1000 hours and do not use any comma separators if necessary.
"""
async def main():
	"""Run the GAIA task with a judge enabled and print the judgement verdict."""
	llm = ChatBrowserUse(model='bu-2-0')
	agent = Agent(
		task=task,
		llm=llm,
		use_judge=True,
		judge_llm=llm,
		ground_truth='16',  # The TRUE answer is 17 but we put 16 to demonstrate judge can detect when the answer is wrong.
	)
	history = await agent.run()

	# Get the judgement result
	if history.is_judged():
		judgement = history.judgement()
		print(f'Agent history judgement: {judgement}')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/judge_trace.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/large_blocklist.py | """
Example: Using large blocklists (400k+ domains) with automatic optimization
This example demonstrates:
1. Loading a real-world blocklist (HaGeZi's Pro++ with 439k+ domains)
2. Automatic conversion to set for O(1) lookup performance
3. Testing that blocked domains are actually blocked
Performance: ~0.02ms per domain check (50,000+ checks/second!)
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
llm = ChatOpenAI(model='gpt-4.1-mini')
def load_blocklist_from_url(url: str) -> list[str]:
	"""Download a blocklist and return its domain entries.

	Args:
		url: URL of the plain-text blocklist (one domain per line).

	Returns:
		The domains from the file, with '#'-prefixed comment lines and
		blank lines filtered out.
	"""
	import urllib.request

	print(f'📥 Downloading blocklist from {url}...')

	with urllib.request.urlopen(url) as response:
		stripped = (raw.decode('utf-8').strip() for raw in response)
		domains = [entry for entry in stripped if entry and not entry.startswith('#')]

	print(f'✅ Loaded {len(domains):,} domains')
	return domains
async def main():
	"""Demo: drive an agent with a ~439k-domain blocklist and verify blocking works."""
	# Load HaGeZi's Pro++ blocklist (blocks ads, tracking, malware, etc.)
	# Source: https://github.com/hagezi/dns-blocklists
	blocklist_url = 'https://gitlab.com/hagezi/mirror/-/raw/main/dns-blocklists/domains/pro.plus.txt'

	print('=' * 70)
	print('🚀 Large Blocklist Demo - 439k+ Blocked Domains')
	print('=' * 70)
	print()

	# Load the blocklist
	prohibited_domains = load_blocklist_from_url(blocklist_url)

	# Sample some blocked domains to test
	test_blocked = [prohibited_domains[0], prohibited_domains[1000], prohibited_domains[-1]]
	print(f'\n📋 Sample blocked domains: {", ".join(test_blocked[:3])}')

	print(f'\n🔧 Creating browser with {len(prohibited_domains):,} blocked domains...')
	print('   (Auto-optimizing to set for O(1) lookup performance)')

	# Create browser with the blocklist
	# The list will be automatically optimized to a set for fast lookups
	browser_session = BrowserSession(
		browser_profile=BrowserProfile(
			prohibited_domains=prohibited_domains,
			headless=False,
			user_data_dir='~/.config/browseruse/profiles/blocklist-demo',
		),
	)

	# Task: Try to visit a blocked domain and a safe domain
	blocked_site = test_blocked[0]  # Will be blocked
	safe_site = 'github.com'  # Will be allowed

	task = f"""
	Try to navigate to these websites and report what happens:
	1. First, try to visit https://{blocked_site}
	2. Then, try to visit https://{safe_site}

	Tell me which sites you were able to access and which were blocked.
	"""

	agent = Agent(
		task=task,
		llm=llm,
		browser_session=browser_session,
	)

	print(f'\n🤖 Agent task: Try to visit {blocked_site} (blocked) and {safe_site} (allowed)')
	print('\n' + '=' * 70)

	await agent.run(max_steps=5)

	print('\n' + '=' * 70)
	print('✅ Demo complete!')
	print(f'💡 The blocklist with {len(prohibited_domains):,} domains was optimized to a set')
	print('   for instant O(1) domain checking (vs slow O(n) pattern matching)')
	print('=' * 70)

	# Keep the browser open for inspection until the user confirms.
	input('\nPress Enter to close the browser...')
	await browser_session.kill()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/large_blocklist.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/stop_externally.py | import asyncio
import os
import random
import sys
from browser_use.llm.google.chat import ChatGoogle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
llm = ChatGoogle(model='gemini-flash-latest', temperature=1.0)
def check_is_task_stopped():
	"""Build an async callback that randomly (p = 0.1) reports the task as stopped.

	Returns an async function for Agent's register_should_stop_callback;
	each call draws a fresh random number, so the stop signal fires on
	roughly 10% of checks.
	"""

	async def _should_stop() -> bool:
		stopped = random.random() < 0.1
		if stopped:
			print('[TASK STOPPER] Task is stopped')
		else:
			print('[TASK STOPPER] Task is not stopped')
		return stopped

	return _should_stop
# Stress-test task: fill in and submit a Wufoo-style form.
task = """
Go to https://browser-use.github.io/stress-tests/challenges/wufoo-style-form.html and complete the Wufoo-style form by filling in all required fields and submitting.
"""
# The callback from check_is_task_stopped() gives an external signal a chance to halt the run.
agent = Agent(task=task, llm=llm, flash_mode=True, register_should_stop_callback=check_is_task_stopped(), max_actions_per_step=1)


async def main():
	# Allow up to 30 steps; the stop callback may end the run earlier.
	await agent.run(max_steps=30)


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/stop_externally.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/browser_use_llm.py | """
Example of the fastest + smartest LLM for browser automation.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatBrowserUse
load_dotenv()
# Fail fast with a clear message if the Browser Use cloud API key is missing.
if not os.getenv('BROWSER_USE_API_KEY'):
	raise ValueError('BROWSER_USE_API_KEY is not set')


async def main():
	"""Ask a ChatBrowserUse-powered agent for the repo's star count."""
	agent = Agent(
		task='Find the number of stars of the browser-use repo',
		llm=ChatBrowserUse(model='bu-2-0'),
	)

	# Run the agent
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/browser_use_llm.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/cerebras_example.py | """
Example of using Cerebras with browser-use.
To use this example:
1. Set your CEREBRAS_API_KEY environment variable
2. Run this script
Cerebras integration is working great for:
- Direct text generation
- Simple tasks without complex structured output
- Fast inference for web automation
Available Cerebras models (9 total):
Small/Fast models (8B-32B):
- cerebras_llama3_1_8b (8B parameters, fast)
- cerebras_llama_4_scout_17b_16e_instruct (17B, instruction-tuned)
- cerebras_llama_4_maverick_17b_128e_instruct (17B, extended context)
- cerebras_qwen_3_32b (32B parameters)
Large/Capable models (70B-480B):
- cerebras_llama3_3_70b (70B parameters, latest version)
- cerebras_gpt_oss_120b (120B parameters, OpenAI's model)
- cerebras_qwen_3_235b_a22b_instruct_2507 (235B, instruction-tuned)
- cerebras_qwen_3_235b_a22b_thinking_2507 (235B, complex reasoning)
- cerebras_qwen_3_coder_480b (480B, code generation)
Note: Cerebras has some limitations with complex structured output due to JSON schema compatibility.
"""
import asyncio
import os
from browser_use import Agent
async def main():
	"""Run a Cerebras-backed agent on a simple text explanation task."""
	# Set your API key (recommended to use environment variable)
	api_key = os.getenv('CEREBRAS_API_KEY')
	if not api_key:
		raise ValueError('Please set CEREBRAS_API_KEY environment variable')

	# Option 1: Use the pre-configured model instance (recommended)
	from browser_use import llm

	# Choose your model:
	# Small/Fast models:
	# model = llm.cerebras_llama3_1_8b  # 8B, fast
	# model = llm.cerebras_llama_4_scout_17b_16e_instruct  # 17B, instruction-tuned
	# model = llm.cerebras_llama_4_maverick_17b_128e_instruct  # 17B, extended context
	# model = llm.cerebras_qwen_3_32b  # 32B

	# Large/Capable models:
	# model = llm.cerebras_llama3_3_70b  # 70B, latest
	# model = llm.cerebras_gpt_oss_120b  # 120B, OpenAI's model
	# model = llm.cerebras_qwen_3_235b_a22b_instruct_2507  # 235B, instruction-tuned
	model = llm.cerebras_qwen_3_235b_a22b_thinking_2507  # 235B, complex reasoning
	# model = llm.cerebras_qwen_3_coder_480b  # 480B, code generation

	# Option 2: Create the model instance directly
	# model = ChatCerebras(
	#     model="qwen-3-coder-480b",  # or any other model ID
	#     api_key=os.getenv("CEREBRAS_API_KEY"),
	#     temperature=0.2,
	#     max_tokens=4096,
	# )

	# Create and run the agent with a simple task
	task = 'Explain the concept of quantum entanglement in simple terms.'
	agent = Agent(task=task, llm=model)

	print(f'Running task with Cerebras {model.name} (ID: {model.model}): {task}')
	history = await agent.run(max_steps=3)

	result = history.final_result()
	print(f'Result: {result}')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/cerebras_example.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/gemini-3.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
from browser_use import Agent, ChatGoogle
load_dotenv()
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
raise ValueError('GOOGLE_API_KEY is not set')
async def run_search():
	"""Run a flash-mode Gemini 3 agent that looks up the repo's star count."""
	llm = ChatGoogle(model='gemini-3-pro-preview', api_key=api_key)
	agent = Agent(
		llm=llm,
		task='How many stars does the browser-use repo have?',
		flash_mode=True,
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(run_search())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/gemini-3.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/moonshot.py | import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# Get API key from environment variable
api_key = os.getenv('MOONSHOT_API_KEY')
if api_key is None:
print('Make sure you have MOONSHOT_API_KEY set in your .env file')
print('Get your API key from https://platform.moonshot.ai/console/api-keys ')
exit(1)
# Configure Moonshot AI model
llm = ChatOpenAI(
model='kimi-k2-thinking',
base_url='https://api.moonshot.ai/v1',
api_key=api_key,
add_schema_to_system_prompt=True,
remove_min_items_from_schema=True, # Moonshot doesn't support minItems in JSON schema
remove_defaults_from_schema=True, # Moonshot doesn't allow default values with anyOf
)
async def main():
agent = Agent(
task='Search for the latest news about AI and summarize the top 3 articles',
llm=llm,
flash_mode=True,
)
await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/moonshot.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/oci_models.py | """
Oracle Cloud Infrastructure (OCI) Raw API Example
This example demonstrates how to use OCI's Generative AI service with browser-use
using the raw API integration (ChatOCIRaw) without Langchain dependencies.
@dev You need to:
1. Set up OCI configuration file at ~/.oci/config
2. Have access to OCI Generative AI models in your tenancy
3. Install the OCI Python SDK: uv add oci
Requirements:
- OCI account with Generative AI service access
- Proper OCI configuration and authentication
- Model deployment in your OCI compartment
"""
import asyncio
import os
import sys
from pydantic import BaseModel
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from browser_use import Agent
from browser_use.llm import ChatOCIRaw
class SearchSummary(BaseModel):
	"""Structured summary of a search result, returned by the agent as output_format."""

	query: str  # the search query that was executed
	results_found: int  # number of results reported
	top_result_title: str  # title of the best-matching result
	summary: str  # free-text summary of the findings
	relevance_score: float  # agent-assigned relevance score
# Configuration examples for different providers
compartment_id = 'ocid1.tenancy.oc1..aaaaaaaayeiis5uk2nuubznrekd6xsm56k3m4i7tyvkxmr2ftojqfkpx2ura'
endpoint = 'https://inference.generativeai.us-chicago-1.oci.oraclecloud.com'

# Example 1: Meta Llama model (uses GenericChatRequest)
meta_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyarojgfh6msa452vziycwfymle5gxdvpwwxzara53topmq'
meta_llm = ChatOCIRaw(
	model_id=meta_model_id,
	service_endpoint=endpoint,
	compartment_id=compartment_id,
	provider='meta',  # Meta Llama model
	temperature=0.7,
	max_tokens=800,
	frequency_penalty=0.0,
	presence_penalty=0.0,
	top_p=0.9,
	auth_type='API_KEY',
	auth_profile='DEFAULT',
)

cohere_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyanrlpnq5ybfu5hnzarg7jomak3q6kyhkzjsl4qj24fyoq'
# Example 2: Cohere model (uses CohereChatRequest)
# cohere_model_id = "ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyapnibwg42qjhwaxrlqfpreueirtwghiwvv2whsnwmnlva"
cohere_llm = ChatOCIRaw(
	model_id=cohere_model_id,
	service_endpoint=endpoint,
	compartment_id=compartment_id,
	provider='cohere',  # Cohere model
	temperature=1.0,
	max_tokens=600,
	frequency_penalty=0.0,
	top_p=0.75,
	top_k=0,  # Cohere-specific parameter
	auth_type='API_KEY',
	auth_profile='DEFAULT',
)

# Example 3: xAI model (uses GenericChatRequest)
xai_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceya3bsfz4ogiuv3yc7gcnlry7gi3zzx6tnikg6jltqszm2q'
xai_llm = ChatOCIRaw(
	model_id=xai_model_id,
	service_endpoint=endpoint,
	compartment_id=compartment_id,
	provider='xai',  # xAI model
	temperature=1.0,
	max_tokens=20000,
	top_p=1.0,
	top_k=0,
	auth_type='API_KEY',
	auth_profile='DEFAULT',
)

# Use the xAI model by default for this example
llm = xai_llm
async def basic_example():
	"""Basic example using ChatOCIRaw with a simple task."""
	print('🔹 Basic ChatOCIRaw Example')
	print('=' * 40)
	print(f'Model: {llm.name}')
	print(f'Provider: {llm.provider_name}')

	# Create agent with a simple task
	agent = Agent(
		task="Go to google.com and search for 'Oracle Cloud Infrastructure pricing'",
		llm=llm,
	)

	print("Task: Go to google.com and search for 'Oracle Cloud Infrastructure pricing'")

	# Run the agent
	try:
		result = await agent.run(max_steps=5)
		print('✅ Task completed successfully!')
		print(f'Final result: {result}')
	except Exception as e:
		# Network/auth failures are reported instead of crashing the demo.
		print(f'❌ Error: {e}')
async def structured_output_example():
	"""Example demonstrating structured output with Pydantic models."""
	print('\n🔹 Structured Output Example')
	print('=' * 40)

	# Create agent that will return structured data
	agent = Agent(
		task="""Go to github.com, search for 'browser automation python',
		find the most popular repository, and return structured information about it""",
		llm=llm,
		output_format=SearchSummary,  # This will enforce structured output
	)

	print('Task: Search GitHub for browser automation and return structured data')

	try:
		result = await agent.run(max_steps=5)
		if isinstance(result, SearchSummary):
			print('✅ Structured output received!')
			print(f'Query: {result.query}')
			print(f'Results Found: {result.results_found}')
			print(f'Top Result: {result.top_result_title}')
			print(f'Summary: {result.summary}')
			print(f'Relevance Score: {result.relevance_score}')
		else:
			# Fallback: the run may return an unstructured result.
			print(f'Result: {result}')
	except Exception as e:
		print(f'❌ Error: {e}')
async def advanced_configuration_example():
	"""Example showing advanced configuration options.

	Uses the module-level ``llm`` (whichever provider it is currently
	bound to) for a longer multi-step analysis task.
	"""
	print('\n🔹 Advanced Configuration Example')
	print('=' * 40)
	print(f'Model: {llm.name}')
	print(f'Provider: {llm.provider_name}')
	# Report the provider actually configured on `llm` instead of a
	# hard-coded (and previously inaccurate) "Cohere ... instance principal"
	# description — the module currently binds llm to the xAI model.
	print(f'Configuration: {llm.provider} model via ChatOCIRaw')

	# Create agent with a more complex task
	agent = Agent(
		task="""Navigate to stackoverflow.com, search for questions about 'python web scraping' and tap search help,
		analyze the top 3 questions, and provide a detailed summary of common challenges""",
		llm=llm,
	)

	print('Task: Analyze StackOverflow questions about Python web scraping')

	try:
		result = await agent.run(max_steps=8)
		print('✅ Advanced task completed!')
		print(f'Analysis result: {result}')
	except Exception as e:
		print(f'❌ Error: {e}')
async def provider_compatibility_test():
	"""Test different provider formats to verify compatibility."""
	print('\n🔹 Provider Compatibility Test')
	print('=' * 40)

	providers_to_test = [('Meta', meta_llm), ('Cohere', cohere_llm), ('xAI', xai_llm)]

	for provider_name, model in providers_to_test:
		print(f'\nTesting {provider_name} model...')
		print(f'Model ID: {model.model_id}')
		print(f'Provider: {model.provider}')
		print(f'Uses Cohere format: {model._uses_cohere_format()}')

		# Create a simple agent to test the model
		agent = Agent(
			task='Go to google.com and tell me what you see',
			llm=model,
		)

		try:
			result = await agent.run(max_steps=3)
			print(f'✅ {provider_name} model works correctly!')
			print(f'Result: {str(result)[:100]}...')  # truncate long results for display
		except Exception as e:
			print(f'❌ {provider_name} model failed: {e}')
async def main():
    """Run all OCI Raw examples."""
    # Static intro/help text, printed line by line.
    intro = [
        '🚀 Oracle Cloud Infrastructure (OCI) Raw API Examples',
        '=' * 60,
        '\n📋 Prerequisites:',
        '1. OCI account with Generative AI service access',
        '2. OCI configuration file at ~/.oci/config',
        '3. Model deployed in your OCI compartment',
        '4. Proper IAM permissions for Generative AI',
        '5. OCI Python SDK installed: uv add oci',
        '=' * 60,
        '\n⚙️ Configuration Notes:',
        '• Update model_id, service_endpoint, and compartment_id with your values',
        '• Supported providers: "meta", "cohere", "xai"',
        '• Auth types: "API_KEY", "INSTANCE_PRINCIPAL", "RESOURCE_PRINCIPAL"',
        '• Default OCI config profile: "DEFAULT"',
        '=' * 60,
        '\n🔧 Provider-Specific API Formats:',
        '• Meta/xAI models: Use GenericChatRequest with messages array',
        '• Cohere models: Use CohereChatRequest with single message string',
        '• The integration automatically detects and uses the correct format',
        '=' * 60,
    ]
    for line in intro:
        print(line)
    try:
        # Run all examples
        await basic_example()
        await structured_output_example()
        await advanced_configuration_example()
        # await provider_compatibility_test()
        print('\n🎉 All examples completed successfully!')
    except Exception as exc:
        print(f'\n❌ Example failed: {exc}')
        troubleshooting = [
            '\n🔧 Troubleshooting:',
            '• Verify OCI configuration: oci setup config',
            '• Check model OCID and availability',
            '• Ensure compartment access and IAM permissions',
            '• Verify service endpoint URL',
            '• Check OCI Python SDK installation',
            "• Ensure you're using the correct provider name in ChatOCIRaw",
        ]
        for line in troubleshooting:
            print(line)


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/oci_models.py",
"license": "MIT License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/models/vercel_ai_gateway.py | """
Example using Vercel AI Gateway with browser-use.
Vercel AI Gateway provides an OpenAI-compatible API endpoint that can proxy
requests to various AI providers. This allows you to use Vercel's infrastructure
for rate limiting, caching, and monitoring.
Prerequisites:
1. Set VERCEL_API_KEY in your environment variables
To see all available models, visit: https://ai-gateway.vercel.sh/v1/models
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatVercel
load_dotenv()

# Fail fast and explicitly if the gateway key is missing from the environment.
api_key = os.getenv('VERCEL_API_KEY')
if not api_key:
    raise ValueError('VERCEL_API_KEY is not set')

# Basic usage: the gateway routes 'openai/gpt-4o' to OpenAI.
llm = ChatVercel(
    model='openai/gpt-4o',
    api_key=api_key,
)

# Example with provider options - control which providers are used and in what order
# This will try Vertex AI first, then fall back to Anthropic if Vertex fails
llm_with_provider_options = ChatVercel(
    model='anthropic/claude-sonnet-4',
    api_key=api_key,
    provider_options={
        'gateway': {
            'order': ['vertex', 'anthropic']  # Try Vertex AI first, then Anthropic
        }
    },
)

# Two agents with the same task, differing only in which LLM config they use.
agent = Agent(
    task='Go to example.com and summarize the main content',
    llm=llm,
)

agent_with_provider_options = Agent(
    task='Go to example.com and summarize the main content',
    llm=llm_with_provider_options,
)
async def main():
    """Run the basic agent, then the provider-options agent, sequentially."""
    for example_agent in (agent, agent_with_provider_options):
        await example_agent.run(max_steps=10)


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/vercel_ai_gateway.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/sandbox/example.py | """Example of using sandbox execution with Browser-Use Agent
This example demonstrates how to use the @sandbox decorator to run
browser automation tasks with the Agent in a sandbox environment.
To run this example:
1. Set your BROWSER_USE_API_KEY environment variable
2. Set your LLM API key (OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.)
3. Run: python examples/sandbox/example.py
"""
import asyncio
import os
from browser_use import Browser, ChatBrowserUse, sandbox
from browser_use.agent.service import Agent
# Example with event callbacks to monitor execution
def on_browser_ready(data):
    """Callback when browser session is created"""
    messages = (
        '\n🌐 Browser session created!',
        f' Session ID: {data.session_id}',
        f' Live view: {data.live_url}',
        ' Click the link above to watch the AI agent work!\n',
    )
    for message in messages:
        print(message)
@sandbox(
    log_level='INFO',
    on_browser_created=on_browser_ready,
    # server_url='http://localhost:8080/sandbox-stream',
    # cloud_profile_id='21182245-590f-4712-8888-9611651a024c',
    # cloud_proxy_country_code='us',
    # cloud_timeout=60,
)
async def pydantic_example(browser: Browser):
    """Ask the agent for the public IP/location and return its final answer."""
    ip_agent = Agent(
        """go and check my ip address and the location. return the result in json format""",
        browser=browser,
        llm=ChatBrowserUse(model='bu-2-0'),
    )
    history = await ip_agent.run()
    return history.final_result()
async def main():
    """Run examples"""
    # The sandbox backend requires an API key; bail out early when absent.
    api_key = os.getenv('BROWSER_USE_API_KEY')
    if not api_key:
        print('❌ Please set BROWSER_USE_API_KEY environment variable')
        return
    print('\n\n=== Search with AI Agent (with live browser view) ===')
    search_result = await pydantic_example()
    print('\nResults:')
    print(search_result)


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/sandbox/example.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/use-cases/apply_to_job.py | import argparse
import asyncio
import json
import os
from dotenv import load_dotenv
from browser_use import Agent, Browser, ChatOpenAI, Tools
from browser_use.tools.views import UploadFileAction
load_dotenv()
async def apply_to_rochester_regional_health(info: dict, resume_path: str):
    """
    Fill out and submit the Rochester Regional Health job application with the
    applicant data in `info`, uploading the resume at `resume_path`.
    Returns the agent's final result (a human-readable summary string, per the task).

    json format:
    {
        "first_name": "John",
        "last_name": "Doe",
        "email": "john.doe@example.com",
        "phone": "555-555-5555",
        "age": "21",
        "US_citizen": boolean,
        "sponsorship_needed": boolean,
        "resume": "Link to resume",
        "postal_code": "12345",
        "country": "USA",
        "city": "Rochester",
        "address": "123 Main St",
        "gender": "Male",
        "race": "Asian",
        "Veteran_status": "Not a veteran",
        "disability_status": "No disability"
    }
    """
    llm = ChatOpenAI(model='o3')
    tools = Tools()

    # Custom tool registered for the agent; the resume path is captured by
    # closure so the LLM never needs to see or handle the local file path.
    @tools.action(description='Upload resume file')
    async def upload_resume(browser_session):
        # NOTE(review): `params` is constructed but never used — looks like a
        # stub; the task below tells the agent to use upload_file_to_element
        # instead. Confirm before removing.
        params = UploadFileAction(path=resume_path, index=0)
        return 'Ready to upload resume'

    # cross_origin_iframes=True: the application form embeds cross-origin frames.
    browser = Browser(cross_origin_iframes=True)
    task = f"""
    - Your goal is to fill out and submit a job application form with the provided information.
    - Navigate to https://apply.appcast.io/jobs/50590620606/applyboard/apply/
    - Scroll through the entire application and use extract_structured_data action to extract all the relevant information needed to fill out the job application form. use this information and return a structured output that can be used to fill out the entire form: {info}. Use the done action to finish the task. Fill out the job application form with the following information.
    - Before completing every step, refer to this information for accuracy. It is structured in a way to help you fill out the form and is the source of truth.
    - Follow these instructions carefully:
    - if anything pops up that blocks the form, close it out and continue filling out the form.
    - Do not skip any fields, even if they are optional. If you do not have the information, make your best guess based on the information provided.
    Fill out the form from top to bottom, never skip a field to come back to it later. When filling out a field, only focus on one field per step. For each of these steps, scroll to the related text. These are the steps:
    1) use input_text action to fill out the following:
    - "First name"
    - "Last name"
    - "Email"
    - "Phone number"
    2) use the upload_file_to_element action to fill out the following:
    - Resume upload field
    3) use input_text action to fill out the following:
    - "Postal code"
    - "Country"
    - "State"
    - "City"
    - "Address"
    - "Age"
    4) use click action to select the following options:
    - "Are you legally authorized to work in the country for which you are applying?"
    - "Will you now or in the future require sponsorship for employment visa status (e.g., H-1B visa status, etc.) to work legally for Rochester Regional Health?"
    - "Do you have, or are you in the process of obtaining, a professional license?"
    - SELECT NO FOR THIS FIELD
    5) use input_text action to fill out the following:
    - "What drew you to healthcare?"
    6) use click action to select the following options:
    - "How many years of experience do you have in a related role?"
    - "Gender"
    - "Race"
    - "Hispanic/Latino"
    - "Veteran status"
    - "Disability status"
    7) use input_text action to fill out the following:
    - "Today's date"
    8) CLICK THE SUBMIT BUTTON AND CHECK FOR A SUCCESS SCREEN. Once there is a success screen, complete your end task of writing final_result and outputting it.
    - Before you start, create a step-by-step plan to complete the entire task. Make sure to delegate a step for each field to be filled out.
    *** IMPORTANT ***:
    - You are not done until you have filled out every field of the form.
    - When you have completed the entire form, press the submit button to submit the application and use the done action once you have confirmed that the application is submitted
    - PLACE AN EMPHASIS ON STEP 4, the click action. That section should be filled out.
    - At the end of the task, structure your final_result as 1) a human-readable summary of all detections and actions performed on the page with 2) a list with all questions encountered in the page. Do not say "see above." Include a fully written out, human-readable summary at the very end.
    """
    # Whitelist the resume so the agent's file-upload action may access it.
    available_file_paths = [resume_path]
    agent = Agent(
        task=task,
        llm=llm,
        browser=browser,
        tools=tools,
        available_file_paths=available_file_paths,
    )
    history = await agent.run()
    return history.final_result()
async def main(test_data_path: str, resume_path: str):
    """Validate input files, load applicant data, and run the application agent."""
    # Fail fast with a clear, path-specific message for each missing file.
    required = (
        (test_data_path, f'Test data file not found at: {test_data_path}'),
        (resume_path, f'Resume file not found at: {resume_path}'),
    )
    for path, message in required:
        if not os.path.exists(path):
            raise FileNotFoundError(message)
    with open(test_data_path) as f:  # noqa: ASYNC230
        mock_info = json.load(f)
    results = await apply_to_rochester_regional_health(mock_info, resume_path=resume_path)
    print('Search Results:', results)
if __name__ == '__main__':
    # CLI entry point: both a JSON test-data file and a resume file are required.
    parser = argparse.ArgumentParser(description='Apply to Rochester Regional Health job')
    parser.add_argument('--test-data', required=True, help='Path to test data JSON file')
    parser.add_argument('--resume', required=True, help='Path to resume PDF file')
    args = parser.parse_args()
    asyncio.run(main(args.test_data, args.resume))
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/apply_to_job.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/use-cases/buy_groceries.py | import asyncio
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse
class GroceryItem(BaseModel):
    """A single grocery item"""

    # Field descriptions double as instructions to the LLM when it fills in
    # this structured-output schema.
    name: str = Field(..., description='Item name')
    price: float = Field(..., description='Price as number')
    brand: str | None = Field(None, description='Brand name')
    size: str | None = Field(None, description='Size or quantity')
    url: str = Field(..., description='Full URL to item')
class GroceryCart(BaseModel):
    """Grocery cart results"""

    # Top-level output schema the agent must return (see output_model_schema).
    items: list[GroceryItem] = Field(default_factory=list, description='All grocery items found')
async def add_to_cart(items: list[str] | None = None):
    """Search Instacart for the given items and add the best match for each to the cart.

    Args:
        items: Item names to search for. Defaults to ['milk', 'eggs', 'bread']
            when omitted.

    Returns:
        The agent run history; its ``structured_output`` attribute holds a
        ``GroceryCart`` when the run succeeds.
    """
    # None-sentinel instead of a mutable default argument (a shared list
    # default would be the same object across every call).
    if items is None:
        items = ['milk', 'eggs', 'bread']
    # Attach to an already-running Chrome exposing CDP on port 9222.
    browser = Browser(cdp_url='http://localhost:9222')
    llm = ChatBrowserUse(model='bu-2-0')
    # Task prompt
    task = f"""
    Search for "{items}" on Instacart at the nearest store.
    You will buy all of the items at the same store.
    For each item:
    1. Search for the item
    2. Find the best match (closest name, lowest price)
    3. Add the item to the cart
    Site:
    - Instacart: https://www.instacart.com/
    """
    # Create agent with structured output
    agent = Agent(
        browser=browser,
        llm=llm,
        task=task,
        output_model_schema=GroceryCart,
    )
    # Run the agent
    result = await agent.run()
    return result
if __name__ == '__main__':
    # Get user input
    items_input = input('What items would you like to add to cart (comma-separated)? ').strip()
    if not items_input:
        # Empty input: fall back to the default basket.
        items = ['milk', 'eggs', 'bread']
        print(f'Using default items: {items}')
    else:
        items = [item.strip() for item in items_input.split(',')]
    result = asyncio.run(add_to_cart(items))
    # Access structured output
    if result and result.structured_output:
        cart = result.structured_output
        print(f'\n{"=" * 60}')
        print('Items Added to Cart')
        print(f'{"=" * 60}\n')
        # Print each cart entry; brand/size are optional and skipped when unset.
        for item in cart.items:
            print(f'Name: {item.name}')
            print(f'Price: ${item.price}')
            if item.brand:
                print(f'Brand: {item.brand}')
            if item.size:
                print(f'Size: {item.size}')
            print(f'URL: {item.url}')
            print(f'{"-" * 60}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/buy_groceries.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/use-cases/onepassword.py | import os
from onepassword.client import Client
from browser_use import ActionResult, Agent, Browser, ChatOpenAI, Tools
from browser_use.browser.session import BrowserSession
"""
Use Case: Securely log into a website using credentials stored in 1Password vault.
- Use fill_field action to fill in username and password fields with values retrieved from 1Password. The LLM never sees the actual credentials.
- Use blur_page and unblur_page actions to visually obscure sensitive information on the page while filling in credentials for extra security.
**SETUP**
How to setup 1Password with Browser Use
- Get Individual Plan for 1Password
- Go to the Home page and click “New Vault”
- Add the credentials you need for any websites you want to log into
- Go to “Developer” tab, navigate to “Directory” and create a Service Account
- Give the service account access to the vault
- Copy the Service Account Token and set it as environment variable OP_SERVICE_ACCOUNT_TOKEN
- Install the onepassword package: pip install onepassword-sdk
Note: In this example, we assume that you created a vault named "prod-secrets" and added an item named "X" with fields "username" and "password".
"""
async def main():
    """Log into X using credentials resolved from a 1Password vault, keeping the
    page visually blurred while sensitive fields are filled so credentials never
    appear on screen (or to the LLM)."""
    # Gets your service account token from environment variable
    token = os.getenv('OP_SERVICE_ACCOUNT_TOKEN')
    # Authenticate with 1Password
    op_client = await Client.authenticate(auth=token, integration_name='Browser Use Secure Login', integration_version='v1.0.0')
    # Initialize tools
    tools = Tools()

    @tools.registry.action('Apply CSS blur filter to entire page content')
    async def blur_page(browser_session: BrowserSession):
        """
        Applies CSS blur filter directly to document.body to obscure all page content.
        The blur will remain until unblur_page is called.
        DOM remains accessible for element finding while page is visually blurred.
        """
        try:
            # Get CDP session
            cdp_session = await browser_session.get_or_create_cdp_session()
            # Apply blur filter to document.body
            result = await cdp_session.cdp_client.send.Runtime.evaluate(
                params={
                    'expression': """
                        (function() {
                            // Check if already blurred
                            if (document.body.getAttribute('data-page-blurred') === 'true') {
                                console.log('[BLUR] Page already blurred');
                                return true;
                            }
                            // Apply CSS blur filter to body
                            document.body.style.filter = 'blur(15px)';
                            document.body.style.webkitFilter = 'blur(15px)'; // Safari support
                            document.body.style.transition = 'filter 0.3s ease';
                            document.body.setAttribute('data-page-blurred', 'true');
                            console.log('[BLUR] Applied CSS blur to page');
                            return true;
                        })();
                    """,
                    'returnByValue': True,
                },
                session_id=cdp_session.session_id,
            )
            # The IIFE above returns a boolean; returnByValue surfaces it here.
            success = result.get('result', {}).get('value', False)
            if success:
                print('[BLUR] Applied CSS blur to page')
                return ActionResult(extracted_content='Successfully applied CSS blur to page', include_in_memory=True)
            else:
                return ActionResult(error='Failed to apply blur', include_in_memory=True)
        except Exception as e:
            print(f'[BLUR ERROR] {e}')
            return ActionResult(error=f'Failed to blur page: {str(e)}', include_in_memory=True)

    @tools.registry.action('Remove CSS blur filter from page')
    async def unblur_page(browser_session: BrowserSession):
        """
        Removes the CSS blur filter from document.body, restoring normal page visibility.
        """
        try:
            # Get CDP session
            cdp_session = await browser_session.get_or_create_cdp_session()
            # Remove blur filter from body
            result = await cdp_session.cdp_client.send.Runtime.evaluate(
                params={
                    'expression': """
                        (function() {
                            if (document.body.getAttribute('data-page-blurred') !== 'true') {
                                console.log('[BLUR] Page not blurred');
                                return false;
                            }
                            // Remove CSS blur filter
                            document.body.style.filter = 'none';
                            document.body.style.webkitFilter = 'none';
                            document.body.removeAttribute('data-page-blurred');
                            console.log('[BLUR] Removed CSS blur from page');
                            return true;
                        })();
                    """,
                    'returnByValue': True,
                },
                session_id=cdp_session.session_id,
            )
            removed = result.get('result', {}).get('value', False)
            if removed:
                print('[BLUR] Removed CSS blur from page')
                return ActionResult(extracted_content='Successfully removed CSS blur from page', include_in_memory=True)
            else:
                # Not an error: the page may simply never have been blurred.
                print('[BLUR] Page was not blurred')
                return ActionResult(
                    extracted_content='Page was not blurred (may have already been removed)', include_in_memory=True
                )
        except Exception as e:
            print(f'[BLUR ERROR] {e}')
            return ActionResult(error=f'Failed to unblur page: {str(e)}', include_in_memory=True)

    # LLM can call this action to use actors to fill in sensitive fields using 1Password values.
    @tools.registry.action('Fill in a specific field for a website using value from 1Password vault')
    async def fill_field(vault_name: str, item_name: str, field_name: str, browser_session: BrowserSession):
        """
        Fills in a specific field for a website using the value from 1Password.
        Note: Use blur_page before calling this if you want visual security.
        """
        try:
            # Resolve field value from 1Password
            field_value = await op_client.secrets.resolve(f'op://{vault_name}/{item_name}/{field_name}')
            # Get current page
            page = await browser_session.must_get_current_page()
            # Find and fill the element
            # NOTE: `llm` is assigned below in main(); this closure resolves it at
            # call time (during agent.run()), after the assignment has happened.
            target_field = await page.must_get_element_by_prompt(f'{field_name} input field', llm)
            await target_field.fill(field_value)
            return ActionResult(
                extracted_content=f'Successfully filled {field_name} field for {vault_name}/{item_name}', include_in_memory=True
            )
        except Exception as e:
            return ActionResult(error=f'Failed to fill {field_name} field: {str(e)}', include_in_memory=True)

    browser_session = Browser()
    llm = ChatOpenAI(model='o3')
    agent = Agent(
        task="""
        Navigate to https://x.com/i/flow/login
        Wait for the page to load.
        Use fill_field action with vault_name='prod-secrets' and item_name='X' and field_name='username'.
        Click the Next button.
        Use fill_field action with vault_name='prod-secrets' and item_name='X' and field_name='password'.
        Click the Log in button.
        Give me the latest 5 tweets from the logged in user's timeline.
        **IMPORTANT** Use blur_page action if you anticipate filling sensitive fields.
        Only use unblur_page action after you see the logged in user's X timeline.
        Your priority is to keep the username and password hidden while filling sensitive fields.
        """,
        browser_session=browser_session,
        llm=llm,
        tools=tools,
        file_system_path='./agent_data',
    )
    await agent.run()
if __name__ == '__main__':
    import asyncio  # imported lazily: only needed when run as a script

    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/onepassword.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:examples/use-cases/pcpartpicker.py | import asyncio
from browser_use import Agent, Browser, ChatBrowserUse, Tools
async def main():
    """Drive an agent to assemble a budget water-cooled ITX build on PCPartPicker."""
    # Attach to a locally running Chrome exposing CDP on port 9222.
    browser = Browser(cdp_url='http://localhost:9222')
    llm = ChatBrowserUse(model='bu-2-0')
    tools = Tools()
    task = """
    Design me a mid-range water-cooled ITX computer
    Keep the total budget under $2000
    Go to https://pcpartpicker.com/
    Make sure the build is complete and has no incompatibilities.
    Provide the full list of parts with prices and a link to the completed build.
    """
    build_agent = Agent(
        task=task,
        browser=browser,
        tools=tools,
        llm=llm,
    )
    # Effectively unbounded step budget; the agent stops when the task is done.
    return await build_agent.run(max_steps=100000)
if __name__ == '__main__':
    history = asyncio.run(main())
    # Print the agent's final answer extracted from the run history.
    final_result = history.final_result()
    print(final_result)
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/pcpartpicker.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/use-cases/phone_comparison.py | import asyncio
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse
class ProductListing(BaseModel):
    """A single product listing"""

    # Field descriptions guide the LLM when it fills this structured-output schema.
    title: str = Field(..., description='Product title')
    url: str = Field(..., description='Full URL to listing')
    price: float = Field(..., description='Price as number')
    condition: str | None = Field(None, description='Condition: Used, New, Refurbished, etc')
    source: str = Field(..., description='Source website: Amazon, eBay, or Swappa')
class PriceComparison(BaseModel):
    """Price comparison results"""

    # Top-level schema the agent must return (see output_model_schema in find()).
    search_query: str = Field(..., description='The search query used')
    listings: list[ProductListing] = Field(default_factory=list, description='All product listings')
async def find(item: str = 'Used iPhone 12'):
    """
    Search for an item across multiple marketplaces and compare prices.
    Args:
        item: The item to search for (e.g., "Used iPhone 12")
    Returns:
        PriceComparison object with structured results
    """
    # Attach to an already-running Chrome exposing CDP on port 9222.
    browser = Browser(cdp_url='http://localhost:9222')
    llm = ChatBrowserUse(model='bu-2-0')
    # Task prompt
    task = f"""
    Search for "{item}" on eBay, Amazon, and Swappa. Get any 2-3 listings from each site.
    For each site:
    1. Search for "{item}"
    2. Extract ANY 2-3 listings you find (sponsored, renewed, used - all are fine)
    3. Get: title, price (number only, if range use lower number), source, full URL, condition
    4. Move to next site
    Sites:
    - eBay: https://www.ebay.com/
    - Amazon: https://www.amazon.com/
    - Swappa: https://swappa.com/
    """
    # Agent configured to emit a PriceComparison as structured output.
    comparison_agent = Agent(
        browser=browser,
        llm=llm,
        task=task,
        output_model_schema=PriceComparison,
    )
    return await comparison_agent.run()
if __name__ == '__main__':
    # Get user input
    query = input('What item would you like to compare prices for? ').strip()
    if not query:
        # Empty input: fall back to the default example query.
        query = 'Used iPhone 12'
        print(f'Using default query: {query}')
    result = asyncio.run(find(query))
    # Access structured output
    if result and result.structured_output:
        comparison = result.structured_output
        print(f'\n{"=" * 60}')
        print(f'Price Comparison Results: {comparison.search_query}')
        print(f'{"=" * 60}\n')
        # One block per listing; condition is optional and shown as N/A when unset.
        for listing in comparison.listings:
            print(f'Title: {listing.title}')
            print(f'Price: ${listing.price}')
            print(f'Source: {listing.source}')
            print(f'URL: {listing.url}')
            print(f'Condition: {listing.condition or "N/A"}')
            print(f'{"-" * 60}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/use-cases/phone_comparison.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:tests/ci/browser/test_cross_origin_click.py | """Test clicking elements inside cross-origin iframes."""
import asyncio
import pytest
from browser_use.browser.profile import BrowserProfile, ViewportSize
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
@pytest.fixture
async def browser_session():
    """Create browser session with cross-origin iframe support."""
    session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,  # ephemeral profile: no state persists between runs
            keep_alive=True,
            window_size=ViewportSize(width=1920, height=1400),
            cross_origin_iframes=True,  # Enable cross-origin iframe extraction
        )
    )
    await session.start()
    yield session
    # Teardown after the test: kill() forcibly ends the session (needed since
    # keep_alive=True — presumably stop() would leave it running; confirm).
    await session.kill()
class TestCrossOriginIframeClick:
    """Test clicking elements inside cross-origin iframes."""

    async def test_click_element_in_cross_origin_iframe(self, httpserver, browser_session: BrowserSession):
        """Verify that elements inside iframes in different CDP targets can be clicked."""
        # Create iframe content with clickable elements
        iframe_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Iframe Page</title></head>
        <body>
            <h1>Iframe Content</h1>
            <a href="https://test-domain.example/page" id="iframe-link">Test Link</a>
            <button id="iframe-button">Iframe Button</button>
        </body>
        </html>
        """
        # Create main page with iframe pointing to our test server
        main_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Multi-Target Test</title></head>
        <body>
            <h1>Main Page</h1>
            <button id="main-button">Main Button</button>
            <iframe id="test-iframe" src="/iframe-content" style="width: 800px; height: 600px;"></iframe>
        </body>
        </html>
        """
        # Serve both pages
        httpserver.expect_request('/multi-target-test').respond_with_data(main_html, content_type='text/html')
        httpserver.expect_request('/iframe-content').respond_with_data(iframe_html, content_type='text/html')
        url = httpserver.url_for('/multi-target-test')
        # Navigate to the page
        await browser_session.navigate_to(url)
        # Wait for iframe to load
        await asyncio.sleep(2)
        # Get DOM state with cross-origin iframe extraction enabled
        # Use browser_session.get_browser_state_summary() instead of directly creating DomService
        # This goes through the proper event bus and watchdog system
        browser_state = await browser_session.get_browser_state_summary(
            include_screenshot=False,
            include_recent_events=False,
        )
        assert browser_state.dom_state is not None
        state = browser_state.dom_state
        print(f'\n📊 Found {len(state.selector_map)} total elements')
        # Find elements from different targets
        targets_found = set()
        main_page_elements = []
        iframe_elements = []
        # Classify each indexed element by the id attributes set in the fixtures above.
        for idx, element in state.selector_map.items():
            target_id = element.target_id
            targets_found.add(target_id)
            # Check if element is from iframe (identified by id attributes we set)
            # Iframe elements will have a different target_id when cross_origin_iframes=True
            if element.attributes:
                element_id = element.attributes.get('id', '')
                if element_id in ('iframe-link', 'iframe-button'):
                    iframe_elements.append((idx, element))
                    print(f' ✅ Found iframe element: [{idx}] {element.tag_name} id={element_id}')
                elif element_id == 'main-button':
                    main_page_elements.append((idx, element))
        # Verify we found elements from at least 2 different targets
        print(f'\n🎯 Found elements from {len(targets_found)} different CDP targets')
        # Check if iframe elements were found
        if len(iframe_elements) == 0:
            pytest.fail('Expected to find at least one element from iframe, but found none')
        # Verify we found at least one element from the iframe
        assert len(iframe_elements) > 0, 'Expected to find at least one element from iframe'
        # Try clicking the iframe element
        print('\n🖱️ Testing Click on Iframe Element:')
        tools = Tools()
        link_idx, link_element = iframe_elements[0]
        print(f' Attempting to click element [{link_idx}] from iframe...')
        try:
            result = await tools.click(index=link_idx, browser_session=browser_session)
            # Check for errors
            if result.error:
                pytest.fail(f'Click on iframe element [{link_idx}] failed with error: {result.error}')
            # Some failures are reported via extracted_content rather than error.
            if result.extracted_content and (
                'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
            ):
                pytest.fail(f'Click on iframe element [{link_idx}] failed: {result.extracted_content}')
            print(f' ✅ Click succeeded on iframe element [{link_idx}]!')
            print(' 🎉 Iframe element clicking works!')
        except Exception as e:
            pytest.fail(f'Exception while clicking iframe element [{link_idx}]: {e}')
        print('\n✅ Test passed: Iframe elements can be clicked')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_cross_origin_click.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/browser/test_dom_serializer.py | """
Test DOM serializer with complex scenarios: shadow DOM, same-origin and cross-origin iframes.
This test verifies that the DOM serializer correctly:
1. Extracts interactive elements from shadow DOM
2. Processes same-origin iframes
3. Handles cross-origin iframes (should be blocked)
4. Generates correct selector_map with expected element counts
Usage:
uv run pytest tests/ci/browser/test_dom_serializer.py -v -s
"""
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile, ViewportSize
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
    """Create and provide a test HTTP server for DOM serializer tests."""
    from pathlib import Path

    server = HTTPServer()
    server.start()
    # Load HTML templates from files
    # Templates live next to this test module.
    test_dir = Path(__file__).parent
    main_page_html = (test_dir / 'test_page_template.html').read_text()
    iframe_html = (test_dir / 'iframe_template.html').read_text()
    stacked_page_html = (test_dir / 'test_page_stacked_template.html').read_text()
    # Route 1: Main page with shadow DOM and iframes
    server.expect_request('/dom-test-main').respond_with_data(main_page_html, content_type='text/html')
    # Route 2: Same-origin iframe content
    server.expect_request('/iframe-same-origin').respond_with_data(iframe_html, content_type='text/html')
    # Route 3: Stacked complex scenarios test page
    server.expect_request('/stacked-test').respond_with_data(stacked_page_html, content_type='text/html')
    yield server
    # Session-scoped teardown: stop the server once all tests are done.
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Return the base URL for the test HTTP server."""
    host, port = http_server.host, http_server.port
    return 'http://{}:{}'.format(host, port)
@pytest.fixture(scope='function')
async def browser_session():
    """Create a browser session for DOM serializer tests."""
    session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,  # ephemeral profile: no state persists between runs
            keep_alive=True,
            window_size=ViewportSize(width=1920, height=1400),  # Taller window to fit all stacked elements
            cross_origin_iframes=True,  # Enable cross-origin iframe extraction via CDP target switching
        )
    )
    await session.start()
    yield session
    # Teardown per test: kill() forcibly ends the session despite keep_alive=True.
    await session.kill()
class TestDOMSerializer:
    """Test DOM serializer with complex scenarios.

    Covers element extraction and click interaction across regular DOM,
    open/closed shadow roots, same-origin iframes, and cross-origin iframes
    (via CDP target switching).
    """

    @staticmethod
    async def _assert_click(tools, index: int, element_description: str, browser_session):
        """Click element *index* via tools.click() and raise AssertionError on any reported failure.

        Checks both the result's error field and its extracted_content for
        failure markers, since tools.click() reports problems through either.
        Returns the click result for further inspection.
        """
        result = await tools.click(index=index, browser_session=browser_session)
        if result.error:
            raise AssertionError(f'Click on {element_description} [{index}] failed: {result.error}')
        if result.extracted_content and (
            'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
        ):
            raise AssertionError(f'Click on {element_description} [{index}] failed: {result.extracted_content}')
        print(f' ✓ {element_description} [{index}] clicked successfully')
        return result

    async def test_dom_serializer_with_shadow_dom_and_iframes(self, browser_session, base_url):
        """Test DOM serializer extracts elements from shadow DOM, same-origin iframes, and cross-origin iframes.

        This test verifies:
        1. Elements are in the serializer (selector_map)
        2. We can click elements using click(index)

        Expected interactive elements:
        - Regular DOM: 3 elements (button, input, link on main page)
        - Shadow DOM: 3 elements (2 buttons, 1 input inside shadow root)
        - Same-origin iframe: 2 elements (button, input inside iframe)
        - Cross-origin iframe placeholder: about:blank (no interactive elements)
        - Iframe tags: 2 elements (the iframe elements themselves)
        Total: ~10 interactive elements
        """
        import asyncio
        import re

        from browser_use.tools.service import Tools

        tools = Tools()
        await tools.navigate(url=f'{base_url}/dom-test-main', new_tab=False, browser_session=browser_session)
        # Give iframes / shadow roots a moment to attach before serializing.
        await asyncio.sleep(1)

        # Get the browser state to access selector_map.
        browser_state_summary = await browser_session.get_browser_state_summary(
            include_screenshot=False,
            include_recent_events=False,
        )
        assert browser_state_summary is not None, 'Browser state summary should not be None'
        assert browser_state_summary.dom_state is not None, 'DOM state should not be None'
        selector_map = browser_state_summary.dom_state.selector_map
        print(f' Selector map: {selector_map.keys()}')
        print('\n📊 DOM Serializer Analysis:')
        print(f' Total interactive elements found: {len(selector_map)}')
        serialized_text = browser_state_summary.dom_state.llm_representation()
        print(f' Serialized text: {serialized_text}')

        # Assume at least 10 interactive elements are in the selector map.
        assert len(selector_map) >= 10, f'Should find at least 10 interactive elements, found {len(selector_map)}'

        # Assert all interactive elements marked with [123] in the serialized
        # text are present in the selector map.
        indices = re.findall(r'\[(\d+)\]', serialized_text)
        for idx in indices:
            assert int(idx) in selector_map.keys(), f'Element {idx} should be in selector map'
            print(f' ✓ Element {idx} found in selector map')

        regular_elements = []
        shadow_elements = []
        iframe_content_elements = []
        iframe_tags = []
        # Categorize elements by their IDs (more stable than hardcoded indices).
        for idx, element in selector_map.items():
            # Check if this is an iframe tag (not content inside iframe).
            if element.tag_name == 'iframe':
                iframe_tags.append((idx, element))
            # Check if element has an ID attribute.
            elif hasattr(element, 'attributes') and 'id' in element.attributes:
                elem_id = element.attributes['id'].lower()
                if elem_id.startswith('shadow-'):
                    # Shadow DOM elements have IDs starting with "shadow-".
                    shadow_elements.append((idx, element))
                elif elem_id.startswith('iframe-'):
                    # Iframe content elements have IDs starting with "iframe-".
                    iframe_content_elements.append((idx, element))
                else:
                    # Everything else is regular DOM.
                    regular_elements.append((idx, element))
            else:
                # Elements without IDs are regular DOM.
                regular_elements.append((idx, element))

        # Expected counts based on our test page structure:
        # - Regular DOM: 3-4 elements (button, input, link + possible cross-origin content)
        # - Shadow DOM: 3 elements (2 buttons, 1 input inside shadow root)
        # - Iframe content: 2 elements (button, input from same-origin iframe)
        # - Iframe tags: 2 elements (the iframe elements themselves)
        # Total: ~10-11 interactive elements depending on cross-origin iframe extraction
        print('\n✅ DOM Serializer Test Summary:')
        print(f' • Regular DOM: {len(regular_elements)} elements {"✓" if len(regular_elements) >= 3 else "✗"}')
        print(f' • Shadow DOM: {len(shadow_elements)} elements {"✓" if len(shadow_elements) >= 3 else "✗"}')
        print(
            f' • Same-origin iframe content: {len(iframe_content_elements)} elements {"✓" if len(iframe_content_elements) >= 2 else "✗"}'
        )
        print(f' • Iframe tags: {len(iframe_tags)} elements {"✓" if len(iframe_tags) >= 2 else "✗"}')
        print(f' • Total elements: {len(selector_map)}')

        # Verify we found elements from all sources.
        assert len(selector_map) >= 8, f'Should find at least 8 interactive elements, found {len(selector_map)}'
        assert len(regular_elements) >= 1, f'Should find at least 1 regular DOM element, found {len(regular_elements)}'
        assert len(shadow_elements) >= 1, f'Should find at least 1 shadow DOM element, found {len(shadow_elements)}'
        assert len(iframe_content_elements) >= 1, (
            f'Should find at least 1 iframe content element, found {len(iframe_content_elements)}'
        )

        # Now test clicking elements from each category using tools.click(index).
        print('\n🖱️ Testing Click Functionality:')

        # Test clicking a regular DOM element (button).
        # NOTE: compare against None explicitly — a selector index of 0 is valid but falsy.
        if regular_elements:
            regular_button_idx = next((idx for idx, el in regular_elements if 'regular-btn' in el.attributes.get('id', '')), None)
            if regular_button_idx is not None:
                await self._assert_click(tools, regular_button_idx, 'Regular DOM button', browser_session)
        # Test clicking a shadow DOM element (button).
        if shadow_elements:
            shadow_button_idx = next((idx for idx, el in shadow_elements if 'btn' in el.attributes.get('id', '')), None)
            if shadow_button_idx is not None:
                await self._assert_click(tools, shadow_button_idx, 'Shadow DOM button', browser_session)
        # Test clicking a same-origin iframe element (button).
        if iframe_content_elements:
            iframe_button_idx = next((idx for idx, el in iframe_content_elements if 'btn' in el.attributes.get('id', '')), None)
            if iframe_button_idx is not None:
                await self._assert_click(tools, iframe_button_idx, 'Same-origin iframe button', browser_session)

        # Validate click counter - verify all 3 clicks actually executed JavaScript.
        print('\n✅ Validating click counter...')
        # Get the CDP session for the main page (use target from a regular DOM element).
        # Note: browser_session.agent_focus_target_id may point to a different target than the page.
        if regular_elements and regular_elements[0][1].target_id:
            cdp_session = await browser_session.get_or_create_cdp_session(target_id=regular_elements[0][1].target_id)
        else:
            cdp_session = await browser_session.get_or_create_cdp_session()
        result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={
                'expression': 'window.getClickCount()',
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        click_count = result.get('result', {}).get('value', 0)
        print(f' Click counter value: {click_count}')
        assert click_count == 3, (
            f'Expected 3 clicks (Regular DOM + Shadow DOM + Iframe), but counter shows {click_count}. '
            f'This means some clicks did not execute JavaScript properly.'
        )
        print('\n🎉 DOM Serializer test completed successfully!')

    async def test_dom_serializer_element_counts_detailed(self, browser_session, base_url):
        """Detailed test to verify specific element types are captured correctly."""
        actions = [
            f"""
            {{
                "thinking": "Navigating to test page",
                "evaluation_previous_goal": "Starting",
                "memory": "Navigate",
                "next_goal": "Navigate",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/dom-test-main",
                            "new_tab": false
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "Done",
                "evaluation_previous_goal": "Navigated",
                "memory": "Complete",
                "next_goal": "Done",
                "action": [
                    {
                        "done": {
                            "text": "Done",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/dom-test-main',
            llm=mock_llm,
            browser_session=browser_session,
        )
        await agent.run(max_steps=2)

        # Get current browser state to access selector_map.
        browser_state_summary = await browser_session.get_browser_state_summary(
            include_screenshot=False,
            include_recent_events=False,
        )
        selector_map = browser_state_summary.dom_state.selector_map

        # Count different element types. Each element is counted once, with
        # button taking priority over input, and input over link.
        buttons = 0
        inputs = 0
        links = 0
        for idx, element in selector_map.items():
            element_str = str(element).lower()
            if 'button' in element_str or '<button' in element_str:
                buttons += 1
            elif 'input' in element_str or '<input' in element_str:
                inputs += 1
            elif 'link' in element_str or '<a' in element_str or 'href' in element_str:
                links += 1
        print('\n📊 Element Type Counts:')
        print(f' Buttons: {buttons}')
        print(f' Inputs: {inputs}')
        print(f' Links: {links}')
        print(f' Total: {len(selector_map)}')

        # We should have at least some of each type from the regular DOM.
        assert buttons >= 1, f'Should find at least 1 button, found {buttons}'
        assert inputs >= 1, f'Should find at least 1 input, found {inputs}'
        print('\n✅ Element type verification passed!')

    async def test_stacked_complex_scenarios(self, browser_session, base_url):
        """Test clicking through stacked complex scenarios and verify cross-origin iframe extraction.

        This test verifies:
        1. Open shadow DOM element interaction
        2. Closed shadow DOM element interaction (nested inside open shadow)
        3. Same-origin iframe element interaction (inside closed shadow)
        4. Cross-origin iframe placeholder with about:blank (no external dependencies)
        5. Truly nested structure: Open Shadow → Closed Shadow → Iframe
        """
        import asyncio

        from browser_use.tools.service import Tools

        tools = Tools()
        # Navigate to stacked test page.
        await tools.navigate(url=f'{base_url}/stacked-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(1)

        # Get browser state.
        browser_state_summary = await browser_session.get_browser_state_summary(
            include_screenshot=False,
            include_recent_events=False,
        )
        selector_map = browser_state_summary.dom_state.selector_map
        print(f'\n📊 Stacked Test - Found {len(selector_map)} elements')

        # Debug: Show all elements.
        print('\n🔍 All elements found:')
        for idx, element in selector_map.items():
            elem_id = element.attributes.get('id', 'NO_ID') if hasattr(element, 'attributes') else 'NO_ATTR'
            print(f' [{idx}] {element.tag_name} id={elem_id} target={element.target_id[-4:] if element.target_id else "None"}')

        # Categorize elements by ID substring.
        open_shadow_elements = []
        closed_shadow_elements = []
        iframe_elements = []
        final_button = None
        for idx, element in selector_map.items():
            if hasattr(element, 'attributes') and 'id' in element.attributes:
                elem_id = element.attributes['id'].lower()
                if 'open-shadow' in elem_id:
                    open_shadow_elements.append((idx, element))
                elif 'closed-shadow' in elem_id:
                    closed_shadow_elements.append((idx, element))
                elif 'iframe' in elem_id and element.tag_name != 'iframe':
                    iframe_elements.append((idx, element))
                elif 'final-button' in elem_id:
                    final_button = (idx, element)
        print('\n📋 Element Distribution:')
        print(f' Open Shadow: {len(open_shadow_elements)} elements')
        print(f' Closed Shadow: {len(closed_shadow_elements)} elements')
        print(f' Iframe content: {len(iframe_elements)} elements')
        print(f' Final button: {"Found" if final_button else "Not found"}')

        # Test clicking through each stacked layer.
        print('\n🖱️ Testing Click Functionality Through Stacked Layers:')
        clicks_performed = 0
        # NOTE: compare indices against None explicitly — index 0 is valid but falsy.
        # 1. Click open shadow button.
        if open_shadow_elements:
            open_shadow_btn = next((idx for idx, el in open_shadow_elements if 'btn' in el.attributes.get('id', '')), None)
            if open_shadow_btn is not None:
                await self._assert_click(tools, open_shadow_btn, 'Open Shadow DOM button', browser_session)
                clicks_performed += 1
        # 2. Click closed shadow button.
        if closed_shadow_elements:
            closed_shadow_btn = next((idx for idx, el in closed_shadow_elements if 'btn' in el.attributes.get('id', '')), None)
            if closed_shadow_btn is not None:
                await self._assert_click(tools, closed_shadow_btn, 'Closed Shadow DOM button', browser_session)
                clicks_performed += 1
        # 3. Click iframe button.
        if iframe_elements:
            iframe_btn = next((idx for idx, el in iframe_elements if 'btn' in el.attributes.get('id', '')), None)
            if iframe_btn is not None:
                await self._assert_click(tools, iframe_btn, 'Same-origin iframe button', browser_session)
                clicks_performed += 1

        # 4. Locate cross-origin iframe tag (can click the tag, but not elements inside).
        cross_origin_iframe_tag = None
        for idx, element in selector_map.items():
            if (
                element.tag_name == 'iframe'
                and hasattr(element, 'attributes')
                and 'cross-origin' in element.attributes.get('id', '').lower()
            ):
                cross_origin_iframe_tag = (idx, element)
                break

        # Verify cross-origin iframe extraction is working.
        # Check the full DOM tree (not just selector_map which only has interactive elements).
        def count_targets_in_tree(node, targets=None):
            """Collect the set of CDP target ids reachable in the simplified DOM tree."""
            if targets is None:
                targets = set()
            # SimplifiedNode has original_node which is an EnhancedDOMTreeNode.
            if hasattr(node, 'original_node') and node.original_node and node.original_node.target_id:
                targets.add(node.original_node.target_id)
            # Recursively check children.
            if hasattr(node, 'children') and node.children:
                for child in node.children:
                    count_targets_in_tree(child, targets)
            return targets

        all_targets = count_targets_in_tree(browser_state_summary.dom_state._root)
        print('\n📊 Cross-Origin Iframe Extraction:')
        print(f' Found elements from {len(all_targets)} different CDP targets in full DOM tree')
        if len(all_targets) >= 2:
            print(' ✅ Multi-target iframe extraction IS WORKING!')
            print(' ✓ Successfully extracted DOM from multiple CDP targets')
            print(' ✓ CDP target switching feature is enabled and functional')
        else:
            print(' ⚠️ Only found elements from 1 target (cross-origin extraction may not be working)')
        if cross_origin_iframe_tag:
            print(f'\n 📌 Found cross-origin iframe tag [{cross_origin_iframe_tag[0]}]')
            # Note: We don't click it — that wouldn't trigger our counter.

        # 5. Click final button (after all stacked elements).
        if final_button:
            await self._assert_click(tools, final_button[0], 'Final button (after stack)', browser_session)
            clicks_performed += 1

        # Validate click counter.
        print('\n✅ Validating click counter...')
        # Get CDP session from a non-iframe element (open shadow or final button).
        if open_shadow_elements:
            cdp_session = await browser_session.get_or_create_cdp_session(target_id=open_shadow_elements[0][1].target_id)
        elif final_button:
            cdp_session = await browser_session.get_or_create_cdp_session(target_id=final_button[1].target_id)
        else:
            cdp_session = await browser_session.get_or_create_cdp_session()
        result = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={
                'expression': 'window.getClickCount()',
                'returnByValue': True,
            },
            session_id=cdp_session.session_id,
        )
        click_count = result.get('result', {}).get('value', 0)
        print(f' Click counter value: {click_count}')
        print(f' Expected clicks: {clicks_performed}')
        assert click_count == clicks_performed, (
            f'Expected {clicks_performed} clicks, but counter shows {click_count}. '
            f'Some clicks did not execute JavaScript properly.'
        )
        print('\n🎉 Stacked scenario test completed successfully!')
        print(' ✓ Open shadow DOM clicks work')
        print(' ✓ Closed shadow DOM clicks work')
        print(' ✓ Same-origin iframe clicks work (can access elements inside)')
        print(' ✓ Cross-origin iframe extraction works (CDP target switching enabled)')
        print(' ✓ Truly nested structure works: Open Shadow → Closed Shadow → Iframe')
if __name__ == '__main__':
    """Run test in debug mode with manual fixture setup."""
    import asyncio
    import logging

    # Set up debug logging
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)-8s [%(name)s] %(message)s',
    )

    async def main():
        # Manual debug entry point: stands up the HTTP server and a visible
        # browser, then runs the stacked-scenarios test directly (no pytest).
        # Set up HTTP server fixture
        from pathlib import Path

        from pytest_httpserver import HTTPServer

        server = HTTPServer()
        server.start()
        # Load HTML templates from files (same as http_server fixture)
        test_dir = Path(__file__).parent
        main_page_html = (test_dir / 'test_page_stacked_template.html').read_text()
        # Set up routes using templates
        server.expect_request('/stacked-test').respond_with_data(main_page_html, content_type='text/html')
        base_url = f'http://{server.host}:{server.port}'
        print(f'\n🌐 HTTP Server running at {base_url}')
        # Set up browser session
        from browser_use.browser import BrowserSession
        from browser_use.browser.profile import BrowserProfile

        session = BrowserSession(
            browser_profile=BrowserProfile(
                headless=False,  # Set to False to see browser in action
                user_data_dir=None,
                keep_alive=True,
            )
        )
        try:
            await session.start()
            print('🚀 Browser session started\n')
            # Run the test
            test = TestDOMSerializer()
            await test.test_stacked_complex_scenarios(session, base_url)
            print('\n✅ Test completed successfully!')
        finally:
            # Cleanup runs even if the test raised, so the browser and server
            # never leak between debug runs.
            await session.kill()
            server.stop()
            print('\n🧹 Cleanup complete')

    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_dom_serializer.py",
"license": "MIT License",
"lines": 480,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/browser/test_navigation.py | """
Test navigation edge cases: broken pages, slow loading, non-existing pages.
Tests verify that:
1. Agent can handle navigation to broken/malformed HTML pages
2. Agent can handle slow-loading pages without hanging
3. Agent can handle non-existing pages (404, connection refused, etc.)
4. Agent can recover and continue making LLM calls after encountering these issues
All tests use:
- max_steps=3 to limit agent actions
- 120s timeout to fail if test takes too long
- Mock LLM to verify agent can still make decisions after navigation errors
Usage:
uv run pytest tests/ci/browser/test_navigation.py -v -s
"""
import asyncio
import time
import pytest
from pytest_httpserver import HTTPServer
from werkzeug import Response
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
    """Create and provide a test HTTP server for navigation tests.

    Routes:
        /broken   - truncated/malformed HTML
        /valid    - well-formed page for recovery tests
        /slow     - responds after a 10 second delay
        /notfound - returns HTTP 404
    """
    server = HTTPServer()
    server.start()

    # Static routes: path -> (body, HTTP status).
    static_pages = {
        '/broken': (
            '<html><head><title>Broken Page</title></head><body><h1>Incomplete HTML',
            200,
        ),
        '/valid': (
            '<html><head><title>Valid Page</title></head><body><h1>Valid Page</h1><p>This page loaded successfully</p></body></html>',
            200,
        ),
        '/notfound': (
            '<html><head><title>404 Not Found</title></head><body><h1>404 - Page Not Found</h1></body></html>',
            404,
        ),
    }
    for path, (body, status) in static_pages.items():
        server.expect_request(path).respond_with_data(body, status=status, content_type='text/html')

    # Slow loading page - delays 10 seconds before responding.
    def slow_handler(request):
        time.sleep(10)
        return Response(
            '<html><head><title>Slow Page</title></head><body><h1>Slow Loading Page</h1><p>This page took 10 seconds to load</p></body></html>',
            content_type='text/html',
        )

    server.expect_request('/slow').respond_with_handler(slow_handler)

    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Base URL (scheme://host:port) of the session-scoped test HTTP server."""
    host, port = http_server.host, http_server.port
    return f'http://{host}:{port}'
@pytest.fixture(scope='function')
async def browser_session():
    """Yield a started headless BrowserSession for navigation tests; kill it afterwards."""
    profile = BrowserProfile(
        headless=True,
        user_data_dir=None,
        keep_alive=True,
    )
    session = BrowserSession(browser_profile=profile)
    await session.start()
    yield session
    await session.kill()
class TestNavigationEdgeCases:
    """Test navigation error handling and recovery.

    Every test follows the same shape: drive a mock-LLM agent through a
    navigation scenario, bounded by max_steps=3 and a 120s watchdog, then
    assert the agent still produced a final result (i.e. kept making LLM
    calls after the navigation issue).
    """

    @staticmethod
    async def _run_agent(
        agent,
        timeout_failure: str,
        min_steps: int = 1,
        steps_failure: str = 'Agent should have completed at least one step',
    ):
        """Run *agent* (max 3 steps) under a 120 second watchdog and sanity-check the run.

        Args:
            agent: Agent under test.
            timeout_failure: pytest.fail() message used if the run exceeds 120s.
            min_steps: Minimum number of history steps expected.
            steps_failure: Assertion message when fewer than *min_steps* ran.

        Returns:
            The agent's run history.
        """
        try:
            history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
        except TimeoutError:
            pytest.fail(timeout_failure)
        assert len(history) >= min_steps, steps_failure
        # A final result proves the LLM was still called and functioning.
        final_result = history.final_result()
        assert final_result is not None, 'Agent should return a final result'
        return history

    async def test_broken_page_navigation(self, browser_session, base_url):
        """Test that agent can handle broken/malformed HTML and still make LLM calls."""
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the broken page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to broken page",
                "next_goal": "Navigate to broken page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/broken"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "I should check if the page loaded",
                "evaluation_previous_goal": "Navigated to page",
                "memory": "Checking page state",
                "next_goal": "Verify page exists",
                "action": [
                    {
                        "done": {
                            "text": "Page exists despite broken HTML",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/broken and check if page exists',
            llm=mock_llm,
            browser_session=browser_session,
        )
        await self._run_agent(agent, 'Test timed out after 2 minutes - agent hung on broken page')

    async def test_slow_loading_page(self, browser_session, base_url):
        """Test that agent can handle slow-loading pages without hanging."""
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the slow page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to slow page",
                "next_goal": "Navigate to slow page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/slow"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "The page loaded, even though it was slow",
                "evaluation_previous_goal": "Successfully navigated",
                "memory": "Page loaded after delay",
                "next_goal": "Complete task",
                "action": [
                    {
                        "done": {
                            "text": "Slow page loaded successfully",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/slow and wait for it to load',
            llm=mock_llm,
            browser_session=browser_session,
        )
        start_time = time.time()
        await self._run_agent(agent, 'Test timed out after 2 minutes - agent hung on slow page')
        elapsed = time.time() - start_time
        # The /slow route sleeps 10s before responding; the agent must wait it out.
        assert elapsed >= 10, f'Agent should have waited for slow page (10s delay), but only took {elapsed:.1f}s'

    async def test_nonexisting_page_404(self, browser_session, base_url):
        """Test that agent can handle 404 pages and still make LLM calls."""
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to the non-existing page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to 404 page",
                "next_goal": "Navigate to non-existing page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/notfound"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "I got a 404 error but the browser still works",
                "evaluation_previous_goal": "Navigated to 404 page",
                "memory": "Page not found",
                "next_goal": "Report that page does not exist",
                "action": [
                    {
                        "done": {
                            "text": "Page does not exist (404 error)",
                            "success": false
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {base_url}/notfound and check if page exists',
            llm=mock_llm,
            browser_session=browser_session,
        )
        await self._run_agent(agent, 'Test timed out after 2 minutes - agent hung on 404 page')

    async def test_nonexisting_domain(self, browser_session):
        """Test that agent can handle completely non-existing domains (connection refused)."""
        # Use a localhost port that's not listening.
        nonexisting_url = 'http://localhost:59999/page'
        actions = [
            f"""
            {{
                "thinking": "I need to navigate to a non-existing domain",
                "evaluation_previous_goal": "Starting task",
                "memory": "Attempting to navigate",
                "next_goal": "Navigate to non-existing domain",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{nonexisting_url}"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "The connection failed but I can still proceed",
                "evaluation_previous_goal": "Connection failed",
                "memory": "Domain does not exist",
                "next_goal": "Report failure",
                "action": [
                    {
                        "done": {
                            "text": "Domain does not exist (connection refused)",
                            "success": false
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'Navigate to {nonexisting_url} and check if it exists',
            llm=mock_llm,
            browser_session=browser_session,
        )
        await self._run_agent(agent, 'Test timed out after 2 minutes - agent hung on non-existing domain')

    async def test_recovery_after_navigation_error(self, browser_session, base_url):
        """Test that agent can recover and navigate to valid page after encountering error."""
        actions = [
            f"""
            {{
                "thinking": "First, I'll try the broken page",
                "evaluation_previous_goal": "Starting task",
                "memory": "Navigating to broken page",
                "next_goal": "Navigate to broken page first",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/broken"
                        }}
                    }}
                ]
            }}
            """,
            f"""
            {{
                "thinking": "That page was broken, let me try a valid page now",
                "evaluation_previous_goal": "Broken page loaded",
                "memory": "Now navigating to valid page",
                "next_goal": "Navigate to valid page",
                "action": [
                    {{
                        "navigate": {{
                            "url": "{base_url}/valid"
                        }}
                    }}
                ]
            }}
            """,
            """
            {
                "thinking": "The valid page loaded successfully after the broken one",
                "evaluation_previous_goal": "Valid page loaded",
                "memory": "Successfully recovered from error",
                "next_goal": "Complete task",
                "action": [
                    {
                        "done": {
                            "text": "Successfully navigated to valid page after broken page",
                            "success": true
                        }
                    }
                ]
            }
            """,
        ]
        mock_llm = create_mock_llm(actions=actions)
        agent = Agent(
            task=f'First navigate to {base_url}/broken, then navigate to {base_url}/valid',
            llm=mock_llm,
            browser_session=browser_session,
        )
        await self._run_agent(
            agent,
            'Test timed out after 2 minutes - agent could not recover from broken page',
            min_steps=2,
            steps_failure='Agent should have completed at least 2 steps (broken -> valid)',
        )
        # Verify final page is the valid one.
        final_url = await browser_session.get_current_page_url()
        assert final_url.endswith('/valid'), f'Final URL should be /valid, got {final_url}'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_navigation.py",
"license": "MIT License",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/browser/test_screenshot.py | import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser.events import NavigateToUrlEvent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
    """Create and provide a test HTTP server for screenshot tests.

    Serves a single route, /screenshot-page, with visually distinctive
    content so screenshot capture has something non-trivial to render.
    """
    server = HTTPServer()
    server.start()
    # Route: Page with visible content for screenshot testing
    server.expect_request('/screenshot-page').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head>
        <title>Screenshot Test Page</title>
        <style>
        body { font-family: Arial; padding: 20px; background: #f0f0f0; }
        h1 { color: #333; font-size: 32px; }
        .content { background: white; padding: 20px; border-radius: 8px; margin: 10px 0; }
        </style>
        </head>
        <body>
        <h1>Screenshot Test Page</h1>
        <div class="content">
        <p>This page is used to test screenshot capture with vision enabled.</p>
        <p>The agent should capture a screenshot when navigating to this page.</p>
        </div>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    yield server
    # Session-scoped teardown: stop the server after the whole test session.
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Base URL (scheme://host:port) of the session-scoped test HTTP server."""
    host, port = http_server.host, http_server.port
    return f'http://{host}:{port}'
@pytest.fixture(scope='function')
async def browser_session():
    """Yield a freshly started headless BrowserSession; tear it down after the test."""
    profile = BrowserProfile(headless=True)
    browser = BrowserSession(browser_profile=profile)
    await browser.start()
    yield browser
    await browser.kill()
@pytest.mark.asyncio
async def test_basic_screenshots(browser_session: BrowserSession, httpserver):
    """Navigate to a local page and ensure screenshot helpers return bytes."""
    demo_html = """
<html><body><h1 id='title'>Hello</h1><p>Screenshot demo.</p></body></html>
"""
    httpserver.expect_request('/demo').respond_with_data(demo_html, content_type='text/html')
    demo_url = httpserver.url_for('/demo')
    # Drive navigation through the event bus rather than the high-level tools API.
    await browser_session.event_bus.dispatch(NavigateToUrlEvent(url=demo_url, new_tab=False))
    viewport_shot = await browser_session.take_screenshot(full_page=False)
    assert viewport_shot, 'Viewport screenshot returned no data'
    element_shot = await browser_session.screenshot_element('h1')
    assert element_shot, 'Element screenshot returned no data'
async def test_agent_screenshot_with_vision_enabled(browser_session, base_url):
    """Test that agent captures screenshots when vision is enabled.

    This integration test verifies that:
    1. Agent with vision=True navigates to a page
    2. After prepare_context/update message manager, screenshot is captured
    3. Screenshot is included in the agent's history state
    """
    # Create mock LLM actions: one navigation step, then done.
    actions = [
        f"""
        {{
            "thinking": "I'll navigate to the screenshot test page",
            "evaluation_previous_goal": "Starting task",
            "memory": "Navigating to page",
            "next_goal": "Navigate to test page",
            "action": [
                {{
                    "navigate": {{
                        "url": "{base_url}/screenshot-page",
                        "new_tab": false
                    }}
                }}
            ]
        }}
        """,
        """
        {
            "thinking": "Page loaded, completing task",
            "evaluation_previous_goal": "Page loaded",
            "memory": "Task completed",
            "next_goal": "Complete task",
            "action": [
                {
                    "done": {
                        "text": "Successfully navigated and captured screenshot",
                        "success": true
                    }
                }
            ]
        }
        """,
    ]
    mock_llm = create_mock_llm(actions=actions)
    # Create agent with vision enabled
    agent = Agent(
        task=f'Navigate to {base_url}/screenshot-page',
        llm=mock_llm,
        browser_session=browser_session,
        use_vision=True,  # Enable vision/screenshots
    )
    # Run agent
    history = await agent.run(max_steps=2)
    # Verify agent completed successfully
    assert len(history) >= 1, 'Agent should have completed at least 1 step'
    final_result = history.final_result()
    assert final_result is not None, 'Agent should return a final result'
    # Verify screenshots were captured in the history: at least one step's
    # browser state must carry a screenshot_path pointing at a non-empty file.
    screenshot_found = False
    for i, step in enumerate(history.history):
        # Check if browser state has screenshot path
        if step.state and hasattr(step.state, 'screenshot_path') and step.state.screenshot_path:
            screenshot_found = True
            print(f'\n✅ Step {i + 1}: Screenshot captured at {step.state.screenshot_path}')
            # Verify screenshot file exists (it should be saved to disk)
            import os

            assert os.path.exists(step.state.screenshot_path), f'Screenshot file should exist at {step.state.screenshot_path}'
            # Verify screenshot file has content
            screenshot_size = os.path.getsize(step.state.screenshot_path)
            assert screenshot_size > 0, f'Screenshot file should have content, got {screenshot_size} bytes'
            print(f' Screenshot size: {screenshot_size} bytes')
    assert screenshot_found, 'At least one screenshot should be captured when vision is enabled'
    print('\n🎉 Integration test passed: Screenshots are captured correctly with vision enabled')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_screenshot.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/browser/test_tabs.py | """
Test multi-tab operations: creation, switching, closing, and background tabs.
Tests verify that:
1. Agent can create multiple tabs (3) and switch between them
2. Agent can close tabs with vision=True
3. Agent can handle buttons that open new tabs in background
4. Agent can continue and call done() after each tab operation
5. Browser state doesn't timeout during background tab operations
All tests use:
- max_steps between 3 and 6 to allow multiple tab operations
- 120s timeout to fail if test takes too long
- Mock LLM to verify agent can still make decisions after tab operations
Usage:
uv run pytest tests/ci/browser/test_tabs.py -v -s
"""
import asyncio
import time
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='session')
def http_server():
	"""Session-scoped HTTP server serving the fixed pages used by the tab tests."""
	server = HTTPServer()
	server.start()

	# Simple titled pages: the home page plus three numbered pages.
	simple_pages = [
		('/home', 'Home Page', 'This is the home page'),
		('/page1', 'Page 1', 'First test page'),
		('/page2', 'Page 2', 'Second test page'),
		('/page3', 'Page 3', 'Third test page'),
	]
	for route, title, body_text in simple_pages:
		server.expect_request(route).respond_with_data(
			f'<html><head><title>{title}</title></head><body><h1>{title}</h1><p>{body_text}</p></body></html>',
			content_type='text/html',
		)

	# Background-tab page: both a target=_blank link and a window.open button that
	# open /page3 in a new tab without leaving the current one.
	server.expect_request('/background-tab-test').respond_with_data(
		"""
		<!DOCTYPE html>
		<html>
		<head><title>Background Tab Test</title></head>
		<body style="padding: 20px; font-family: Arial;">
			<h1>Background Tab Test</h1>
			<p>Click the link below to open a new tab in the background:</p>
			<a href="/page3" target="_blank" id="open-tab-link">Open New Tab (link)</a>
			<br><br>
			<button id="open-tab-btn" onclick="window.open('/page3', '_blank'); document.getElementById('status').textContent='Tab opened!'">
				Open New Tab (button)
			</button>
			<p id="status" style="margin-top: 20px; color: green;"></p>
		</body>
		</html>
		""",
		content_type='text/html',
	)

	yield server
	server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
	"""Base URL (http://host:port) of the session-scoped test server."""
	host, port = http_server.host, http_server.port
	return f'http://{host}:{port}'
@pytest.fixture(scope='function')
async def browser_session():
	"""Per-test headless browser session (no persistent profile) for tab tests."""
	profile = BrowserProfile(
		headless=True,
		user_data_dir=None,
		keep_alive=True,
	)
	session = BrowserSession(browser_profile=profile)
	await session.start()
	yield session
	await session.kill()
class TestMultiTabOperations:
	"""Test multi-tab creation, switching, and closing.

	Each test scripts the LLM with a fixed list of JSON action payloads (one per
	agent step) via create_mock_llm, runs the agent under an asyncio timeout, and
	asserts on the resulting history and tab state.
	"""

	async def test_create_and_switch_three_tabs(self, browser_session, base_url):
		"""Test that agent can create 3 tabs, switch between them, and call done().

		This test verifies that browser state is retrieved between each step.
		"""
		start_time = time.time()
		# Scripted mock-LLM responses, consumed in order, one per agent step.
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll start by navigating to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 1 in a new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page 1 in new tab",
				"next_goal": "Open page 1 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Open page2 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 2 in a new tab",
				"evaluation_previous_goal": "Page 1 opened in new tab",
				"memory": "Opening page 2 in new tab",
				"next_goal": "Open page 2 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page2",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 4: Switch to first tab
			# NOTE(review): "0000" assumes the first tab's 4-char id — confirm against TabInfo id format
			"""
			{
				"thinking": "Now I'll switch back to the first tab",
				"evaluation_previous_goal": "Page 2 opened in new tab",
				"memory": "Switching to first tab",
				"next_goal": "Switch to first tab",
				"action": [
					{
						"switch": {
							"tab_id": "0000"
						}
					}
				]
			}
			""",
			# Action 5: Done
			"""
			{
				"thinking": "I've successfully created 3 tabs and switched between them",
				"evaluation_previous_goal": "Switched to first tab",
				"memory": "All tabs created and switched",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully created 3 tabs and switched between them",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home, then open {base_url}/page1 and {base_url}/page2 in new tabs, then switch back to the first tab',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			elapsed = time.time() - start_time
			print(f'\n⏱️ Test completed in {elapsed:.2f} seconds')
			print(f'📊 Completed {len(history)} steps')
			# Verify each step has browser state
			for i, step in enumerate(history.history):
				assert step.state is not None, f'Step {i} should have browser state'
				assert step.state.url is not None, f'Step {i} should have URL in browser state'
				print(f' Step {i + 1}: URL={step.state.url}, tabs={len(step.state.tabs) if step.state.tabs else 0}')
			assert len(history) >= 4, 'Agent should have completed at least 4 steps'
			# Verify we have 3 tabs open
			tabs = await browser_session.get_tabs()
			assert len(tabs) >= 3, f'Should have at least 3 tabs open, got {len(tabs)}'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
			# Note: Test is fast (< 1s) because mock LLM returns instantly and pages are simple,
			# but browser state IS being retrieved correctly between steps as verified above
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - agent hung during tab operations')

	async def test_close_tab_with_vision(self, browser_session, base_url):
		"""Test that agent can close a tab with vision=True and call done()."""
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll start by navigating to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Now I'll open page 1 in a new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page 1 in new tab",
				"next_goal": "Open page 1 in new tab",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Close the current tab
			"""
			{
				"thinking": "Now I'll close the current tab (page1)",
				"evaluation_previous_goal": "Page 1 opened in new tab",
				"memory": "Closing current tab",
				"next_goal": "Close current tab",
				"action": [
					{
						"close": {
							"tab_id": "0001"
						}
					}
				]
			}
			""",
			# Action 4: Done
			"""
			{
				"thinking": "I've successfully closed the tab",
				"evaluation_previous_goal": "Tab closed",
				"memory": "Tab closed successfully",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully closed the tab",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home, then open {base_url}/page1 in a new tab, then close the page1 tab',
			llm=mock_llm,
			browser_session=browser_session,
			use_vision=True,  # Enable vision for this test
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			assert len(history) >= 3, 'Agent should have completed at least 3 steps'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - agent hung during tab closing with vision')

	async def test_background_tab_open_no_timeout(self, browser_session, base_url):
		"""Test that browser state doesn't timeout when a new tab opens in the background."""
		start_time = time.time()
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll navigate to the home page first",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new background tab (stay on home page)
			f"""
			{{
				"thinking": "I'll open page1 in a new background tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening background tab",
				"next_goal": "Open background tab without switching to it",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Immediately check browser state after background tab opens
			"""
			{
				"thinking": "After opening background tab, browser state should still be accessible",
				"evaluation_previous_goal": "Background tab opened",
				"memory": "Verifying browser state works",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully opened background tab, browser state remains accessible",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task=f'Navigate to {base_url}/home and open {base_url}/page1 in a new tab',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - this tests if browser state times out when new tabs open
		try:
			history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
			elapsed = time.time() - start_time
			print(f'\n⏱️ Test completed in {elapsed:.2f} seconds')
			print(f'📊 Completed {len(history)} steps')
			# Verify each step has browser state (the key test - no timeouts)
			for i, step in enumerate(history.history):
				assert step.state is not None, f'Step {i} should have browser state'
				assert step.state.url is not None, f'Step {i} should have URL in browser state'
				print(f' Step {i + 1}: URL={step.state.url}, tabs={len(step.state.tabs) if step.state.tabs else 0}')
			assert len(history) >= 2, 'Agent should have completed at least 2 steps'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
			# Verify we have at least 2 tabs
			tabs = await browser_session.get_tabs()
			print(f' Final tab count: {len(tabs)}')
			assert len(tabs) >= 2, f'Should have at least 2 tabs after opening background tab, got {len(tabs)}'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - browser state timed out after opening background tab')

	async def test_rapid_tab_operations_no_timeout(self, browser_session, base_url):
		"""Test that browser state doesn't timeout during rapid tab operations."""
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll navigate to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Opening page1 in new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page1",
				"next_goal": "Open page1",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Open page2 in new tab
			f"""
			{{
				"thinking": "Opening page2 in new tab",
				"evaluation_previous_goal": "Page1 opened",
				"memory": "Opening page2",
				"next_goal": "Open page2",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page2",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 4: Open page3 in new tab
			f"""
			{{
				"thinking": "Opening page3 in new tab",
				"evaluation_previous_goal": "Page2 opened",
				"memory": "Opening page3",
				"next_goal": "Open page3",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page3",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 5: Verify browser state is still accessible
			"""
			{
				"thinking": "All tabs opened rapidly, browser state should still be accessible",
				"evaluation_previous_goal": "Page3 opened",
				"memory": "All tabs opened",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully opened 4 tabs rapidly without timeout",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task='Open multiple tabs rapidly and verify browser state remains accessible',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120)
			assert len(history) >= 4, 'Agent should have completed at least 4 steps'
			# Verify we have 4 tabs open
			tabs = await browser_session.get_tabs()
			assert len(tabs) >= 4, f'Should have at least 4 tabs open, got {len(tabs)}'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - browser state timed out during rapid tab operations')

	async def test_multiple_tab_switches_and_close(self, browser_session, base_url):
		"""Test that agent can switch between multiple tabs and close one."""
		actions = [
			# Action 1: Navigate to home page
			f"""
			{{
				"thinking": "I'll start by navigating to the home page",
				"evaluation_previous_goal": "Starting task",
				"memory": "Navigating to home page",
				"next_goal": "Navigate to home page",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/home",
							"new_tab": false
						}}
					}}
				]
			}}
			""",
			# Action 2: Open page1 in new tab
			f"""
			{{
				"thinking": "Opening page 1 in new tab",
				"evaluation_previous_goal": "Home page loaded",
				"memory": "Opening page 1",
				"next_goal": "Open page 1",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page1",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 3: Open page2 in new tab
			f"""
			{{
				"thinking": "Opening page 2 in new tab",
				"evaluation_previous_goal": "Page 1 opened",
				"memory": "Opening page 2",
				"next_goal": "Open page 2",
				"action": [
					{{
						"navigate": {{
							"url": "{base_url}/page2",
							"new_tab": true
						}}
					}}
				]
			}}
			""",
			# Action 4: Switch to tab 1
			"""
			{
				"thinking": "Switching to tab 1 (page1)",
				"evaluation_previous_goal": "Page 2 opened",
				"memory": "Switching to page 1",
				"next_goal": "Switch to page 1",
				"action": [
					{
						"switch": {
							"tab_id": "0001"
						}
					}
				]
			}
			""",
			# Action 5: Close current tab
			"""
			{
				"thinking": "Closing the current tab (page1)",
				"evaluation_previous_goal": "Switched to page 1",
				"memory": "Closing page 1",
				"next_goal": "Close page 1",
				"action": [
					{
						"close": {
							"tab_id": "0001"
						}
					}
				]
			}
			""",
			# Action 6: Done
			"""
			{
				"thinking": "Successfully completed all tab operations",
				"evaluation_previous_goal": "Tab closed",
				"memory": "All operations completed",
				"next_goal": "Complete task",
				"action": [
					{
						"done": {
							"text": "Successfully created, switched, and closed tabs",
							"success": true
						}
					}
				]
			}
			""",
		]
		mock_llm = create_mock_llm(actions=actions)
		agent = Agent(
			task='Create 3 tabs, switch to the second one, then close it',
			llm=mock_llm,
			browser_session=browser_session,
		)
		# Run with timeout - should complete within 2 minutes
		try:
			history = await asyncio.wait_for(agent.run(max_steps=6), timeout=120)
			assert len(history) >= 5, 'Agent should have completed at least 5 steps'
			# Verify agent completed successfully
			final_result = history.final_result()
			assert final_result is not None, 'Agent should return a final result'
			assert 'Successfully' in final_result, 'Agent should report success'
		except TimeoutError:
			pytest.fail('Test timed out after 2 minutes - agent hung during multiple tab operations')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_tabs.py",
"license": "MIT License",
"lines": 609,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/browser/test_true_cross_origin_click.py | """Test clicking elements inside TRUE cross-origin iframes (external domains)."""
import asyncio
import pytest
from browser_use.browser.profile import BrowserProfile, ViewportSize
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
@pytest.fixture
async def browser_session():
	"""Headless browser session with cross-origin iframe extraction turned on."""
	profile = BrowserProfile(
		headless=True,
		user_data_dir=None,
		keep_alive=True,
		window_size=ViewportSize(width=1920, height=1400),
		cross_origin_iframes=True,  # Enable cross-origin iframe extraction
	)
	session = BrowserSession(browser_profile=profile)
	await session.start()
	yield session
	await session.kill()
class TestTrueCrossOriginIframeClick:
	"""Test clicking elements inside true cross-origin iframes."""

	async def test_click_element_in_true_cross_origin_iframe(self, httpserver, browser_session: BrowserSession):
		"""Verify that elements inside TRUE cross-origin iframes (example.com) can be clicked.

		This test uses example.com which is a real external domain, testing actual cross-origin
		iframe extraction and clicking via CDP target switching.
		"""
		# Create main page with TRUE cross-origin iframe pointing to example.com
		main_html = """
		<!DOCTYPE html>
		<html>
		<head><title>True Cross-Origin Test</title></head>
		<body>
			<h1>Main Page</h1>
			<button id="main-button">Main Button</button>
			<iframe id="cross-origin" src="https://example.com" style="width: 800px; height: 600px;"></iframe>
		</body>
		</html>
		"""
		# Serve the main page
		httpserver.expect_request('/true-cross-origin-test').respond_with_data(main_html, content_type='text/html')
		url = httpserver.url_for('/true-cross-origin-test')

		# Navigate to the page
		await browser_session.navigate_to(url)
		# Wait for cross-origin iframe to load (network request)
		await asyncio.sleep(5)

		# Get DOM state with cross-origin iframe extraction enabled
		browser_state = await browser_session.get_browser_state_summary(
			include_screenshot=False,
			include_recent_events=False,
		)
		assert browser_state.dom_state is not None
		state = browser_state.dom_state
		print(f'\n📊 Found {len(state.selector_map)} total elements')

		# Find elements from different targets
		targets_found = set()
		main_page_elements = []
		cross_origin_elements = []
		for idx, element in state.selector_map.items():
			target_id = element.target_id
			targets_found.add(target_id)
			# Check if element is from cross-origin iframe (example.com)
			# Look for links - example.com has a link to iana.org/domains/reserved
			if element.attributes:
				href = element.attributes.get('href', '')
				element_id = element.attributes.get('id', '')
				# example.com has a link to iana.org/domains/reserved
				if 'iana.org' in href:
					cross_origin_elements.append((idx, element))
					print(f' ✅ Found cross-origin element: [{idx}] {element.tag_name} href={href}')
				elif element_id == 'main-button':
					main_page_elements.append((idx, element))

		# Verify we found elements from at least 2 different targets
		print(f'\n🎯 Found elements from {len(targets_found)} different CDP targets')
		# Check if cross-origin iframe loaded; skip (not fail) when the environment
		# prevents the iframe from producing its own CDP target or its elements.
		if len(targets_found) < 2:
			print('⚠️ Warning: Cross-origin iframe did not create separate CDP target')
			print(' This may indicate cross_origin_iframes feature is not working as expected')
			pytest.skip('Cross-origin iframe did not create separate CDP target - skipping test')
		if len(cross_origin_elements) == 0:
			print('⚠️ Warning: No elements found from example.com iframe')
			print(' Network may be restricted in CI environment')
			pytest.skip('No elements extracted from example.com - skipping click test')

		# Verify we found at least one element from the cross-origin iframe
		assert len(cross_origin_elements) > 0, 'Expected to find at least one element from cross-origin iframe (example.com)'

		# Try clicking the cross-origin element
		print('\n🖱️ Testing Click on True Cross-Origin Iframe Element:')
		tools = Tools()
		link_idx, link_element = cross_origin_elements[0]
		print(f' Attempting to click element [{link_idx}] from example.com iframe...')
		# NOTE(review): pytest.fail raises a BaseException-derived outcome, so it should
		# not be re-caught by the `except Exception` below - confirm with pytest docs
		try:
			result = await tools.click(index=link_idx, browser_session=browser_session)
			# Check for errors
			if result.error:
				pytest.fail(f'Click on cross-origin element [{link_idx}] failed with error: {result.error}')
			if result.extracted_content and (
				'not available' in result.extracted_content.lower() or 'failed' in result.extracted_content.lower()
			):
				pytest.fail(f'Click on cross-origin element [{link_idx}] failed: {result.extracted_content}')
			print(f' ✅ Click succeeded on cross-origin element [{link_idx}]!')
			print(' 🎉 True cross-origin iframe element clicking works!')
		except Exception as e:
			pytest.fail(f'Exception while clicking cross-origin element [{link_idx}]: {e}')

		print('\n✅ Test passed: True cross-origin iframe elements can be clicked')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_true_cross_origin_click.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/infrastructure/test_registry_validation.py | """
Comprehensive tests for the action registry system - Validation and patterns.
Tests cover:
1. Type 1 and Type 2 patterns
2. Validation rules
3. Decorated function behavior
4. Parameter model generation
5. Parameter ordering
"""
import asyncio
import logging
import pytest
from pydantic import Field
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.tools.registry.service import Registry
from browser_use.tools.registry.views import ActionModel as BaseActionModel
from tests.ci.conftest import create_mock_llm
# Configure logging
# DEBUG level so registry registration/validation internals show up in test output.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)  # module-level logger per logging convention
class TestType1Pattern:
	"""Test Type 1 Pattern: Pydantic model first (from normalization tests)"""

	def test_type1_with_param_model(self):
		"""Type 1: action(params: Model, special_args...) should work"""
		reg = Registry()

		class ClickAction(BaseActionModel):
			index: int
			delay: float = 0.0

		@reg.action('Click element', param_model=ClickAction)
		async def click_element(params: ClickAction, browser_session: BrowserSession):
			return ActionResult(extracted_content=f'Clicked {params.index}')

		# Registered under the function name, with the explicit model attached.
		assert 'click_element' in reg.registry.actions
		entry = reg.registry.actions['click_element']
		assert entry.param_model == ClickAction

		# The decorator's wrapper must expose a kwargs-only signature.
		import inspect

		for parameter in inspect.signature(click_element).parameters.values():
			assert parameter.kind in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.VAR_KEYWORD)

	def test_type1_with_multiple_special_params(self):
		"""Type 1 with multiple special params should work"""
		reg = Registry()

		class ExtractAction(BaseActionModel):
			goal: str
			include_links: bool = False

		from browser_use.llm.base import BaseChatModel

		@reg.action('Extract content', param_model=ExtractAction)
		async def extract_content(params: ExtractAction, browser_session: BrowserSession, page_extraction_llm: BaseChatModel):
			return ActionResult(extracted_content=params.goal)

		assert 'extract_content' in reg.registry.actions
class TestType2Pattern:
	"""Test Type 2 Pattern: loose parameters (from normalization tests)"""

	def test_type2_simple_action(self):
		"""Type 2: action(arg1, arg2, special_args...) should work"""
		reg = Registry()

		@reg.action('Fill field')
		async def fill_field(index: int, text: str, browser_session: BrowserSession):
			return ActionResult(extracted_content=f'Filled {index} with {text}')

		# Registered under the function name, with an auto-generated params model.
		assert 'fill_field' in reg.registry.actions
		generated = reg.registry.actions['fill_field'].param_model
		assert generated is not None
		for field_name in ('index', 'text'):
			assert field_name in generated.model_fields

	def test_type2_with_defaults(self):
		"""Type 2 with default values should preserve defaults"""
		reg = Registry()

		@reg.action('Scroll page')
		async def scroll_page(direction: str = 'down', amount: int = 100, browser_session: BrowserSession = None):  # type: ignore
			return ActionResult(extracted_content=f'Scrolled {direction} by {amount}')

		# Defaults must survive into the generated JSON schema.
		props = reg.registry.actions['scroll_page'].param_model.model_json_schema()['properties']
		assert props['direction']['default'] == 'down'
		assert props['amount']['default'] == 100

	def test_type2_no_action_params(self):
		"""Type 2 with only special params should work"""
		reg = Registry()

		@reg.action('Save PDF')
		async def save_pdf(browser_session: BrowserSession):
			return ActionResult(extracted_content='Saved PDF')

		# Model should be empty (or contain nothing beyond an optional title).
		generated_fields = reg.registry.actions['save_pdf'].param_model.model_fields
		assert len(generated_fields) == 0 or all(f in ['title'] for f in generated_fields)

	def test_no_special_params_action(self):
		"""Test action with no special params (like wait action in Tools)"""
		reg = Registry()

		@reg.action('Wait for x seconds default 3')
		async def wait(seconds: int = 3):
			await asyncio.sleep(seconds)
			return ActionResult(extracted_content=f'Waited {seconds} seconds')

		# Registered, with the user-facing 'seconds' param and its default intact.
		assert 'wait' in reg.registry.actions
		entry = reg.registry.actions['wait']
		assert 'seconds' in entry.param_model.model_fields
		assert entry.param_model.model_json_schema()['properties']['seconds']['default'] == 3
class TestValidationRules:
	"""Test validation rules for action registration (from normalization tests)"""

	def test_error_on_kwargs_in_original_function(self):
		"""Should error if original function has kwargs"""
		reg = Registry()

		with pytest.raises(ValueError, match='kwargs.*not allowed'):

			@reg.action('Bad action')
			async def bad_action(index: int, browser_session: BrowserSession, **kwargs):
				pass

	def test_error_on_special_param_name_with_wrong_type(self):
		"""Should error if special param name used with wrong type"""
		reg = Registry()

		# Declaring 'browser_session' with a non-BrowserSession annotation is rejected.
		with pytest.raises(ValueError, match='conflicts with special argument.*browser_session: BrowserSession'):

			@reg.action('Bad session')
			async def bad_session(browser_session: str):
				pass

	def test_special_params_must_match_type(self):
		"""Special params with correct types should work"""
		reg = Registry()

		@reg.action('Good action')
		async def good_action(
			index: int,
			browser_session: BrowserSession,  # Correct type
		):
			return ActionResult()

		assert 'good_action' in reg.registry.actions
class TestDecoratedFunctionBehavior:
	"""Test behavior of decorated action functions (from normalization tests)"""

	async def test_decorated_function_only_accepts_kwargs(self):
		"""Decorated functions should only accept kwargs, no positional args"""
		reg = Registry()

		class MockBrowserSession:
			async def get_current_page(self):
				return None

		@reg.action('Click')
		async def click(index: int, browser_session: BrowserSession):
			return ActionResult()

		# Positional invocation must be rejected by the decorator's wrapper.
		with pytest.raises(TypeError, match='positional arguments'):
			await click(5, MockBrowserSession())

	async def test_decorated_function_accepts_params_model(self):
		"""Decorated function should accept params as model"""
		reg = Registry()

		class MockBrowserSession:
			async def get_current_page(self):
				return None

		@reg.action('Input text')
		async def input_text(index: int, text: str, browser_session: BrowserSession):
			return ActionResult(extracted_content=f'{index}:{text}')

		# Invoke through an instance of the generated params model.
		params_cls = reg.registry.actions['input_text'].param_model
		outcome = await input_text(params=params_cls(index=5, text='hello'), browser_session=MockBrowserSession())
		assert outcome.extracted_content == '5:hello'

	async def test_decorated_function_ignores_extra_kwargs(self):
		"""Decorated function should ignore extra kwargs for easy unpacking"""
		reg = Registry()

		@reg.action('Simple action')
		async def simple_action(value: int):
			return ActionResult(extracted_content=str(value))

		# Unknown injected kwargs should simply be dropped by the wrapper.
		special_context = {
			'browser_session': None,
			'page_extraction_llm': create_mock_llm(),
			'context': {'extra': 'data'},
			'unknown_param': 'ignored',
		}
		params_cls = reg.registry.actions['simple_action'].param_model
		outcome = await simple_action(params=params_cls(value=42), **special_context)
		assert outcome.extracted_content == '42'
class TestParamsModelGeneration:
	"""Test automatic parameter model generation (from normalization tests)"""

	def test_generates_model_from_non_special_args(self):
		"""Should generate param model from non-special positional args"""
		reg = Registry()

		@reg.action('Complex action')
		async def complex_action(
			query: str,
			max_results: int,
			include_images: bool = True,
			browser_session: BrowserSession = None,  # type: ignore
		):
			return ActionResult()

		generated_fields = reg.registry.actions['complex_action'].param_model.model_fields
		# User-facing args are included; injected special args are excluded.
		for expected in ('query', 'max_results', 'include_images'):
			assert expected in generated_fields
		assert 'browser_session' not in generated_fields

	def test_preserves_type_annotations(self):
		"""Generated model should preserve type annotations"""
		reg = Registry()

		@reg.action('Typed action')
		async def typed_action(
			count: int,
			rate: float,
			enabled: bool,
			name: str | None = None,
			browser_session: BrowserSession = None,  # type: ignore
		):
			return ActionResult()

		props = reg.registry.actions['typed_action'].param_model.model_json_schema()['properties']
		# Scalar annotations map onto the matching JSON-schema types.
		assert props['count']['type'] == 'integer'
		assert props['rate']['type'] == 'number'
		assert props['enabled']['type'] == 'boolean'
		# 'str | None' becomes an anyOf whose second member permits null.
		assert 'null' in props['name']['anyOf'][1]['type']
class TestParameterOrdering:
	"""Test mixed ordering of parameters (from normalization tests)"""

	def test_mixed_param_ordering(self):
		"""Should handle any ordering of action params and special params"""
		reg = Registry()

		from browser_use.llm.base import BaseChatModel

		# Special params interleaved between user-facing ones.
		@reg.action('Mixed params')
		async def mixed_action(
			first: str,
			browser_session: BrowserSession,
			second: int,
			third: bool = True,
			page_extraction_llm: BaseChatModel = None,  # type: ignore
		):
			return ActionResult()

		generated_fields = reg.registry.actions['mixed_action'].param_model.model_fields
		# Only the user-facing params land in the model, defaults intact.
		assert set(generated_fields.keys()) == {'first', 'second', 'third'}
		assert generated_fields['third'].default is True

	def test_extract_content_pattern_registration(self):
		"""Test that the extract_content pattern with mixed params registers correctly"""
		reg = Registry()

		# Problematic pattern: positional arg, then special args, then kwargs with defaults.
		@reg.action('Extract content from page')
		async def extract_content(
			goal: str,
			page_extraction_llm,
			include_links: bool = False,
		):
			return ActionResult(extracted_content=f'Goal: {goal}, include_links: {include_links}')

		assert 'extract_content' in reg.registry.actions
		entry = reg.registry.actions['extract_content']

		# Only user-facing params appear in the generated model.
		generated_fields = entry.param_model.model_fields
		assert 'goal' in generated_fields
		assert 'include_links' in generated_fields
		assert generated_fields['include_links'].default is False
		assert 'page' not in generated_fields
		assert 'page_extraction_llm' not in generated_fields

		# Name and description come from the function and the decorator argument.
		assert entry.name == 'extract_content'
		assert entry.description == 'Extract content from page'
class TestParamsModelArgsAndKwargs:
	async def test_browser_session_double_kwarg(self):
		"""Run the test to diagnose browser_session parameter issue

		This test demonstrates the problem and our fix. The issue happens because:

		1. In tools/service.py, we have:
		```python
		@registry.action('Google Sheets: Select a specific cell or range of cells')
		async def select_cell_or_range(browser_session: BrowserSession, cell_or_range: str):
			return await _select_cell_or_range(browser_session=browser_session, cell_or_range=cell_or_range)
		```

		2. When registry.execute_action calls this function, it adds browser_session to extra_args:
		```python
		# In registry/service.py
		if 'browser_session' in parameter_names:
			extra_args['browser_session'] = browser_session
		```

		3. Then later, when calling action.function:
		```python
		return await action.function(**params_dict, **extra_args)
		```

		4. This effectively means browser_session is passed twice:
		- Once through extra_args['browser_session']
		- And again through params_dict['browser_session'] (from the original function)

		The fix is to pass browser_session positionally in select_cell_or_range:
		```python
		return await _select_cell_or_range(browser_session, cell_or_range)
		```

		This test confirms that this approach works.
		"""
		from browser_use.tools.registry.service import Registry
		from browser_use.tools.registry.views import ActionModel

		# Simple context for testing
		class TestContext:
			pass

		# Minimal stand-in for a BrowserSession; only the method the registry may call.
		class MockBrowserSession:
			async def get_current_page(self):
				return None

		browser_session = MockBrowserSession()
		# Create registry
		registry = Registry[TestContext]()

		# Model that doesn't include browser_session (renamed to avoid pytest collecting it)
		class CellActionParams(ActionModel):
			value: str = Field(description='Test value')

		# Model that includes browser_session
		class ModelWithBrowser(ActionModel):
			value: str = Field(description='Test value')
			browser_session: BrowserSession = None  # type: ignore

		# Create a custom param model for select_cell_or_range
		class CellRangeParams(ActionModel):
			cell_or_range: str = Field(description='Cell or range to select')

		# Use the provided real browser session
		# Test with the real issue: select_cell_or_range
		# logger.info('\n\n=== Test: Simulating select_cell_or_range issue with correct model ===')
		# Define the function without using our registry - this will be a helper function
		async def _select_cell_or_range(browser_session, cell_or_range):
			"""Helper function for select_cell_or_range"""
			return f'Selected cell {cell_or_range}'

		# This simulates the actual issue we're seeing in the real code
		# The browser_session parameter is in both the function signature and passed as a named arg
		@registry.action('Google Sheets: Select a cell or range', param_model=CellRangeParams)
		async def select_cell_or_range(browser_session: BrowserSession, cell_or_range: str):
			# logger.info(f'select_cell_or_range called with browser_session={browser_session}, cell_or_range={cell_or_range}')
			# PROBLEMATIC LINE: browser_session is passed by name, matching the parameter name
			# This is what causes the "got multiple values" error in the real code
			return await _select_cell_or_range(browser_session=browser_session, cell_or_range=cell_or_range)

		# Fix attempt: Register a version that uses positional args instead
		@registry.action('Google Sheets: Select a cell or range (fixed)', param_model=CellRangeParams)
		async def select_cell_or_range_fixed(browser_session: BrowserSession, cell_or_range: str):
			# logger.info(f'select_cell_or_range_fixed called with browser_session={browser_session}, cell_or_range={cell_or_range}')
			# FIXED LINE: browser_session is passed positionally, avoiding the parameter name conflict
			return await _select_cell_or_range(browser_session, cell_or_range)

		# Another attempt: explicitly call using **kwargs to simulate what the registry does
		@registry.action('Google Sheets: Select with kwargs', param_model=CellRangeParams)
		async def select_with_kwargs(browser_session: BrowserSession, cell_or_range: str):
			# logger.info(f'select_with_kwargs called with browser_session={browser_session}, cell_or_range={cell_or_range}')
			# Get params and extra_args, like in Registry.execute_action
			params = {'cell_or_range': cell_or_range, 'browser_session': browser_session}
			extra_args = {'browser_session': browser_session}
			# Try to call _select_cell_or_range with both params and extra_args
			# This will fail with "got multiple values for keyword argument 'browser_session'"
			try:
				# logger.info('Attempting to call with both params and extra_args (should fail):')
				await _select_cell_or_range(**params, **extra_args)
			except TypeError as e:
				# logger.info(f'Expected error: {e}')
				# Remove browser_session from params to avoid the conflict
				params_fixed = dict(params)
				del params_fixed['browser_session']
				# logger.info(f'Fixed params: {params_fixed}')
				# This should work
				result = await _select_cell_or_range(**params_fixed, **extra_args)
				# logger.info(f'Success after fix: {result}')
				return result

		# Test the original problematic version
		# logger.info('\n--- Testing original problematic version ---')
		try:
			result1 = await registry.execute_action(
				'select_cell_or_range',
				{'cell_or_range': 'A1:F100'},
				browser_session=browser_session,  # type: ignore
			)
			# logger.info(f'Success! Result: {result1}')
		except Exception as e:
			# NOTE(review): `logger` is assumed to be a module-level logger defined earlier in this file — confirm.
			logger.error(f'Error: {str(e)}')

		# Test the fixed version (using positional args)
		# logger.info('\n--- Testing fixed version (positional args) ---')
		try:
			result2 = await registry.execute_action(
				'select_cell_or_range_fixed',
				{'cell_or_range': 'A1:F100'},
				browser_session=browser_session,  # type: ignore
			)
			# logger.info(f'Success! Result: {result2}')
		except Exception as e:
			logger.error(f'Error: {str(e)}')

		# Test with kwargs version that simulates what Registry.execute_action does
		# logger.info('\n--- Testing kwargs simulation version ---')
		try:
			result3 = await registry.execute_action(
				'select_with_kwargs',
				{'cell_or_range': 'A1:F100'},
				browser_session=browser_session,  # type: ignore
			)
			# logger.info(f'Success! Result: {result3}')
		except Exception as e:
			logger.error(f'Error: {str(e)}')

		# Manual test of our theory: browser_session is passed twice
		# logger.info('\n--- Direct test of our theory ---')
		try:
			# Create the model instance
			params = CellRangeParams(cell_or_range='A1:F100')
			# First check if the extra_args approach works
			# logger.info('Checking if extra_args approach works:')
			extra_args = {'browser_session': browser_session}
			# If we were to modify Registry.execute_action:
			# 1. Check if the function parameter needs browser_session
			parameter_names = ['browser_session', 'cell_or_range']
			browser_keys = ['browser_session', 'browser', 'browser_context']
			# Create params dict
			param_dict = params.model_dump()
			# logger.info(f'params dict before: {param_dict}')
			# Apply our fix: remove browser_session from params dict
			for key in browser_keys:
				if key in param_dict and key in extra_args:
					# logger.info(f'Removing {key} from params dict')
					del param_dict[key]
			# logger.info(f'params dict after: {param_dict}')
			# logger.info(f'extra_args: {extra_args}')
			# This would be the fixed code:
			# return await action.function(**param_dict, **extra_args)
			# Call directly to test
			result3 = await select_cell_or_range(**param_dict, **extra_args)
			# logger.info(f'Success with our fix! Result: {result3}')
		except Exception as e:
			logger.error(f'Error with our manual test: {str(e)}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/infrastructure/test_registry_validation.py",
"license": "MIT License",
"lines": 420,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/model_test_helper.py | """Shared test helper for LLM model tests."""
import os
import pytest
from browser_use.agent.service import Agent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
async def run_model_button_click_test(
	model_class,
	model_name: str,
	api_key_env: str | None,
	extra_kwargs: dict,
	httpserver,
):
	"""Test that an LLM model can click a button.

	This test verifies:
	1. Model can be initialized with API key
	2. Agent can navigate and click a button
	3. Button click is verified by checking page state change
	4. Completes within max 2 steps
	"""
	# Resolve the API key from the environment; skip the test when it is absent.
	api_key = None
	if api_key_env is not None:
		api_key = os.getenv(api_key_env)
		if not api_key:
			pytest.skip(f'{api_key_env} not set - skipping test')

	# Azure additionally needs an endpoint; inject it into the ctor kwargs.
	from browser_use.llm.azure.chat import ChatAzureOpenAI

	if model_class is ChatAzureOpenAI:
		azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
		if not azure_endpoint:
			pytest.skip('AZURE_OPENAI_ENDPOINT not set - skipping test')
		extra_kwargs = {**extra_kwargs, 'azure_endpoint': azure_endpoint}

	# Serve a page whose button flips a marker div to SUCCESS when clicked.
	page = """
	<!DOCTYPE html>
	<html>
	<head><title>Button Test</title></head>
	<body>
		<h1>Button Click Test</h1>
		<button id="test-button" onclick="document.getElementById('result').innerText='SUCCESS'">
			Click Me
		</button>
		<div id="result">NOT_CLICKED</div>
	</body>
	</html>
	"""
	httpserver.expect_request('/').respond_with_data(page, content_type='text/html')

	# Build the chat model; extra_kwargs last so callers may override anything.
	llm = model_class(
		**{
			'model': model_name,
			**({'api_key': api_key} if api_key is not None else {}),
			**extra_kwargs,
		}
	)  # type: ignore[arg-type]

	browser = BrowserSession(
		browser_profile=BrowserProfile(
			headless=True,
			user_data_dir=None,  # ephemeral profile
		)
	)
	try:
		await browser.start()

		# URL embedded in the task triggers auto-navigation.
		agent = Agent(
			task=f'{httpserver.url_for("/")} - Click the button',
			llm=llm,
			browser_session=browser,
			max_steps=2,  # Max 2 steps as per requirements
		)
		outcome = await agent.run()

		assert outcome is not None
		assert len(outcome.history) > 0

		# The click is confirmed if any step's browser state captured the SUCCESS marker.
		button_clicked = any(step.state_message and 'SUCCESS' in step.state_message for step in outcome.history)
		assert button_clicked, 'Button was not clicked - SUCCESS not found in any page state'
	finally:
		await browser.kill()
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/model_test_helper.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/test_llm_anthropic.py | """Test Anthropic model button click."""
from browser_use.llm.anthropic.chat import ChatAnthropic
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_anthropic_claude_sonnet_4_0(httpserver):
	"""Test Anthropic claude-sonnet-4-0 can click a button."""
	# Delegate to the shared button-click harness with Anthropic-specific settings.
	settings = dict(
		model_class=ChatAnthropic,
		model_name='claude-sonnet-4-0',
		api_key_env='ANTHROPIC_API_KEY',
		extra_kwargs={},
	)
	await run_model_button_click_test(httpserver=httpserver, **settings)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_llm_anthropic.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/test_llm_azure.py | """Test Azure OpenAI model button click."""
from browser_use.llm.azure.chat import ChatAzureOpenAI
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_azure_gpt_4_1_mini(httpserver):
	"""Test Azure OpenAI gpt-4.1-mini can click a button."""
	# Delegate to the shared harness; the helper injects the Azure endpoint itself.
	settings = dict(
		model_class=ChatAzureOpenAI,
		model_name='gpt-4.1-mini',
		api_key_env='AZURE_OPENAI_KEY',
		extra_kwargs={},
	)
	await run_model_button_click_test(httpserver=httpserver, **settings)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_llm_azure.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/test_llm_browseruse.py | """Test Browser Use model button click."""
from browser_use.llm.browser_use.chat import ChatBrowserUse
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_browseruse_bu_latest(httpserver):
	"""Test Browser Use bu-latest can click a button."""
	# Delegate to the shared button-click harness with Browser Use settings.
	settings = dict(
		model_class=ChatBrowserUse,
		model_name='bu-latest',
		api_key_env='BROWSER_USE_API_KEY',
		extra_kwargs={},
	)
	await run_model_button_click_test(httpserver=httpserver, **settings)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_llm_browseruse.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/test_llm_google.py | """Test Google model button click."""
from browser_use.llm.google.chat import ChatGoogle
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_google_gemini_flash_latest(httpserver):
	"""Test Google gemini-flash-latest can click a button."""
	# Delegate to the shared button-click harness with Google settings.
	settings = dict(
		model_class=ChatGoogle,
		model_name='gemini-flash-latest',
		api_key_env='GOOGLE_API_KEY',
		extra_kwargs={},
	)
	await run_model_button_click_test(httpserver=httpserver, **settings)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_llm_google.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/models/test_llm_openai.py | """Test OpenAI model button click."""
from browser_use.llm.openai.chat import ChatOpenAI
from tests.ci.models.model_test_helper import run_model_button_click_test
async def test_openai_gpt_4_1_mini(httpserver):
	"""Test OpenAI gpt-4.1-mini can click a button."""
	# Delegate to the shared button-click harness with OpenAI settings.
	settings = dict(
		model_class=ChatOpenAI,
		model_name='gpt-4.1-mini',
		api_key_env='OPENAI_API_KEY',
		extra_kwargs={},
	)
	await run_model_button_click_test(httpserver=httpserver, **settings)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/models/test_llm_openai.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/security/test_ip_blocking.py | """
Comprehensive tests for IP address blocking in SecurityWatchdog.
Tests cover IPv4, IPv6, localhost, private networks, edge cases, and interactions
with allowed_domains and prohibited_domains configurations.
"""
from bubus import EventBus
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog
class TestIPv4Blocking:
	"""Test blocking of IPv4 addresses."""

	@staticmethod
	def _watchdog(block_ip_addresses: bool) -> SecurityWatchdog:
		"""Build a SecurityWatchdog backed by a fresh profile, session and event bus."""
		profile = BrowserProfile(block_ip_addresses=block_ip_addresses, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_block_public_ipv4_addresses(self):
		"""Test that public IPv4 addresses are blocked when block_ip_addresses=True."""
		watchdog = self._watchdog(True)
		for url in (
			'http://180.1.1.1/supersafe.txt',
			'https://8.8.8.8/',
			'http://1.1.1.1:8080/api',
			'https://142.250.185.46/search',
			'http://93.184.216.34/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_private_ipv4_networks(self):
		"""Test that private network IPv4 addresses are blocked."""
		watchdog = self._watchdog(True)
		# Private network ranges (RFC 1918)
		for url in (
			'http://192.168.1.1/',
			'http://192.168.0.100/admin',
			'http://10.0.0.1/',
			'http://10.255.255.255/',
			'http://172.16.0.1/',
			'http://172.31.255.254/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_localhost_ipv4(self):
		"""Test that localhost IPv4 addresses are blocked."""
		watchdog = self._watchdog(True)
		# Localhost/loopback addresses, including arbitrary 127.x.x.x
		for url in (
			'http://127.0.0.1/',
			'http://127.0.0.1:8080/',
			'https://127.0.0.1:3000/api/test',
			'http://127.1.2.3/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv4_with_ports_and_paths(self):
		"""Test that IPv4 addresses with ports and paths are blocked."""
		watchdog = self._watchdog(True)
		# Ports, paths, query strings and fragments must not bypass blocking.
		for url in (
			'http://8.8.8.8:80/',
			'https://8.8.8.8:443/',
			'http://192.168.1.1:8080/',
			'http://10.0.0.1:3000/api',
			'http://1.2.3.4/path/to/resource',
			'http://5.6.7.8/api?key=value',
			'https://9.10.11.12/path/to/file.html#anchor',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_allow_ipv4_when_blocking_disabled(self):
		"""Test that IPv4 addresses are allowed when block_ip_addresses=False (default)."""
		watchdog = self._watchdog(False)
		for url in (
			'http://180.1.1.1/supersafe.txt',
			'http://192.168.1.1/',
			'http://127.0.0.1:8080/',
			'http://8.8.8.8/',
		):
			assert watchdog._is_url_allowed(url) is True
class TestIPv6Blocking:
	"""Test blocking of IPv6 addresses."""

	@staticmethod
	def _watchdog(block_ip_addresses: bool) -> SecurityWatchdog:
		"""Build a SecurityWatchdog backed by a fresh profile, session and event bus."""
		profile = BrowserProfile(block_ip_addresses=block_ip_addresses, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_block_ipv6_addresses(self):
		"""Test that IPv6 addresses are blocked when block_ip_addresses=True."""
		watchdog = self._watchdog(True)
		# Public IPv6 addresses (bracketed, as URL syntax requires)
		for url in (
			'http://[2001:db8::1]/',
			'https://[2001:4860:4860::8888]/',
			'http://[2606:4700:4700::1111]/path',
			'https://[2001:db8:85a3::8a2e:370:7334]/api',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv6_localhost(self):
		"""Test that IPv6 localhost addresses are blocked."""
		watchdog = self._watchdog(True)
		# IPv6 loopback in both compressed and fully expanded form
		for url in (
			'http://[::1]/',
			'http://[::1]:8080/',
			'https://[::1]:3000/api',
			'http://[0:0:0:0:0:0:0:1]/',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_block_ipv6_with_ports_and_paths(self):
		"""Test that IPv6 addresses with ports and paths are blocked."""
		watchdog = self._watchdog(True)
		for url in (
			'http://[2001:db8::1]:80/',
			'https://[2001:db8::1]:443/',
			'http://[::1]:8080/api',
			'http://[2001:db8::1]/path/to/resource',
			'https://[2001:db8::1]/api?key=value',
		):
			assert watchdog._is_url_allowed(url) is False

	def test_allow_ipv6_when_blocking_disabled(self):
		"""Test that IPv6 addresses are allowed when block_ip_addresses=False."""
		watchdog = self._watchdog(False)
		for url in (
			'http://[2001:db8::1]/',
			'http://[::1]:8080/',
			'https://[2001:4860:4860::8888]/',
		):
			assert watchdog._is_url_allowed(url) is True
class TestDomainNamesStillAllowed:
	"""Test that regular domain names are not affected by IP blocking."""

	@staticmethod
	def _watchdog() -> SecurityWatchdog:
		"""Build a SecurityWatchdog with IP blocking enabled."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_domain_names_allowed_with_ip_blocking(self):
		"""Test that domain names continue to work when IP blocking is enabled."""
		watchdog = self._watchdog()
		# Hostnames are never IPs — this includes the literal name "localhost".
		for url in (
			'https://example.com',
			'https://www.google.com',
			'http://subdomain.example.org/path',
			'https://api.github.com/repos',
			'http://localhost/',
			'http://localhost:8080/api',
		):
			assert watchdog._is_url_allowed(url) is True

	def test_domains_with_numbers_allowed(self):
		"""Test that domain names containing numbers are still allowed."""
		watchdog = self._watchdog()
		# Digits inside a hostname do not make it an IP address.
		for url in (
			'https://example123.com',
			'https://123example.com',
			'https://server1.example.com',
			'http://web2.site.org',
		):
			assert watchdog._is_url_allowed(url) is True
class TestIPBlockingWithAllowedDomains:
	"""Test interaction between IP blocking and allowed_domains."""

	@staticmethod
	def _watchdog(**profile_kwargs) -> SecurityWatchdog:
		"""Build a SecurityWatchdog from the given BrowserProfile keyword arguments."""
		profile = BrowserProfile(headless=True, user_data_dir=None, **profile_kwargs)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_ip_blocked_even_in_allowed_domains(self):
		"""Test that IPs are blocked even if they're in allowed_domains list."""
		# Note: It doesn't make sense to add IPs to allowed_domains, but if someone does,
		# IP blocking should take precedence
		watchdog = self._watchdog(
			block_ip_addresses=True,
			allowed_domains=['example.com', '192.168.1.1'],  # IP in allowlist
		)
		# IP should be blocked despite being in allowed_domains
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
		# Regular domain should work as expected
		assert watchdog._is_url_allowed('https://example.com') is True
		# Other domains not in allowed_domains should be blocked
		assert watchdog._is_url_allowed('https://other.com') is False

	def test_allowed_domains_with_ip_blocking_enabled(self):
		"""Test that allowed_domains works normally with IP blocking enabled."""
		watchdog = self._watchdog(
			block_ip_addresses=True,
			allowed_domains=['example.com', '*.google.com'],
		)
		# Allowed domains should work
		assert watchdog._is_url_allowed('https://example.com') is True
		assert watchdog._is_url_allowed('https://www.google.com') is True
		# Not allowed domains should be blocked
		assert watchdog._is_url_allowed('https://other.com') is False
		# IPs should be blocked regardless
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
class TestIPBlockingWithProhibitedDomains:
	"""Test interaction between IP blocking and prohibited_domains."""

	@staticmethod
	def _watchdog(**profile_kwargs) -> SecurityWatchdog:
		"""Build a SecurityWatchdog from the given BrowserProfile keyword arguments."""
		profile = BrowserProfile(headless=True, user_data_dir=None, **profile_kwargs)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_ip_blocked_regardless_of_prohibited_domains(self):
		"""Test that IPs are blocked when IP blocking is on, independent of prohibited_domains."""
		watchdog = self._watchdog(block_ip_addresses=True, prohibited_domains=['example.com'])
		# IPs should be blocked due to IP blocking
		assert watchdog._is_url_allowed('http://192.168.1.1/') is False
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		# Prohibited domain should be blocked
		assert watchdog._is_url_allowed('https://example.com') is False
		# Other domains should be allowed
		assert watchdog._is_url_allowed('https://other.com') is True

	def test_prohibited_domains_without_ip_blocking(self):
		"""Test that prohibited_domains works normally when IP blocking is disabled."""
		watchdog = self._watchdog(block_ip_addresses=False, prohibited_domains=['example.com', '8.8.8.8'])
		# Prohibited domain should be blocked
		assert watchdog._is_url_allowed('https://example.com') is False
		# IP in prohibited list should be blocked (by prohibited_domains, not IP blocking)
		assert watchdog._is_url_allowed('http://8.8.8.8/') is False
		# Other IPs should be allowed (IP blocking is off)
		assert watchdog._is_url_allowed('http://192.168.1.1/') is True
		# Other domains should be allowed
		assert watchdog._is_url_allowed('https://other.com') is True
class TestEdgeCases:
	"""Test edge cases and invalid inputs."""

	@staticmethod
	def _watchdog() -> SecurityWatchdog:
		"""Build a SecurityWatchdog with IP blocking enabled."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_invalid_urls_handled_gracefully(self):
		"""Test that invalid URLs don't cause crashes."""
		watchdog = self._watchdog()
		# Malformed URLs must simply be rejected, never raise.
		for url in ('not-a-url', '', 'http://', '://example.com'):
			assert watchdog._is_url_allowed(url) is False

	def test_internal_browser_urls_allowed(self):
		"""Test that internal browser URLs are still allowed with IP blocking."""
		watchdog = self._watchdog()
		# Internal browser pages bypass all blocking.
		for url in (
			'about:blank',
			'chrome://new-tab-page/',
			'chrome://new-tab-page',
			'chrome://newtab/',
		):
			assert watchdog._is_url_allowed(url) is True

	def test_ipv4_lookalike_domains_allowed(self):
		"""Test that domains that look like IPs but aren't are still allowed."""
		watchdog = self._watchdog()
		# The IP parser rejects these, so they are treated as plain hostnames.
		for url in (
			'http://999.999.999.999/',  # octets out of range
			'http://1.2.3.4.5/',  # too many octets
			'http://1.2.3/',  # too few octets
		):
			assert watchdog._is_url_allowed(url) is True

	def test_different_schemes_with_ips(self):
		"""Test that IP blocking works across different URL schemes."""
		watchdog = self._watchdog()
		# http/https, ftp and websocket schemes are all parsed the same way.
		for url in (
			'http://192.168.1.1/',
			'https://192.168.1.1/',
			'ftp://192.168.1.1/',
			'ws://192.168.1.1:8080/',
			'wss://192.168.1.1:8080/',
		):
			assert watchdog._is_url_allowed(url) is False
class TestIsIPAddressHelper:
	"""Exercise the _is_ip_address helper method directly."""

	def _make_watchdog(self) -> SecurityWatchdog:
		"""Build a SecurityWatchdog with IP blocking switched on."""
		profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		return SecurityWatchdog(browser_session=session, event_bus=EventBus())

	def test_valid_ipv4_detection(self):
		"""Well-formed IPv4 literals are recognised as IP addresses."""
		checker = self._make_watchdog()
		for addr in ('127.0.0.1', '192.168.1.1', '8.8.8.8', '255.255.255.255', '0.0.0.0'):
			assert checker._is_ip_address(addr) is True

	def test_valid_ipv6_detection(self):
		"""Well-formed IPv6 literals (without URL brackets) are recognised."""
		checker = self._make_watchdog()
		ipv6_literals = (
			'::1',
			'2001:db8::1',
			'2001:4860:4860::8888',
			'fe80::1',
			'2001:db8:85a3::8a2e:370:7334',
		)
		for addr in ipv6_literals:
			assert checker._is_ip_address(addr) is True

	def test_invalid_ip_detection(self):
		"""Hostnames and malformed literals are not classified as IPs."""
		checker = self._make_watchdog()
		non_ips = [
			# Plain domain names
			'example.com',
			'www.google.com',
			'localhost',
			# Structurally invalid IPv4 strings
			'999.999.999.999',
			'1.2.3',
			'1.2.3.4.5',
			'not-an-ip',
			'',
			# The helper inspects bare hostnames only, so port/path suffixes disqualify
			'192.168.1.1:8080',
			'192.168.1.1/path',
		]
		for candidate in non_ips:
			assert checker._is_ip_address(candidate) is False
class TestDefaultBehavior:
	"""Verify the out-of-the-box behaviour: IP blocking is opt-in."""

	def test_default_block_ip_addresses_is_false(self):
		"""A profile created without the flag leaves blocking disabled."""
		profile = BrowserProfile(headless=True, user_data_dir=None)
		assert profile.block_ip_addresses is False

	def test_no_blocking_by_default(self):
		"""With a default profile, IP-literal URLs are all permitted."""
		profile = BrowserProfile(headless=True, user_data_dir=None)
		session = BrowserSession(browser_profile=profile)
		checker = SecurityWatchdog(browser_session=session, event_bus=EventBus())
		permitted = (
			'http://180.1.1.1/supersafe.txt',
			'http://192.168.1.1/',
			'http://127.0.0.1:8080/',
			'http://[::1]/',
			'https://8.8.8.8/',
		)
		for url in permitted:
			assert checker._is_url_allowed(url) is True
class TestComplexScenarios:
	"""Realistic configurations combining several security settings."""

	def test_mixed_configuration_comprehensive(self):
		"""Allowlist wins over denylist, and IP blocking still applies."""
		profile = BrowserProfile(
			block_ip_addresses=True,
			allowed_domains=['example.com', '*.google.com'],
			prohibited_domains=['bad.example.com'],  # ignored once an allowlist exists
			headless=True,
			user_data_dir=None,
		)
		session = BrowserSession(browser_profile=profile)
		checker = SecurityWatchdog(browser_session=session, event_bus=EventBus())
		# Hosts on the allowlist (including wildcard matches) pass.
		assert checker._is_url_allowed('https://example.com') is True
		assert checker._is_url_allowed('https://www.google.com') is True
		assert checker._is_url_allowed('https://mail.google.com') is True
		# Raw IPs are rejected by the blocking flag.
		assert checker._is_url_allowed('http://8.8.8.8/') is False
		assert checker._is_url_allowed('http://192.168.1.1/') is False
		# Anything off the allowlist is rejected too.
		assert checker._is_url_allowed('https://other.com') is False

	def test_localhost_development_scenario(self):
		"""Hostname 'localhost' passes while its IP form is rejected."""
		profile = BrowserProfile(
			block_ip_addresses=True,
			headless=True,
			user_data_dir=None,  # no domain restrictions
		)
		session = BrowserSession(browser_profile=profile)
		checker = SecurityWatchdog(browser_session=session, event_bus=EventBus())
		# Named hosts (including localhost) are allowed...
		assert checker._is_url_allowed('http://localhost:3000/') is True
		assert checker._is_url_allowed('http://localhost:8080/api') is True
		# ...but the equivalent loopback IP literal is not.
		assert checker._is_url_allowed('http://127.0.0.1:3000/') is False
		# External domain names pass; external IP literals do not.
		assert checker._is_url_allowed('https://api.example.com') is True
		assert checker._is_url_allowed('http://8.8.8.8/') is False

	def test_security_hardening_scenario(self):
		"""Strict allowlist plus IP blocking rejects everything else."""
		profile = BrowserProfile(
			block_ip_addresses=True,
			allowed_domains=['example.com', 'api.example.com'],
			headless=True,
			user_data_dir=None,
		)
		session = BrowserSession(browser_profile=profile)
		checker = SecurityWatchdog(browser_session=session, event_bus=EventBus())
		# Only the two explicitly allowed hosts pass.
		assert checker._is_url_allowed('https://example.com') is True
		assert checker._is_url_allowed('https://api.example.com') is True
		# IP literals, off-list domains, and loopback are all rejected.
		assert checker._is_url_allowed('http://192.168.1.1/') is False
		assert checker._is_url_allowed('https://other.com') is False
		assert checker._is_url_allowed('http://127.0.0.1/') is False
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/security/test_ip_blocking.py",
"license": "MIT License",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_file_system_docx.py | """Tests for DOCX file support in the FileSystem."""
from pathlib import Path
import pytest
from browser_use.filesystem.file_system import (
DocxFile,
FileSystem,
)
class TestDocxFile:
	"""Unit tests covering DOCX read/write behaviour of the FileSystem."""

	@pytest.mark.asyncio
	async def test_create_docx_file(self, tmp_path: Path):
		"""Writing markdown-style text to a .docx target succeeds."""
		filesystem = FileSystem(tmp_path)
		body = """# Heading 1
## Heading 2
### Heading 3
Regular paragraph text.
Another paragraph."""
		outcome = await filesystem.write_file('test.docx', body)
		assert 'successfully' in outcome.lower()
		assert 'test.docx' in filesystem.list_files()

	@pytest.mark.asyncio
	async def test_read_docx_file_internal(self, tmp_path: Path):
		"""A DOCX written through the FileSystem can be read back."""
		filesystem = FileSystem(tmp_path)
		body = """# Title
Some content here."""
		await filesystem.write_file('test.docx', body)
		text = await filesystem.read_file('test.docx')
		assert 'test.docx' in text
		assert 'Title' in text or 'content' in text

	@pytest.mark.asyncio
	async def test_read_docx_file_external(self, tmp_path: Path):
		"""A DOCX produced by python-docx outside the workspace is readable."""
		from docx import Document

		target = tmp_path / 'external.docx'
		document = Document()
		document.add_heading('Test Heading', level=1)
		document.add_paragraph('Test paragraph content.')
		document.save(str(target))
		filesystem = FileSystem(tmp_path / 'workspace')
		outcome = await filesystem.read_file_structured(str(target), external_file=True)
		assert 'message' in outcome
		assert 'Test Heading' in outcome['message']
		assert 'Test paragraph content' in outcome['message']

	def test_docx_file_extension(self):
		"""DocxFile reports the expected extension and full name."""
		document = DocxFile(name='test')
		assert document.extension == 'docx'
		assert document.full_name == 'test.docx'

	@pytest.mark.asyncio
	async def test_docx_with_unicode_characters(self, tmp_path: Path):
		"""Non-ASCII text survives a DOCX round trip."""
		filesystem = FileSystem(tmp_path)
		body = """# Unicode Test 🚀
Chinese: 你好世界
Arabic: مرحبا بالعالم
Emoji: 😀 👍 🎉"""
		outcome = await filesystem.write_file('unicode.docx', body)
		assert 'successfully' in outcome.lower()
		round_trip = await filesystem.read_file('unicode.docx')
		# Emoji rendering can vary by system, so only the plain text is checked.
		assert 'Unicode Test' in round_trip

	@pytest.mark.asyncio
	async def test_empty_docx_file(self, tmp_path: Path):
		"""An empty string still produces a valid DOCX file."""
		filesystem = FileSystem(tmp_path)
		outcome = await filesystem.write_file('empty.docx', '')
		assert 'successfully' in outcome.lower()

	@pytest.mark.asyncio
	async def test_large_docx_file(self, tmp_path: Path):
		"""A 1000-line document writes and reads back intact."""
		filesystem = FileSystem(tmp_path)
		body = '\n'.join(f'Line {i}: This is a test line with some content.' for i in range(1000))
		outcome = await filesystem.write_file('large.docx', body)
		assert 'successfully' in outcome.lower()
		# First and last lines must survive the round trip.
		round_trip = await filesystem.read_file('large.docx')
		assert 'Line 0:' in round_trip
		assert 'Line 999:' in round_trip

	@pytest.mark.asyncio
	async def test_corrupted_docx_file(self, tmp_path: Path):
		"""Reading a file with a .docx name but invalid bytes reports an error."""
		broken = tmp_path / 'corrupted.docx'
		broken.write_bytes(b'This is not a valid DOCX file')
		filesystem = FileSystem(tmp_path / 'workspace')
		outcome = await filesystem.read_file_structured(str(broken), external_file=True)
		assert 'message' in outcome
		message = outcome['message'].lower()
		assert 'error' in message or 'could not' in message

	@pytest.mark.asyncio
	async def test_docx_with_multiple_paragraphs(self, tmp_path: Path):
		"""Mixed headings and paragraphs all survive the round trip."""
		filesystem = FileSystem(tmp_path)
		body = """# Main Title
## Subtitle
This is a regular paragraph.
This is another paragraph with some text.
### Section 3
Final paragraph here."""
		await filesystem.write_file('multi.docx', body)
		round_trip = await filesystem.read_file('multi.docx')
		# Headings are flattened to paragraphs; all text should still be present.
		assert 'Main Title' in round_trip
		assert 'Subtitle' in round_trip
		assert 'regular paragraph' in round_trip
		assert 'Final paragraph' in round_trip
class TestFileSystemDocxIntegration:
	"""Integration-level checks mixing DOCX with other file types."""

	@pytest.mark.asyncio
	async def test_multiple_file_types_with_docx(self, tmp_path: Path):
		"""DOCX files coexist with JSON and text files in one workspace."""
		filesystem = FileSystem(tmp_path)
		await filesystem.write_file('doc.docx', '# Document\nContent here')
		await filesystem.write_file('data.json', '{"key": "value"}')
		await filesystem.write_file('notes.txt', 'Some notes')
		listing = filesystem.list_files()
		for expected in ('doc.docx', 'data.json', 'notes.txt'):
			assert expected in listing
		# The workspace also ships a default todo.md.
		assert 'todo.md' in listing

	@pytest.mark.asyncio
	async def test_file_system_state_with_docx(self, tmp_path: Path):
		"""DOCX files survive get_state / from_state serialization."""
		filesystem = FileSystem(tmp_path)
		await filesystem.write_file('test.docx', '# Title\nContent')
		await filesystem.write_file('data.txt', 'Some text')
		snapshot = filesystem.get_state()
		assert 'test.docx' in snapshot.files
		assert 'data.txt' in snapshot.files
		# Rebuild a fresh FileSystem from the snapshot and re-check.
		restored = FileSystem.from_state(snapshot)
		assert 'test.docx' in restored.list_files()
		assert 'data.txt' in restored.list_files()

	def test_allowed_extensions_include_docx(self, tmp_path: Path):
		"""'docx' must appear in the allowed-extension list."""
		filesystem = FileSystem(tmp_path)
		assert 'docx' in filesystem.get_allowed_extensions()
# Allow invoking this test module directly (outside of a pytest run).
if __name__ == '__main__':
	pytest.main([__file__, '-v'])
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_file_system_docx.py",
"license": "MIT License",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_file_system_images.py | """Tests for image file support in the FileSystem."""
import base64
import io
from pathlib import Path
import pytest
from PIL import Image
from browser_use.filesystem.file_system import FileSystem
class TestImageFiles:
	"""Image files are only readable as external paths; verify that path."""

	def create_test_image(self, width: int = 100, height: int = 100, format: str = 'PNG') -> bytes:
		"""Render a solid red test image and return its encoded bytes."""
		buffer = io.BytesIO()
		Image.new('RGB', (width, height), color='red').save(buffer, format=format)
		return buffer.getvalue()

	@pytest.mark.asyncio
	async def test_read_external_png_image(self, tmp_path: Path):
		"""An external PNG is returned as valid base64 image data."""
		source = tmp_path / 'test.png'
		raw = self.create_test_image(width=300, height=200, format='PNG')
		source.write_bytes(raw)
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		assert 'message' in outcome
		assert 'Read image file' in outcome['message']
		assert 'images' in outcome
		assert outcome['images'] is not None
		assert len(outcome['images']) == 1
		entry = outcome['images'][0]
		assert entry['name'] == 'test.png'
		assert 'data' in entry
		# Round-trip the base64 payload to prove the bytes are intact.
		assert base64.b64decode(entry['data']) == raw

	@pytest.mark.asyncio
	async def test_read_external_jpg_image(self, tmp_path: Path):
		"""An external JPG is returned with non-empty base64 data."""
		source = tmp_path / 'photo.jpg'
		source.write_bytes(self.create_test_image(width=150, height=100, format='JPEG'))
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		assert 'message' in outcome
		assert 'images' in outcome
		assert outcome['images'] is not None
		entry = outcome['images'][0]
		assert entry['name'] == 'photo.jpg'
		assert len(base64.b64decode(entry['data'])) > 0

	@pytest.mark.asyncio
	async def test_read_jpeg_extension(self, tmp_path: Path):
		"""The .jpeg spelling is handled the same as .jpg."""
		source = tmp_path / 'test.jpeg'
		source.write_bytes(self.create_test_image(format='JPEG'))
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		assert outcome['images'] is not None
		assert outcome['images'][0]['name'] == 'test.jpeg'

	@pytest.mark.asyncio
	async def test_read_nonexistent_image(self, tmp_path: Path):
		"""A missing path produces a not-found message and no image payload."""
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured('/path/to/nonexistent.png', external_file=True)
		assert 'message' in outcome
		assert 'not found' in outcome['message'].lower()
		assert outcome['images'] is None

	@pytest.mark.asyncio
	async def test_corrupted_image_file(self, tmp_path: Path):
		"""Invalid bytes are still base64-encoded; no format validation occurs."""
		source = tmp_path / 'corrupted.png'
		source.write_bytes(b'Not a valid PNG file')
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		# The reader does not decode the image, so encoding always succeeds.
		assert 'message' in outcome
		assert 'Read image file' in outcome['message']
		assert outcome['images'] is not None

	@pytest.mark.asyncio
	async def test_large_image_file(self, tmp_path: Path):
		"""A 2000x2000 PNG yields a substantial base64 payload."""
		source = tmp_path / 'large.png'
		Image.new('RGB', (2000, 2000), color='blue').save(str(source), format='PNG')
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		assert 'images' in outcome
		assert outcome['images'] is not None
		assert len(outcome['images'][0]['data']) > 10000

	@pytest.mark.asyncio
	async def test_multiple_images_in_sequence(self, tmp_path: Path):
		"""Several images can be read back-to-back without interference."""
		workspace = FileSystem(tmp_path / 'workspace')
		for index, colour in enumerate(['red', 'green', 'blue']):
			Image.new('RGB', (100, 100), color=colour).save(str(tmp_path / f'image_{index}.png'), format='PNG')
		outcomes = [
			await workspace.read_file_structured(str(tmp_path / f'image_{index}.png'), external_file=True)
			for index in range(3)
		]
		for index, outcome in enumerate(outcomes):
			assert outcome['images'] is not None
			assert outcome['images'][0]['name'] == f'image_{index}.png'

	@pytest.mark.asyncio
	async def test_different_image_formats(self, tmp_path: Path):
		"""jpg, jpeg and png variants are each read successfully."""
		workspace = FileSystem(tmp_path / 'workspace')
		sample = Image.new('RGB', (50, 50), color='yellow')
		jpg_path = tmp_path / 'test.jpg'
		sample.save(str(jpg_path), format='JPEG')
		assert (await workspace.read_file_structured(str(jpg_path), external_file=True))['images'] is not None
		jpeg_path = tmp_path / 'test.jpeg'
		sample.save(str(jpeg_path), format='JPEG')
		assert (await workspace.read_file_structured(str(jpeg_path), external_file=True))['images'] is not None
		png_path = tmp_path / 'test.png'
		sample.save(str(png_path), format='PNG')
		assert (await workspace.read_file_structured(str(png_path), external_file=True))['images'] is not None

	@pytest.mark.asyncio
	async def test_image_with_transparency(self, tmp_path: Path):
		"""An RGBA PNG with an alpha channel reads without error."""
		source = tmp_path / 'transparent.png'
		Image.new('RGBA', (100, 100), color=(255, 0, 0, 128)).save(str(source), format='PNG')
		workspace = FileSystem(tmp_path / 'workspace')
		outcome = await workspace.read_file_structured(str(source), external_file=True)
		assert outcome['images'] is not None
		assert len(outcome['images'][0]['data']) > 0
class TestActionResultImages:
	"""Behaviour of the ActionResult.images field."""

	def test_action_result_with_images(self):
		"""A single image dict is stored and retrievable."""
		from browser_use.agent.views import ActionResult

		payload = [{'name': 'test.png', 'data': 'base64_encoded_data_here'}]
		outcome = ActionResult(
			extracted_content='Read image file test.png',
			long_term_memory='Read image file test.png',
			images=payload,
			include_extracted_content_only_once=True,
		)
		assert outcome.images is not None
		assert len(outcome.images) == 1
		assert outcome.images[0]['name'] == 'test.png'
		assert outcome.images[0]['data'] == 'base64_encoded_data_here'

	def test_action_result_without_images(self):
		"""Omitting images leaves the field as None."""
		from browser_use.agent.views import ActionResult

		outcome = ActionResult(extracted_content='Some text', long_term_memory='Memory')
		assert outcome.images is None

	def test_action_result_with_multiple_images(self):
		"""Several image dicts are preserved in order."""
		from browser_use.agent.views import ActionResult

		payload = [
			{'name': 'image1.png', 'data': 'base64_data_1'},
			{'name': 'image2.jpg', 'data': 'base64_data_2'},
		]
		outcome = ActionResult(
			extracted_content='Read multiple images',
			long_term_memory='Read image files',
			images=payload,
			include_extracted_content_only_once=True,
		)
		assert outcome.images is not None
		assert len(outcome.images) == 2
		assert outcome.images[0]['name'] == 'image1.png'
		assert outcome.images[1]['name'] == 'image2.jpg'

	def test_action_result_with_empty_images_list(self):
		"""An explicit empty list is kept as-is (distinct from None)."""
		from browser_use.agent.views import ActionResult

		outcome = ActionResult(
			extracted_content='No images',
			images=[],
		)
		assert outcome.images == []
# Allow invoking this test module directly (outside of a pytest run).
if __name__ == '__main__':
	pytest.main([__file__, '-v'])
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_file_system_images.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_file_system_llm_integration.py | """Integration tests for DOCX and image file support in LLM messages."""
import base64
import io
from pathlib import Path
import pytest
from PIL import Image
from browser_use.agent.message_manager.service import MessageManager
from browser_use.agent.prompts import AgentMessagePrompt
from browser_use.agent.views import ActionResult, AgentStepInfo
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import SerializedDOMState
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.messages import ContentPartImageParam, ContentPartTextParam, SystemMessage
class TestImageInLLMMessages:
	"""Verify images flow from ActionResult through to LLM message content."""

	def create_test_image(self, width: int = 100, height: int = 100) -> bytes:
		"""Render a solid red PNG and return its encoded bytes."""
		buffer = io.BytesIO()
		Image.new('RGB', (width, height), color='red').save(buffer, format='PNG')
		return buffer.getvalue()

	def _make_browser_state(self) -> BrowserStateSummary:
		"""Minimal single-tab browser state for prompt construction."""
		return BrowserStateSummary(
			url='https://example.com',
			title='Test',
			tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
			screenshot=None,
			dom_state=SerializedDOMState(_root=None, selector_map={}),
		)

	def _make_manager(self, tmp_path: Path) -> MessageManager:
		"""MessageManager wired to a fresh FileSystem in tmp_path."""
		return MessageManager(
			task='test',
			system_message=SystemMessage(content='Test system message'),
			file_system=FileSystem(tmp_path),
		)

	@pytest.mark.asyncio
	async def test_image_stored_in_message_manager(self, tmp_path: Path):
		"""Images attached to an ActionResult land in MessageManager state."""
		manager = self._make_manager(tmp_path)
		results = [
			ActionResult(
				extracted_content='Read image file test.png',
				long_term_memory='Read image file test.png',
				images=[{'name': 'test.png', 'data': 'base64_test_data'}],
				include_extracted_content_only_once=True,
			)
		]
		manager._update_agent_history_description(
			model_output=None, result=results, step_info=AgentStepInfo(step_number=1, max_steps=10)
		)
		assert manager.state.read_state_images is not None
		assert len(manager.state.read_state_images) == 1
		assert manager.state.read_state_images[0]['name'] == 'test.png'
		assert manager.state.read_state_images[0]['data'] == 'base64_test_data'

	@pytest.mark.asyncio
	async def test_images_cleared_after_step(self, tmp_path: Path):
		"""Stored images are dropped on the next step without images."""
		manager = self._make_manager(tmp_path)
		# Step 1 carries an image.
		first = [ActionResult(images=[{'name': 'test.png', 'data': 'base64_data'}], include_extracted_content_only_once=True)]
		manager._update_agent_history_description(
			model_output=None, result=first, step_info=AgentStepInfo(step_number=1, max_steps=10)
		)
		assert len(manager.state.read_state_images) == 1
		# Step 2 carries none, which should clear the stored images.
		second = [ActionResult(extracted_content='No images')]
		manager._update_agent_history_description(
			model_output=None, result=second, step_info=AgentStepInfo(step_number=2, max_steps=10)
		)
		assert len(manager.state.read_state_images) == 0

	@pytest.mark.asyncio
	async def test_multiple_images_accumulated(self, tmp_path: Path):
		"""Images from several results in one step are all collected."""
		manager = self._make_manager(tmp_path)
		results = [
			ActionResult(images=[{'name': 'img1.png', 'data': 'data1'}], include_extracted_content_only_once=True),
			ActionResult(images=[{'name': 'img2.jpg', 'data': 'data2'}], include_extracted_content_only_once=True),
		]
		manager._update_agent_history_description(
			model_output=None, result=results, step_info=AgentStepInfo(step_number=1, max_steps=10)
		)
		assert len(manager.state.read_state_images) == 2
		assert manager.state.read_state_images[0]['name'] == 'img1.png'
		assert manager.state.read_state_images[1]['name'] == 'img2.jpg'

	def test_agent_message_prompt_includes_images(self, tmp_path: Path):
		"""AgentMessagePrompt emits both a text label and an image part."""
		prompt = AgentMessagePrompt(
			browser_state_summary=self._make_browser_state(),
			file_system=FileSystem(tmp_path),
			read_state_images=[{'name': 'test.png', 'data': 'base64_image_data_here'}],
		)
		message = prompt.get_user_message(use_vision=True)
		# With images attached, content must be structured parts, not a string.
		assert isinstance(message.content, list)
		image_parts = [part for part in message.content if isinstance(part, ContentPartImageParam)]
		text_parts = [part for part in message.content if isinstance(part, ContentPartTextParam)]
		assert len(image_parts) >= 1
		# A text label naming the file accompanies the image.
		assert len([part.text for part in text_parts if 'test.png' in part.text]) >= 1
		first_image = image_parts[0]
		assert 'data:image/' in first_image.image_url.url
		assert 'base64,base64_image_data_here' in first_image.image_url.url

	def test_agent_message_prompt_png_vs_jpg_media_type(self, tmp_path: Path):
		"""Media type in the data URL follows the file extension."""
		filesystem = FileSystem(tmp_path)
		state = self._make_browser_state()
		png_prompt = AgentMessagePrompt(
			browser_state_summary=state,
			file_system=filesystem,
			read_state_images=[{'name': 'test.png', 'data': 'data'}],
		)
		png_parts = [
			part for part in png_prompt.get_user_message(use_vision=True).content if isinstance(part, ContentPartImageParam)
		]
		assert 'data:image/png;base64' in png_parts[0].image_url.url
		jpg_prompt = AgentMessagePrompt(
			browser_state_summary=state,
			file_system=filesystem,
			read_state_images=[{'name': 'photo.jpg', 'data': 'data'}],
		)
		jpg_parts = [
			part for part in jpg_prompt.get_user_message(use_vision=True).content if isinstance(part, ContentPartImageParam)
		]
		assert 'data:image/jpeg;base64' in jpg_parts[0].image_url.url

	def test_agent_message_prompt_no_images(self, tmp_path: Path):
		"""Without images (and vision off) the message stays a plain string."""
		prompt = AgentMessagePrompt(
			browser_state_summary=self._make_browser_state(),
			file_system=FileSystem(tmp_path),
			read_state_images=[],
		)
		message = prompt.get_user_message(use_vision=False)
		assert isinstance(message.content, str)

	def test_agent_message_prompt_empty_base64_skipped(self, tmp_path: Path):
		"""Entries with an empty data field are silently dropped."""
		prompt = AgentMessagePrompt(
			browser_state_summary=self._make_browser_state(),
			file_system=FileSystem(tmp_path),
			read_state_images=[
				{'name': 'empty.png', 'data': ''},  # empty payload — should be skipped
				{'name': 'valid.png', 'data': 'valid_data'},
			],
		)
		message = prompt.get_user_message(use_vision=True)
		image_parts = [part for part in message.content if isinstance(part, ContentPartImageParam)]
		# Only the entry with real data survives.
		assert len(image_parts) == 1
		assert 'valid_data' in image_parts[0].image_url.url
class TestDocxInLLMMessages:
	"""Verify DOCX text reaches extracted content and manager state."""

	@pytest.mark.asyncio
	async def test_docx_in_extracted_content(self, tmp_path: Path):
		"""Text written to a DOCX shows up when the file is read back."""
		filesystem = FileSystem(tmp_path)
		body = """# Title
Some important content here."""
		await filesystem.write_file('test.docx', body)
		text = await filesystem.read_file('test.docx')
		assert 'Title' in text
		assert 'important content' in text

	@pytest.mark.asyncio
	async def test_docx_in_message_manager(self, tmp_path: Path):
		"""Extracted DOCX content ends up in read_state_description."""
		filesystem = FileSystem(tmp_path)
		manager = MessageManager(
			task='test',
			system_message=SystemMessage(content='Test system message'),
			file_system=filesystem,
		)
		# Simulate the payload a read_file action would produce.
		extracted = """Read from file test.docx.
<content>
Title
Some content here.
</content>"""
		results = [
			ActionResult(
				extracted_content=extracted,
				long_term_memory='Read file test.docx',
				include_extracted_content_only_once=True,
			)
		]
		manager._update_agent_history_description(
			model_output=None, result=results, step_info=AgentStepInfo(step_number=1, max_steps=10)
		)
		assert 'Title' in manager.state.read_state_description
		assert 'Some content' in manager.state.read_state_description
class TestEndToEndIntegration:
"""End-to-end tests for file reading and LLM message creation."""
def create_test_image(self) -> bytes:
	"""Render a 50x50 solid blue PNG and return the encoded bytes."""
	buffer = io.BytesIO()
	Image.new('RGB', (50, 50), color='blue').save(buffer, format='PNG')
	return buffer.getvalue()
@pytest.mark.asyncio
async def test_image_end_to_end(self, tmp_path: Path):
"""Test complete flow: external image → FileSystem → ActionResult → MessageManager → Prompt."""
# Step 1: Create external image
external_file = tmp_path / 'photo.png'
img_bytes = self.create_test_image()
external_file.write_bytes(img_bytes)
# Step 2: Read via FileSystem
fs = FileSystem(tmp_path / 'workspace')
structured_result = await fs.read_file_structured(str(external_file), external_file=True)
assert structured_result['images'] is not None
# Step 3: Create ActionResult (simulating tools/service.py)
action_result = ActionResult(
extracted_content=structured_result['message'],
long_term_memory='Read image file photo.png',
images=structured_result['images'],
include_extracted_content_only_once=True,
)
# Step 4: Process in MessageManager
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=[action_result], step_info=step_info)
# Verify images stored
assert len(mm.state.read_state_images) == 1
assert mm.state.read_state_images[0]['name'] == 'photo.png'
# Step 5: Create message with AgentMessagePrompt
browser_state = BrowserStateSummary(
url='https://example.com',
title='Test',
tabs=[TabInfo(target_id='test-0', url='https://example.com', title='Test')],
screenshot=None,
dom_state=SerializedDOMState(_root=None, selector_map={}),
)
prompt = AgentMessagePrompt(
browser_state_summary=browser_state,
file_system=fs,
read_state_images=mm.state.read_state_images,
)
user_message = prompt.get_user_message(use_vision=True)
# Verify image is in message
assert isinstance(user_message.content, list)
image_parts = [part for part in user_message.content if isinstance(part, ContentPartImageParam)]
assert len(image_parts) >= 1
# Verify image data is correct
base64_str = base64.b64encode(img_bytes).decode('utf-8')
assert base64_str in image_parts[0].image_url.url
@pytest.mark.asyncio
async def test_docx_end_to_end(self, tmp_path: Path):
"""Test complete flow: DOCX file → FileSystem → ActionResult → MessageManager."""
# Step 1: Create DOCX
fs = FileSystem(tmp_path)
docx_content = """# Important Document
This is critical information."""
await fs.write_file('important.docx', docx_content)
# Step 2: Read it
read_result = await fs.read_file('important.docx')
# Step 3: Create ActionResult (simulating tools/service.py)
action_result = ActionResult(
extracted_content=read_result,
long_term_memory=read_result[:100] if len(read_result) > 100 else read_result,
include_extracted_content_only_once=True,
)
# Step 4: Process in MessageManager
system_message = SystemMessage(content='Test system message')
mm = MessageManager(task='test', system_message=system_message, file_system=fs)
step_info = AgentStepInfo(step_number=1, max_steps=10)
mm._update_agent_history_description(model_output=None, result=[action_result], step_info=step_info)
# Verify content is in read_state
assert 'Important Document' in mm.state.read_state_description
assert 'critical information' in mm.state.read_state_description
# Allow running this test module directly (outside the pytest CLI) in verbose mode.
if __name__ == '__main__':
    pytest.main([__file__, '-v'])
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_file_system_llm_integration.py",
"license": "MIT License",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_history_wait_time.py | from browser_use.agent.views import StepMetadata
def test_step_metadata_has_step_interval_field():
    """Test that StepMetadata includes step_interval field"""
    md = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=12.5, step_interval=2.5)

    # The field must exist and round-trip the constructor value.
    assert hasattr(md, 'step_interval')
    assert md.step_interval == 2.5
def test_step_metadata_step_interval_optional():
    """Test that step_interval is optional (None for first step)"""
    # Passing None explicitly is accepted.
    explicit = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0, step_interval=None)
    assert explicit.step_interval is None

    # Leaving it out falls back to the None default.
    implicit = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0)
    assert implicit.step_interval is None
def test_step_interval_calculation():
    """Test step_interval calculation logic (uses previous step's duration)"""
    # Previous step (Step 1): runs from 100.0 to 102.5 (duration: 2.5s).
    start, end = 100.0, 102.5
    duration = end - start

    # The rerun system waits for the previous step's duration before executing
    # the current step, so the expected interval equals that duration.
    expected = duration
    computed = max(0, end - start)

    assert abs(computed - expected) < 0.001  # float-tolerant comparison
    assert computed == 2.5
def test_step_metadata_serialization_with_step_interval():
    """Test that step_interval is included in metadata serialization"""
    # A populated step_interval survives model_dump().
    dumped = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=12.5, step_interval=2.5).model_dump()
    assert 'step_interval' in dumped
    assert dumped['step_interval'] == 2.5

    # A None step_interval is still serialized as an explicit key.
    dumped_none = StepMetadata(step_number=0, step_start_time=0.0, step_end_time=1.0, step_interval=None).model_dump()
    assert 'step_interval' in dumped_none
    assert dumped_none['step_interval'] is None
def test_step_metadata_deserialization_with_step_interval():
    """Test that step_interval can be loaded from dict"""
    # New-format payload carries step_interval explicitly.
    loaded = StepMetadata.model_validate(
        {'step_number': 1, 'step_start_time': 10.0, 'step_end_time': 12.5, 'step_interval': 2.5}
    )
    assert loaded.step_interval == 2.5

    # Old-format payload omits the key entirely; it must default to None.
    legacy_payload = {
        'step_number': 0,
        'step_start_time': 0.0,
        'step_end_time': 1.0,
    }
    legacy = StepMetadata.model_validate(legacy_payload)
    assert legacy.step_interval is None
def test_step_interval_backwards_compatibility():
    """Test that old metadata without step_interval still works"""
    # Simulate a pre-step_interval record as it would appear in old JSON.
    legacy_record = {
        'step_number': 0,
        'step_start_time': 1000.0,
        'step_end_time': 1002.5,
    }

    loaded = StepMetadata.model_validate(legacy_record)

    # Original fields load unchanged; the new field defaults to None.
    assert loaded.step_number == 0
    assert loaded.step_start_time == 1000.0
    assert loaded.step_end_time == 1002.5
    assert loaded.step_interval is None
def test_duration_seconds_property_still_works():
    """Test that existing duration_seconds property still works"""
    md = StepMetadata(step_number=1, step_start_time=10.0, step_end_time=13.5, step_interval=2.0)

    # duration_seconds derives from start/end times (13.5 - 10.0)
    # and is independent of step_interval.
    assert md.duration_seconds == 3.5
    assert md.step_interval == 2.0
def test_step_metadata_json_round_trip():
    """Test that step_interval survives JSON serialization round-trip"""
    original = StepMetadata(step_number=1, step_start_time=100.0, step_end_time=102.5, step_interval=1.5)

    # Serialize to a JSON string, then rehydrate from it.
    restored = StepMetadata.model_validate_json(original.model_dump_json())

    # Every field, including step_interval, comes back intact.
    assert restored.step_interval == 1.5
    assert restored.step_number == 1
    assert restored.step_start_time == 100.0
    assert restored.step_end_time == 102.5
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_history_wait_time.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_rerun_ai_summary.py | """Tests for AI summary generation during rerun"""
from unittest.mock import AsyncMock
from browser_use.agent.service import Agent
from browser_use.agent.views import ActionResult, AgentHistory, AgentHistoryList, RerunSummaryAction, StepMetadata
from browser_use.browser.views import BrowserStateHistory
from browser_use.dom.views import DOMRect, NodeType
from tests.ci.conftest import create_mock_llm
async def test_generate_rerun_summary_success():
    """Test that _generate_rerun_summary generates an AI summary for successful rerun"""
    # Create mock LLM that returns RerunSummaryAction
    summary_action = RerunSummaryAction(
        summary='Form filled successfully',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # Get output_format from second positional arg or kwargs
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        # The summary must be requested as structured RerunSummaryAction output
        assert output_format is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=summary_action, usage=None)

    # Mock ChatOpenAI class
    mock_openai = AsyncMock()
    mock_openai.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    await agent.browser_session.start()

    try:
        # Create some successful results
        results = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(long_term_memory='Step 2 completed'),
        ]

        # Pass the mock LLM directly as summary_llm
        summary = await agent._generate_rerun_summary('Test task', results, summary_llm=mock_openai)

        # Check that result is the AI summary
        assert summary.is_done is True
        assert summary.success is True
        assert summary.extracted_content == 'Form filled successfully'
        assert 'Rerun completed' in (summary.long_term_memory or '')
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_generate_rerun_summary_with_errors():
    """Test that AI summary correctly reflects errors in execution"""
    # Create mock LLM for summary
    summary_action = RerunSummaryAction(
        summary='Rerun had errors',
        success=False,
        completion_status='failed',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        assert output_format is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=summary_action, usage=None)

    mock_openai = AsyncMock()
    mock_openai.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    await agent.browser_session.start()

    try:
        # Create results with errors
        results_with_errors = [
            ActionResult(error='Failed to find element'),
            ActionResult(error='Timeout'),
        ]

        # Pass the mock LLM directly as summary_llm
        summary = await agent._generate_rerun_summary('Test task', results_with_errors, summary_llm=mock_openai)

        # Verify summary reflects errors
        assert summary.is_done is True
        assert summary.success is False
        assert summary.extracted_content == 'Rerun had errors'
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_generate_rerun_summary_fallback_on_error():
    """Test that a fallback summary is generated if LLM fails"""
    # A summary LLM whose every invocation blows up.
    broken_llm = AsyncMock()
    broken_llm.ainvoke.side_effect = Exception('LLM service unavailable')

    agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
    await agent.browser_session.start()

    try:
        step_results = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(long_term_memory='Step 2 completed'),
        ]

        # Pass the failing mock directly as summary_llm.
        summary = await agent._generate_rerun_summary('Test task', step_results, summary_llm=broken_llm)

        # A hand-built fallback summary is produced instead of raising.
        assert summary.is_done is True
        assert summary.success is True  # no step errors, so success=True
        content = summary.extracted_content or ''
        assert 'Rerun completed' in content
        assert '2/2' in content  # fallback includes success statistics
    finally:
        await agent.close()
async def test_generate_rerun_summary_statistics():
    """Test that summary includes execution statistics in the prompt"""
    # Create mock LLM
    summary_action = RerunSummaryAction(
        summary='3 of 5 steps succeeded',
        success=False,
        completion_status='partial',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        assert output_format is RerunSummaryAction
        from browser_use.llm.views import ChatInvokeCompletion

        return ChatInvokeCompletion(completion=summary_action, usage=None)

    mock_openai = AsyncMock()
    mock_openai.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    await agent.browser_session.start()

    try:
        # Create results with mix of success and errors (3 successes, 2 errors)
        results = [
            ActionResult(long_term_memory='Step 1 completed'),
            ActionResult(error='Step 2 failed'),
            ActionResult(long_term_memory='Step 3 completed'),
            ActionResult(error='Step 4 failed'),
            ActionResult(long_term_memory='Step 5 completed'),
        ]

        # Pass the mock LLM directly as summary_llm
        summary = await agent._generate_rerun_summary('Test task', results, summary_llm=mock_openai)

        # Verify summary
        assert summary.is_done is True
        assert summary.success is False  # partial completion
        assert '3 of 5' in (summary.extracted_content or '')
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_rerun_skips_steps_with_original_errors():
    """Test that rerun_history skips steps that had errors in the original run when skip_failures=True"""
    # Create a mock LLM for summary
    summary_action = RerunSummaryAction(
        summary='Rerun completed with skipped steps',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)

    # Create mock history with a step that has an error
    mock_state = BrowserStateHistory(
        url='https://example.com',
        title='Test Page',
        tabs=[],
        interacted_element=[None],
    )

    # Get the dynamically created AgentOutput type from the agent
    AgentOutput = agent.AgentOutput

    # Create a step that originally had an error (using navigate action which doesn't require element matching)
    failed_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to navigate',
            next_goal=None,
            action=[{'navigate': {'url': 'https://example.com/page'}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(error='Navigation failed - network error')],
        state=mock_state,
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=1.0,
        ),
    )

    # Create history with the failed step
    history = AgentHistoryList(history=[failed_step])

    try:
        # Run rerun with skip_failures=True - should skip the step with original error
        results = await agent.rerun_history(
            history,
            skip_failures=True,
            summary_llm=mock_summary_llm,
        )

        # The step should have been skipped (not retried) because it originally had an error
        # We should have 2 results: the skipped step result and the AI summary
        assert len(results) == 2

        # First result should indicate the step was skipped
        skipped_result = results[0]
        assert skipped_result.error is not None
        assert 'Skipped - original step had error' in skipped_result.error

        # Second result should be the AI summary
        summary_result = results[1]
        assert summary_result.is_done is True
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_rerun_does_not_skip_originally_failed_when_skip_failures_false():
    """Test that rerun_history does NOT skip steps with original errors when skip_failures=False.

    When skip_failures=False, the step should be attempted (and will succeed since navigate doesn't need element matching).
    """
    # Create a mock LLM for summary (will be reached after the step succeeds)
    summary_action = RerunSummaryAction(
        summary='Rerun completed',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)

    # Create mock history with a step that has an error
    mock_state = BrowserStateHistory(
        url='https://example.com',
        title='Test Page',
        tabs=[],
        interacted_element=[None],
    )

    # Get the dynamically created AgentOutput type from the agent
    AgentOutput = agent.AgentOutput

    # Create a step that originally had an error but uses navigate (which will work on rerun)
    failed_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to navigate',
            next_goal=None,
            action=[{'navigate': {'url': 'https://example.com/page'}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(error='Navigation failed - network error')],
        state=mock_state,
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=1.0,
        ),
    )

    # Create history with the failed step
    history = AgentHistoryList(history=[failed_step])

    try:
        # Run rerun with skip_failures=False - should attempt to replay (and succeed since navigate works)
        results = await agent.rerun_history(
            history,
            skip_failures=False,
            max_retries=1,
            summary_llm=mock_summary_llm,
        )

        # With skip_failures=False, the step should NOT be skipped even if original had error
        # The navigate action should succeed
        assert len(results) == 2

        # First result should be the successful navigation (not skipped)
        nav_result = results[0]
        # It should NOT contain "Skipped" since skip_failures=False
        if nav_result.error:
            assert 'Skipped' not in nav_result.error
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_rerun_cleanup_on_failure(httpserver):
    """Test that rerun_history properly cleans up resources (closes browser/connections) even when it fails.

    This test verifies the try/finally cleanup logic by creating a step that will fail
    (element matching fails) and checking that the browser session is properly closed afterward.
    """
    from browser_use.dom.views import DOMInteractedElement

    # Set up a test page with a button that has DIFFERENT attributes than our historical element
    test_html = """<!DOCTYPE html>
<html>
<body>
<button id="real-button" aria-label="real-button">Click me</button>
</body>
</html>"""
    httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
    test_url = httpserver.url_for('/test')

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    AgentOutput = agent.AgentOutput

    # Step 1: Navigate to test page
    navigate_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Navigate to test page',
            next_goal=None,
            action=[{'navigate': {'url': test_url}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Navigated')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[None],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=0.1,
        ),
    )

    # Step 2: Click on element that won't be found (different identifiers)
    failing_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to click non-existent button',
            next_goal=None,
            action=[{'click': {'index': 100}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked button')],  # Original succeeded
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[
                # Element whose hashes/attributes/xpath match nothing on the live page
                DOMInteractedElement(
                    node_id=1,
                    backend_node_id=9999,
                    frame_id=None,
                    node_type=NodeType.ELEMENT_NODE,
                    node_value='',
                    node_name='BUTTON',
                    attributes={'aria-label': 'non-existent-button', 'id': 'fake-id'},
                    x_path='html/body/button[999]',
                    element_hash=123456789,
                    stable_hash=987654321,
                    bounds=DOMRect(x=0, y=0, width=100, height=50),
                    ax_name='non-existent',
                )
            ],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=2,
            step_interval=0.1,
        ),
    )

    history = AgentHistoryList(history=[navigate_step, failing_step])

    # Run rerun with skip_failures=False - should fail and raise RuntimeError
    # but the try/finally should ensure cleanup happens
    try:
        await agent.rerun_history(
            history,
            skip_failures=False,
            max_retries=1,  # Fail quickly
        )
        assert False, 'Expected RuntimeError to be raised'
    except RuntimeError as e:
        # Expected - the step should fail on element matching
        assert 'failed after 1 attempts' in str(e)

    # If we get here without hanging, the cleanup worked
    # The browser session should be closed by the finally block in rerun_history
    # We can verify by checking that calling close again doesn't cause issues
    # (close() is idempotent - calling it multiple times should be safe)
    await agent.close()  # Should not hang or error since already closed
async def test_rerun_records_errors_when_skip_failures_true(httpserver):
    """Test that rerun_history records errors in results even when skip_failures=True.

    This ensures the AI summary correctly counts failures. Previously, when skip_failures=True
    and a step failed after all retries, no error result was appended, causing the AI summary
    to incorrectly report success=True even with multiple failures.
    """
    from browser_use.dom.views import DOMInteractedElement

    # Set up a test page with a button that has DIFFERENT attributes than our historical element
    # This ensures element matching will fail (the historical element won't be found)
    test_html = """<!DOCTYPE html>
<html>
<body>
<button id="real-button" aria-label="real-button">Click me</button>
</body>
</html>"""
    httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
    test_url = httpserver.url_for('/test')

    # Create a mock LLM for summary that returns partial success
    summary_action = RerunSummaryAction(
        summary='Some steps failed',
        success=False,
        completion_status='partial',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)

    # Create history with:
    # 1. First step navigates to test page (will succeed)
    # 2. Second step tries to click a non-existent element (will fail on element matching)
    AgentOutput = agent.AgentOutput

    # Step 1: Navigate to test page
    navigate_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Navigate to test page',
            next_goal=None,
            action=[{'navigate': {'url': test_url}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Navigated')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[None],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=0.1,
        ),
    )

    # Step 2: Click on element that won't exist on current page (different hash/attributes)
    failing_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Trying to click non-existent button',
            next_goal=None,
            action=[{'click': {'index': 100}}],  # type: ignore[arg-type] # Original index doesn't matter, matching will fail
        ),
        result=[ActionResult(long_term_memory='Clicked button')],  # Original succeeded
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[
                DOMInteractedElement(
                    node_id=1,
                    backend_node_id=9999,
                    frame_id=None,
                    node_type=NodeType.ELEMENT_NODE,
                    node_value='',
                    node_name='BUTTON',
                    # This element has completely different identifiers than the real button
                    attributes={'aria-label': 'non-existent-button', 'id': 'fake-id'},
                    x_path='html/body/button[999]',  # XPath that doesn't exist
                    element_hash=123456789,  # Hash that won't match
                    stable_hash=987654321,  # Stable hash that won't match
                    bounds=DOMRect(x=0, y=0, width=100, height=50),
                    ax_name='non-existent',
                )
            ],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=2,
            step_interval=0.1,
        ),
    )

    history = AgentHistoryList(history=[navigate_step, failing_step])

    try:
        # Run rerun with skip_failures=True - should NOT raise but should record the error
        results = await agent.rerun_history(
            history,
            skip_failures=True,
            max_retries=1,  # Fail quickly
            summary_llm=mock_summary_llm,
        )

        # Should have 3 results: navigation success + error from failed step + AI summary
        assert len(results) == 3

        # First result should be successful navigation
        nav_result = results[0]
        assert nav_result.error is None

        # Second result should be the error (element matching failed)
        error_result = results[1]
        assert error_result.error is not None
        assert 'failed after 1 attempts' in error_result.error

        # Third result should be the AI summary
        summary_result = results[2]
        assert summary_result.is_done is True
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_rerun_skips_redundant_retry_steps(httpserver):
    """Test that rerun_history skips redundant retry steps.

    This handles cases where the original run needed to click the same element multiple
    times due to slow page response, but during replay the first click already succeeded.
    When consecutive steps target the same element with the same action, the second step
    should be skipped as a redundant retry.
    """
    from browser_use.dom.views import DOMInteractedElement

    # Set up a test page with a button
    test_html = """<!DOCTYPE html>
<html>
<body>
<button id="login-btn" aria-label="Log In">Log In</button>
</body>
</html>"""
    httpserver.expect_request('/test').respond_with_data(test_html, content_type='text/html')
    test_url = httpserver.url_for('/test')

    # Create a mock LLM for summary
    summary_action = RerunSummaryAction(
        summary='Rerun completed with skipped redundant step',
        success=True,
        completion_status='complete',
    )

    async def custom_ainvoke(*args, **kwargs):
        # output_format may arrive positionally or as a keyword
        output_format = args[1] if len(args) > 1 else kwargs.get('output_format')
        if output_format is RerunSummaryAction:
            from browser_use.llm.views import ChatInvokeCompletion

            return ChatInvokeCompletion(completion=summary_action, usage=None)
        raise ValueError('Unexpected output_format')

    mock_summary_llm = AsyncMock()
    mock_summary_llm.ainvoke.side_effect = custom_ainvoke

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    AgentOutput = agent.AgentOutput

    # Create an interacted element that matches the button on the page
    login_button_element = DOMInteractedElement(
        node_id=1,
        backend_node_id=1,
        frame_id=None,
        node_type=NodeType.ELEMENT_NODE,
        node_value='',
        node_name='BUTTON',
        attributes={'aria-label': 'Log In', 'id': 'login-btn'},
        x_path='html/body/button',
        element_hash=12345,  # Same hash for both steps (same element)
        stable_hash=12345,
        bounds=DOMRect(x=0, y=0, width=100, height=50),
    )

    # Step 1: Navigate to test page
    navigate_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Navigate to test page',
            next_goal=None,
            action=[{'navigate': {'url': test_url}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Navigated')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[None],
        ),
        metadata=StepMetadata(
            step_start_time=0,
            step_end_time=1,
            step_number=1,
            step_interval=0.1,
        ),
    )

    # Step 2: Click login button (first click)
    click_step_1 = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Click login button',
            next_goal=None,
            action=[{'click': {'index': 1}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked login button')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[login_button_element],
        ),
        metadata=StepMetadata(
            step_start_time=1,
            step_end_time=2,
            step_number=2,
            step_interval=0.1,
        ),
    )

    # Step 3: Click login button AGAIN (redundant retry - same element, same action)
    click_step_2 = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Page did not change, clicking login button again',
            next_goal=None,
            action=[{'click': {'index': 1}}],  # type: ignore[arg-type] # Same action type
        ),
        result=[ActionResult(long_term_memory='Clicked login button')],
        state=BrowserStateHistory(
            url=test_url,
            title='Test Page',
            tabs=[],
            interacted_element=[login_button_element],  # Same element!
        ),
        metadata=StepMetadata(
            step_start_time=2,
            step_end_time=3,
            step_number=3,
            step_interval=0.1,
        ),
    )

    history = AgentHistoryList(history=[navigate_step, click_step_1, click_step_2])

    try:
        results = await agent.rerun_history(
            history,
            skip_failures=True,
            summary_llm=mock_summary_llm,
        )

        # Should have 4 results: navigate + click + skipped redundant + AI summary
        assert len(results) == 4

        # First result: navigation succeeded
        nav_result = results[0]
        assert nav_result.error is None

        # Second result: first click succeeded
        click_result = results[1]
        assert click_result.error is None

        # Third result: redundant retry was SKIPPED (not an error)
        skipped_result = results[2]
        assert skipped_result.error is None  # Not an error - intentionally skipped
        assert 'Skipped - redundant retry' in (skipped_result.extracted_content or '')

        # Fourth result: AI summary
        summary_result = results[3]
        assert summary_result.is_done is True
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_is_redundant_retry_step_detection():
    """Test the _is_redundant_retry_step method directly."""
    from browser_use.dom.views import DOMInteractedElement

    llm = create_mock_llm(actions=None)
    agent = Agent(task='Test task', llm=llm)
    AgentOutput = agent.AgentOutput

    # Create an interacted element
    button_element = DOMInteractedElement(
        node_id=1,
        backend_node_id=1,
        frame_id=None,
        node_type=NodeType.ELEMENT_NODE,
        node_value='',
        node_name='BUTTON',
        attributes={'aria-label': 'Submit'},
        x_path='html/body/button',
        element_hash=12345,
        stable_hash=12345,
        bounds=DOMRect(x=0, y=0, width=100, height=50),
    )

    # A second element with different hashes, so it cannot match button_element
    different_element = DOMInteractedElement(
        node_id=2,
        backend_node_id=2,
        frame_id=None,
        node_type=NodeType.ELEMENT_NODE,
        node_value='',
        node_name='INPUT',
        attributes={'name': 'email'},
        x_path='html/body/input',
        element_hash=99999,  # Different hash
        stable_hash=99999,
        bounds=DOMRect(x=0, y=0, width=200, height=30),
    )

    # Step with click on button
    click_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Click button',
            next_goal=None,
            action=[{'click': {'index': 1}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked')],
        state=BrowserStateHistory(
            url='http://test.com',
            title='Test',
            tabs=[],
            interacted_element=[button_element],
        ),
        metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
    )

    # Same click on same button (redundant retry)
    retry_click_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Click button again',
            next_goal=None,
            action=[{'click': {'index': 1}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked')],
        state=BrowserStateHistory(
            url='http://test.com',
            title='Test',
            tabs=[],
            interacted_element=[button_element],  # Same element
        ),
        metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
    )

    # Different action type on same element (not redundant)
    input_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Type in button (weird but valid)',
            next_goal=None,
            action=[{'input': {'index': 1, 'text': 'hello'}}],  # type: ignore[arg-type] # Different action type
        ),
        result=[ActionResult(long_term_memory='Typed')],
        state=BrowserStateHistory(
            url='http://test.com',
            title='Test',
            tabs=[],
            interacted_element=[button_element],
        ),
        metadata=StepMetadata(step_start_time=2, step_end_time=3, step_number=3, step_interval=0.1),
    )

    # Same action type but different element (not redundant)
    different_element_step = AgentHistory(
        model_output=AgentOutput(
            evaluation_previous_goal=None,
            memory='Click different element',
            next_goal=None,
            action=[{'click': {'index': 2}}],  # type: ignore[arg-type]
        ),
        result=[ActionResult(long_term_memory='Clicked')],
        state=BrowserStateHistory(
            url='http://test.com',
            title='Test',
            tabs=[],
            interacted_element=[different_element],  # Different element
        ),
        metadata=StepMetadata(step_start_time=3, step_end_time=4, step_number=4, step_interval=0.1),
    )

    try:
        # Signature under test: _is_redundant_retry_step(current_step, previous_step, previous_succeeded)
        # Test 1: Same element, same action, previous succeeded -> redundant
        assert agent._is_redundant_retry_step(retry_click_step, click_step, True) is True

        # Test 2: Same element, same action, previous FAILED -> NOT redundant
        assert agent._is_redundant_retry_step(retry_click_step, click_step, False) is False

        # Test 3: Same element, different action type -> NOT redundant
        assert agent._is_redundant_retry_step(input_step, click_step, True) is False

        # Test 4: Different element, same action type -> NOT redundant
        assert agent._is_redundant_retry_step(different_element_step, click_step, True) is False

        # Test 5: No previous step -> NOT redundant
        assert agent._is_redundant_retry_step(click_step, None, True) is False
    finally:
        # Always release the browser session, even if assertions fail
        await agent.close()
async def test_count_expected_elements_from_history():
	"""Test that _count_expected_elements_from_history correctly estimates element count based on action indices.

	The estimate is max(action index) + 1, capped at 50, and 0 for actions
	that carry no element index (e.g. navigate).
	"""
	llm = create_mock_llm(actions=None)
	agent = Agent(task='Test task', llm=llm)
	AgentOutput = agent.AgentOutput

	def make_step(actions: list[dict], step_number: int) -> AgentHistory:
		"""Build a minimal one-step AgentHistory carrying the given raw action dicts."""
		return AgentHistory(
			model_output=AgentOutput(
				evaluation_previous_goal=None,
				memory='Test',
				next_goal=None,
				action=actions,  # type: ignore[arg-type]
			),
			result=[ActionResult(long_term_memory='Done') for _ in actions],
			state=BrowserStateHistory(
				url='http://test.com',
				title='Test',
				tabs=[],
				interacted_element=[None for _ in actions],
			),
			metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=step_number, step_interval=0.1),
		)

	step_low_index = make_step([{'input': {'index': 5, 'text': 'test'}}], 1)
	step_high_index = make_step([{'click': {'index': 25}}], 2)
	step_very_high_index = make_step([{'click': {'index': 100}}], 3)
	step_no_index = make_step([{'navigate': {'url': 'http://test.com'}}], 4)
	# Multiple actions in one step: the estimate should use the maximum index.
	step_multiple_actions = make_step([{'click': {'index': 3}}, {'input': {'index': 10, 'text': 'test'}}], 5)
	# Edge case: index 0 is only valid for input actions (click requires ge=1).
	step_index_zero = make_step([{'input': {'index': 0, 'text': 'test'}}], 6)
	try:
		# Test 1: Action index 5 -> needs 6 elements (index + 1)
		assert agent._count_expected_elements_from_history(step_low_index) == 6
		# Test 2: Action index 25 -> needs 26 elements
		assert agent._count_expected_elements_from_history(step_high_index) == 26
		# Test 3: Action index 100 -> capped at 50
		assert agent._count_expected_elements_from_history(step_very_high_index) == 50
		# Test 4: Navigate has no index -> returns 0
		assert agent._count_expected_elements_from_history(step_no_index) == 0
		# Test 5: Multiple actions -> uses max index (10) + 1 = 11
		assert agent._count_expected_elements_from_history(step_multiple_actions) == 11
		# Test 6: Action index 0 (edge case) -> needs 1 element (0 + 1)
		assert agent._count_expected_elements_from_history(step_index_zero) == 1
	finally:
		await agent.close()
async def test_wait_for_minimum_elements(httpserver):
	"""_wait_for_minimum_elements should poll until enough elements exist (or time out)."""
	import asyncio

	from browser_use.browser.events import NavigateToUrlEvent

	# Serve a small page containing three interactive elements.
	page_html = """<!DOCTYPE html>
<html>
<body>
<button id="btn1">Button 1</button>
<button id="btn2">Button 2</button>
<input type="text" id="input1" />
</body>
</html>"""
	httpserver.expect_request('/test').respond_with_data(page_html, content_type='text/html')
	page_url = httpserver.url_for('/test')
	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	try:
		await agent.browser_session.start()
		await agent.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=page_url, new_tab=False))
		await asyncio.sleep(1.0)  # give navigation a moment to settle
		# Test 1: waiting for a single element succeeds quickly.
		browser_state = await agent._wait_for_minimum_elements(min_elements=1, timeout=5.0, poll_interval=0.5)
		assert browser_state is not None
		assert browser_state.dom_state.selector_map is not None
		assert len(browser_state.dom_state.selector_map) >= 1
		# Test 2: waiting for a reasonable element count also succeeds.
		browser_state = await agent._wait_for_minimum_elements(min_elements=2, timeout=5.0, poll_interval=0.5)
		assert browser_state is not None
		assert len(browser_state.dom_state.selector_map) >= 2
		# Test 3: an impossible count times out but still returns the latest state.
		browser_state = await agent._wait_for_minimum_elements(min_elements=100, timeout=2.0, poll_interval=0.5)
		assert browser_state is not None
	finally:
		await agent.close()
async def test_rerun_waits_for_elements_before_matching(httpserver):
	"""rerun_history should wait for the page to expose enough elements before matching.

	For actions that need element matching (like click), the rerun logic is
	expected to wait until the page has enough elements before proceeding.
	"""
	from browser_use.dom.views import DOMInteractedElement

	page_html = """<!DOCTYPE html>
<html>
<body>
<button id="test-btn" aria-label="Test Button">Click me</button>
</body>
</html>"""
	httpserver.expect_request('/test').respond_with_data(page_html, content_type='text/html')
	page_url = httpserver.url_for('/test')

	# LLM double that only ever answers the final rerun-summary request.
	summary_action = RerunSummaryAction(
		summary='Rerun completed',
		success=True,
		completion_status='complete',
	)

	async def summary_only_ainvoke(*args, **kwargs):
		requested = args[1] if len(args) > 1 else kwargs.get('output_format')
		if requested is not RerunSummaryAction:
			raise ValueError('Unexpected output_format')
		from browser_use.llm.views import ChatInvokeCompletion

		return ChatInvokeCompletion(completion=summary_action, usage=None)

	mock_summary_llm = AsyncMock()
	mock_summary_llm.ainvoke.side_effect = summary_only_ainvoke

	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	AgentOutput = agent.AgentOutput

	# Element matching the page button; backend_node_id=5 makes the rerun wait
	# for at least 5 elements before attempting the match.
	button_element = DOMInteractedElement(
		node_id=1,
		backend_node_id=5,
		frame_id=None,
		node_type=NodeType.ELEMENT_NODE,
		node_value='',
		node_name='BUTTON',
		attributes={'aria-label': 'Test Button', 'id': 'test-btn'},
		x_path='html/body/button',
		element_hash=12345,
		stable_hash=12345,
		bounds=DOMRect(x=0, y=0, width=100, height=50),
	)

	# Step 1: navigate to the served page.
	navigate_step = AgentHistory(
		model_output=AgentOutput(
			evaluation_previous_goal=None,
			memory='Navigate to test page',
			next_goal=None,
			action=[{'navigate': {'url': page_url}}],  # type: ignore[arg-type]
		),
		result=[ActionResult(long_term_memory='Navigated')],
		state=BrowserStateHistory(url=page_url, title='Test Page', tabs=[], interacted_element=[None]),
		metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
	)
	# Step 2: click the button — this needs element matching, so it should wait.
	click_step = AgentHistory(
		model_output=AgentOutput(
			evaluation_previous_goal=None,
			memory='Click button',
			next_goal=None,
			action=[{'click': {'index': 5}}],  # type: ignore[arg-type]
		),
		result=[ActionResult(long_term_memory='Clicked')],
		state=BrowserStateHistory(url=page_url, title='Test Page', tabs=[], interacted_element=[button_element]),
		metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
	)
	history = AgentHistoryList(history=[navigate_step, click_step])
	try:
		results = await agent.rerun_history(
			history,
			skip_failures=True,
			max_retries=1,
			summary_llm=mock_summary_llm,
			wait_for_elements=True,  # enable element waiting
		)
		# Expect navigate + click (or an error result if the element was not found) + summary.
		assert len(results) >= 2
		assert results[0].error is None  # navigation itself must have succeeded
	finally:
		await agent.close()
async def test_rerun_uses_exponential_backoff_retry_delays(httpserver):
	"""Retries during rerun should be spaced with exponential backoff (5s, 10s, 20s, capped at 30s)."""
	import time

	from browser_use.dom.views import DOMInteractedElement

	page_html = """<!DOCTYPE html>
<html>
<body>
<button id="real-btn">Real Button</button>
</body>
</html>"""
	httpserver.expect_request('/test').respond_with_data(page_html, content_type='text/html')
	page_url = httpserver.url_for('/test')
	agent = Agent(task='Test task', llm=create_mock_llm(actions=None))
	AgentOutput = agent.AgentOutput

	# Deliberately mismatched element so the click step fails and retries.
	# backend_node_id is kept low to avoid long element waiting.
	missing_element = DOMInteractedElement(
		node_id=1,
		backend_node_id=1,
		frame_id=None,
		node_type=NodeType.ELEMENT_NODE,
		node_value='',
		node_name='BUTTON',
		attributes={'aria-label': 'Non-existent', 'id': 'fake-id'},
		x_path='html/body/button[999]',
		element_hash=99999,
		stable_hash=99999,
		bounds=DOMRect(x=0, y=0, width=100, height=50),
	)

	navigate_step = AgentHistory(
		model_output=AgentOutput(
			evaluation_previous_goal=None,
			memory='Navigate',
			next_goal=None,
			action=[{'navigate': {'url': page_url}}],  # type: ignore[arg-type]
		),
		result=[ActionResult(long_term_memory='Navigated')],
		state=BrowserStateHistory(url=page_url, title='Test', tabs=[], interacted_element=[None]),
		metadata=StepMetadata(step_start_time=0, step_end_time=1, step_number=1, step_interval=0.1),
	)
	failing_step = AgentHistory(
		model_output=AgentOutput(
			evaluation_previous_goal=None,
			memory='Click',
			next_goal=None,
			action=[{'click': {'index': 1}}],  # type: ignore[arg-type]
		),
		result=[ActionResult(long_term_memory='Clicked')],
		state=BrowserStateHistory(url=page_url, title='Test', tabs=[], interacted_element=[missing_element]),
		metadata=StepMetadata(step_start_time=1, step_end_time=2, step_number=2, step_interval=0.1),
	)
	history = AgentHistoryList(history=[navigate_step, failing_step])
	try:
		started_at = time.time()
		# Attempt 1 fails -> backoff waits 5s (5 * 2^0) -> attempt 2 fails -> done.
		try:
			await agent.rerun_history(history, skip_failures=False, max_retries=2)
		except RuntimeError:
			pass  # the click is expected to fail
		elapsed = time.time() - started_at
		# The first retry delay alone should account for ~5 seconds.
		assert elapsed >= 4.5, f'Expected at least 4.5s elapsed (5s exponential backoff), got {elapsed:.1f}s'
	finally:
		await agent.close()
async def test_exponential_backoff_calculation():
	"""Test that exponential backoff correctly calculates delays: 5s, 10s, 20s, capped at 30s."""
	base_delay = 5.0
	max_delay = 30.0
	# Formula under test: min(base_delay * 2^(retry - 1), max_delay).
	expected_by_retry = {1: 5.0, 2: 10.0, 3: 20.0, 4: 30.0, 5: 30.0}
	for retry, expected in expected_by_retry.items():
		assert min(base_delay * (2 ** (retry - 1)), max_delay) == expected
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_rerun_ai_summary.py",
"license": "MIT License",
"lines": 1097,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_screenshot_exclusion.py | """Test that screenshot action is excluded when use_vision != 'auto'."""
import pytest
from browser_use.agent.service import Agent
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
from browser_use.tools.service import Tools
from tests.ci.conftest import create_mock_llm
@pytest.fixture(scope='function')
async def browser_session():
	"""Function-scoped headless browser session, started before the test and killed after."""
	browser = BrowserSession(browser_profile=BrowserProfile(headless=True))
	await browser.start()
	yield browser
	await browser.kill()
def test_screenshot_excluded_with_use_vision_false():
	"""use_vision=False must strip the screenshot action from the agent's registry."""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	agent = Agent(task='test', llm=llm, use_vision=False)
	assert 'screenshot' not in agent.tools.registry.registry.actions, 'Screenshot should be excluded when use_vision=False'
def test_screenshot_excluded_with_use_vision_true():
	"""use_vision=True must also strip the screenshot action from the agent's registry."""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	agent = Agent(task='test', llm=llm, use_vision=True)
	assert 'screenshot' not in agent.tools.registry.registry.actions, 'Screenshot should be excluded when use_vision=True'
def test_screenshot_included_with_use_vision_auto():
	"""use_vision='auto' is the one mode that keeps the screenshot action registered."""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	agent = Agent(task='test', llm=llm, use_vision='auto')
	assert 'screenshot' in agent.tools.registry.registry.actions, 'Screenshot should be included when use_vision="auto"'
def test_screenshot_excluded_with_custom_tools_and_use_vision_false():
	"""Custom Tools that include screenshot still lose it when use_vision=False.

	This is the critical case the fix addresses: even when users pass their own
	Tools instance with screenshot registered, the Agent must enforce the
	exclusion whenever use_vision != 'auto'.
	"""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	user_tools = Tools()
	assert 'screenshot' in user_tools.registry.registry.actions, 'Custom tools should have screenshot by default'
	agent = Agent(task='test', llm=llm, tools=user_tools, use_vision=False)
	assert 'screenshot' not in agent.tools.registry.registry.actions, (
		'Screenshot should be excluded when use_vision=False, even with custom tools'
	)
def test_screenshot_excluded_with_custom_tools_and_use_vision_true():
	"""Custom Tools that include screenshot still lose it when use_vision=True.

	Same enforcement as the use_vision=False case: passing a Tools instance
	does not bypass the exclusion when use_vision != 'auto'.
	"""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	# Tools() normally registers screenshot (unless exclude_actions is passed);
	# record whether it did, since defaults may change. The key check is only
	# that screenshot is gone after Agent init.
	user_tools = Tools()
	had_screenshot = 'screenshot' in user_tools.registry.registry.actions
	agent = Agent(task='test', llm=llm, tools=user_tools, use_vision=True)
	assert 'screenshot' not in agent.tools.registry.registry.actions, (
		f'Screenshot should be excluded when use_vision=True, even with custom tools (had screenshot before: {had_screenshot})'
	)
def test_screenshot_included_with_custom_tools_and_use_vision_auto():
	"""Custom Tools keep the screenshot action when use_vision='auto'."""
	llm = create_mock_llm(actions=['{"action": [{"done": {"text": "test", "success": true}}]}'])
	user_tools = Tools()
	assert 'screenshot' in user_tools.registry.registry.actions, 'Custom tools should have screenshot by default'
	agent = Agent(task='test', llm=llm, tools=user_tools, use_vision='auto')
	assert 'screenshot' in agent.tools.registry.registry.actions, (
		'Screenshot should be included when use_vision="auto", even with custom tools'
	)
def test_tools_exclude_action_method():
	"""Tools.exclude_action() removes the action and records it as excluded."""
	tools = Tools()
	assert 'screenshot' in tools.registry.registry.actions, 'Screenshot should be included by default'
	tools.exclude_action('screenshot')
	assert 'screenshot' not in tools.registry.registry.actions, 'Screenshot should be excluded after calling exclude_action()'
	assert 'screenshot' in tools.registry.exclude_actions, 'Screenshot should be in exclude_actions list'
def test_exclude_action_prevents_re_registration():
	"""Once excluded, an action name cannot be registered again."""
	tools = Tools()
	tools.exclude_action('screenshot')
	assert 'screenshot' not in tools.registry.registry.actions

	# Simulate what __init__ does: the decorator must skip names found in exclude_actions.
	@tools.registry.action('Test screenshot action')
	async def screenshot():
		return 'test'

	assert 'screenshot' not in tools.registry.registry.actions, 'Excluded action should not be re-registered'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_screenshot_exclusion.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_variable_detection.py | """Unit tests for variable detection in agent history"""
from browser_use.agent.variable_detector import (
_detect_from_attributes,
_detect_from_value_pattern,
_detect_variable_type,
_ensure_unique_name,
detect_variables_in_history,
)
from browser_use.agent.views import DetectedVariable
from browser_use.dom.views import DOMInteractedElement, NodeType
def create_test_element(attributes: dict[str, str] | None = None) -> DOMInteractedElement:
	"""Build a minimal input-element DOMInteractedElement carrying the given attributes."""
	return DOMInteractedElement(
		node_id=1,
		backend_node_id=1,
		frame_id='frame1',
		node_type=NodeType.ELEMENT_NODE,
		node_value='',
		node_name='input',
		attributes={} if attributes is None else attributes,
		bounds=None,
		x_path='//*[@id="test"]',
		element_hash=12345,
	)
def create_mock_history(actions_with_elements: list[tuple[dict, DOMInteractedElement | None]]):
	"""Build a SimpleNamespace-based stand-in for an agent history from (action, element) pairs."""
	from types import SimpleNamespace

	steps = []
	for raw_action, element in actions_with_elements:
		output = SimpleNamespace(action=[SimpleNamespace(**raw_action)])
		state = SimpleNamespace(interacted_element=[element] if element else None)
		steps.append(SimpleNamespace(model_output=output, state=state))
	return SimpleNamespace(history=steps)
def test_detect_email_from_attributes():
	"""An input with type='email' is detected as the 'email' variable with email format."""
	detected = _detect_from_attributes({'type': 'email', 'id': 'email-input'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('email', 'email')
def test_detect_email_from_pattern():
	"""An email-shaped value is detected as 'email' via pattern matching."""
	detected = _detect_from_value_pattern('test@example.com')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('email', 'email')
def test_detect_phone_from_attributes():
	"""An input with type='tel' is detected as the 'phone' variable with phone format."""
	detected = _detect_from_attributes({'type': 'tel', 'name': 'phone'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('phone', 'phone')
def test_detect_phone_from_pattern():
	"""A phone-number-shaped value is detected as 'phone' via pattern matching."""
	detected = _detect_from_value_pattern('+1 (555) 123-4567')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('phone', 'phone')
def test_detect_date_from_attributes():
	"""An input with type='date' is detected as the 'date' variable with date format."""
	detected = _detect_from_attributes({'type': 'date', 'id': 'dob'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('date', 'date')
def test_detect_date_from_pattern():
	"""A YYYY-MM-DD value is detected as 'date' via pattern matching."""
	detected = _detect_from_value_pattern('1990-01-01')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('date', 'date')
def test_detect_first_name_from_attributes():
	"""Name/placeholder attributes mentioning 'first name' map to 'first_name' (no format)."""
	detected = _detect_from_attributes({'name': 'first_name', 'placeholder': 'Enter your first name'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('first_name', None)
def test_detect_first_name_from_pattern():
	"""A single capitalized word is detected as 'first_name' via pattern matching."""
	detected = _detect_from_value_pattern('John')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('first_name', None)
def test_detect_full_name_from_pattern():
	"""Two capitalized words are detected as 'full_name' via pattern matching."""
	detected = _detect_from_value_pattern('John Doe')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('full_name', None)
def test_detect_address_from_attributes():
	"""Address-related attributes map to the 'address' variable (no format)."""
	detected = _detect_from_attributes({'name': 'street_address', 'id': 'address-input'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('address', None)
def test_detect_billing_address_from_attributes():
	"""Billing-address attributes map to the 'billing_address' variable (no format)."""
	detected = _detect_from_attributes({'name': 'billing_address', 'placeholder': 'Billing street address'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('billing_address', None)
def test_detect_comment_from_attributes():
	"""Comment-related attributes map to the 'comment' variable (no format)."""
	detected = _detect_from_attributes({'name': 'comment', 'placeholder': 'Enter your comment'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('comment', None)
def test_detect_city_from_attributes():
	"""City-related attributes map to the 'city' variable (no format)."""
	detected = _detect_from_attributes({'name': 'city', 'id': 'city-input'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('city', None)
def test_detect_state_from_attributes():
	"""State-related attributes map to the 'state' variable (no format)."""
	detected = _detect_from_attributes({'name': 'state', 'aria-label': 'State or Province'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('state', None)
def test_detect_country_from_attributes():
	"""Country-related attributes map to the 'country' variable (no format)."""
	detected = _detect_from_attributes({'name': 'country', 'id': 'country-select'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('country', None)
def test_detect_zip_code_from_attributes():
	"""Zip/postal-code attributes map to 'zip_code' with postal_code format."""
	detected = _detect_from_attributes({'name': 'zip_code', 'placeholder': 'Zip or postal code'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('zip_code', 'postal_code')
def test_detect_company_from_attributes():
	"""Company-related attributes map to the 'company' variable (no format)."""
	detected = _detect_from_attributes({'name': 'company', 'id': 'company-input'})
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('company', None)
def test_detect_number_from_pattern():
	"""A pure-digit value is detected as 'number' with number format."""
	detected = _detect_from_value_pattern('12345')
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('number', 'number')
def test_no_detection_for_random_text():
	"""Free-form prose must not be classified as any variable."""
	assert _detect_from_value_pattern('some random text that is not a variable') is None
def test_no_detection_for_short_text():
	"""A single-character value must not be classified as any variable."""
	assert _detect_from_value_pattern('a') is None
def test_element_attributes_take_priority_over_pattern():
	"""Attribute-based detection wins over value-pattern matching."""
	# 'Test' alone would pattern-match as a first name, but the element declares type='email'.
	element = create_test_element(attributes={'type': 'email', 'id': 'email-input'})
	detected = _detect_variable_type('Test', element)
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('email', 'email')
def test_pattern_matching_used_when_no_element():
	"""Without element context, detection falls back to value-pattern matching."""
	detected = _detect_variable_type('test@example.com', element=None)
	assert detected is not None
	name, fmt = detected
	assert (name, fmt) == ('email', 'email')
def test_ensure_unique_name_no_conflict():
	"""With no existing variables the proposed name is returned unchanged."""
	assert _ensure_unique_name('email', {}) == 'email'
def test_ensure_unique_name_with_conflict():
	"""A taken name gets a numeric suffix starting at 2."""
	taken = {
		'email': DetectedVariable(name='email', original_value='test1@example.com'),
	}
	assert _ensure_unique_name('email', taken) == 'email_2'
def test_ensure_unique_name_with_multiple_conflicts():
	"""The numeric suffix keeps incrementing past existing suffixed names."""
	taken = {
		'email': DetectedVariable(name='email', original_value='test1@example.com'),
		'email_2': DetectedVariable(name='email_2', original_value='test2@example.com'),
	}
	assert _ensure_unique_name('email', taken) == 'email_3'
def test_detect_variables_in_empty_history():
	"""An empty history yields no detected variables."""
	from types import SimpleNamespace

	empty = SimpleNamespace(history=[])
	assert detect_variables_in_history(empty) == {}  # type: ignore[arg-type]
def test_detect_variables_in_history_with_input_action():
	"""An input action on an email-typed element is detected as the 'email' variable."""
	# SimpleNamespace stand-ins avoid Pydantic validation of the real history models.
	from types import SimpleNamespace

	element = create_test_element(attributes={'type': 'email', 'id': 'email-input'})
	step = SimpleNamespace(
		model_output=SimpleNamespace(action=[SimpleNamespace(**{'input': {'index': 1, 'text': 'test@example.com'}})]),
		state=SimpleNamespace(interacted_element=[element]),
	)
	detected = detect_variables_in_history(SimpleNamespace(history=[step]))  # type: ignore[arg-type]
	assert len(detected) == 1
	assert 'email' in detected
	assert detected['email'].original_value == 'test@example.com'
	assert detected['email'].format == 'email'
def test_detect_variables_skips_duplicate_values():
	"""The same typed value entered twice is recorded as a single variable."""
	element = create_test_element(attributes={'type': 'email'})
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, element),
			({'input': {'index': 2, 'text': 'test@example.com'}}, element),
		]
	)
	detected = detect_variables_in_history(history)  # type: ignore[arg-type]
	assert len(detected) == 1
	assert 'email' in detected
def test_detect_variables_handles_missing_state():
	"""Pattern matching still runs when a history item has no state."""
	from types import SimpleNamespace

	step = SimpleNamespace(
		model_output=SimpleNamespace(action=[SimpleNamespace(**{'input': {'index': 1, 'text': 'test@example.com'}})]),
		state=None,
	)
	detected = detect_variables_in_history(SimpleNamespace(history=[step]))  # type: ignore[arg-type]
	assert len(detected) == 1
	assert 'email' in detected
	assert detected['email'].original_value == 'test@example.com'
def test_detect_variables_handles_missing_interacted_element():
	"""Pattern matching still runs when interacted_element is None."""
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, None),
		]
	)
	detected = detect_variables_in_history(history)  # type: ignore[arg-type]
	assert len(detected) == 1
	assert 'email' in detected
def test_detect_variables_multiple_types():
	"""Email, first name and date entered in one run are all detected."""
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
			({'input': {'index': 3, 'text': '1990-01-01'}}, create_test_element(attributes={'type': 'date'})),
		]
	)
	detected = detect_variables_in_history(history)  # type: ignore[arg-type]
	assert len(detected) == 3
	expected = {'email': 'test@example.com', 'first_name': 'John', 'date': '1990-01-01'}
	for var_name, original in expected.items():
		assert var_name in detected
		assert detected[var_name].original_value == original
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_variable_detection.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_variable_substitution.py | """Unit tests for variable substitution in agent history"""
from types import SimpleNamespace
from browser_use.agent.service import Agent
from browser_use.dom.views import DOMInteractedElement, NodeType
def create_test_element(attributes: dict[str, str] | None = None) -> DOMInteractedElement:
	"""Construct a bare-bones DOMInteractedElement carrying only the given attributes."""
	fields = {
		'node_id': 1,
		'backend_node_id': 1,
		'frame_id': 'frame1',
		'node_type': NodeType.ELEMENT_NODE,
		'node_value': '',
		'node_name': 'input',
		'attributes': attributes or {},
		'bounds': None,
		'x_path': '//*[@id="test"]',
		'element_hash': 12345,
	}
	return DOMInteractedElement(**fields)
def create_mock_history(actions_with_elements: list[tuple[dict, DOMInteractedElement | None]]):
	"""Assemble a SimpleNamespace mimicking an agent history list from (action, element) pairs."""

	def build_item(action_dict: dict, element) -> SimpleNamespace:
		# Each item mirrors the real structure: model_output.action plus state.interacted_element.
		output = SimpleNamespace(action=[SimpleNamespace(**action_dict)])
		state = SimpleNamespace(interacted_element=[element] if element else None)
		return SimpleNamespace(model_output=output, state=state)

	return SimpleNamespace(history=[build_item(action, element) for action, element in actions_with_elements])
def test_substitute_single_variable(mock_llm):
	"""A detected email value should be replaced where it was typed."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'}))]
	)
	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	# The typed text must now carry the replacement value.
	new_action = updated.history[0].model_output.action[0]  # type: ignore[attr-defined]
	assert vars(new_action)['input']['text'] == 'new@example.com'
def test_substitute_multiple_variables(mock_llm):
	"""Several detected variables should all be swapped in a single pass."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
			({'input': {'index': 3, 'text': '1990-01-01'}}, create_test_element(attributes={'type': 'date'})),
		]
	)
	replacements = {'email': 'new@example.com', 'first_name': 'Jane', 'date': '1995-05-15'}
	updated = agent._substitute_variables_in_history(history, replacements)  # type: ignore[arg-type]
	# Each history entry should now hold its corresponding replacement.
	expected_texts = ['new@example.com', 'Jane', '1995-05-15']
	for item, expected in zip(updated.history, expected_texts):  # type: ignore[attr-defined]
		assert vars(item.model_output.action[0])['input']['text'] == expected
def test_substitute_partial_variables(mock_llm):
	"""Variables absent from the mapping must keep their original values."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'input': {'index': 2, 'text': 'John'}}, create_test_element(attributes={'name': 'first_name'})),
		]
	)
	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	first = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	second = vars(updated.history[1].model_output.action[0])  # type: ignore[attr-defined]
	assert first['input']['text'] == 'new@example.com'
	# The name was not in the mapping, so it stays untouched.
	assert second['input']['text'] == 'John'
def test_substitute_nonexistent_variable(mock_llm):
	"""Unknown variable names should be ignored without side effects."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'}))]
	)
	updated = agent._substitute_variables_in_history(history, {'nonexistent_var': 'some_value'})  # type: ignore[arg-type]
	# Nothing in the history matches the variable, so the text is unchanged.
	unchanged = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert unchanged['input']['text'] == 'old@example.com'
def test_substitute_in_nested_dict(mock_llm):
	"""Replacement should reach values buried inside nested dictionaries."""
	agent = Agent(task='test', llm=mock_llm)
	nested_action = {
		'search_google': {
			'query': 'test@example.com',
			'metadata': {'user': 'test@example.com'},
		}
	}
	history = create_mock_history([(nested_action, create_test_element(attributes={'type': 'email'}))])
	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	payload = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	# Both the top-level and the doubly nested occurrence must be rewritten.
	assert payload['search_google']['query'] == 'new@example.com'
	assert payload['search_google']['metadata']['user'] == 'new@example.com'
def test_substitute_in_list(mock_llm):
	"""Replacement should also walk into list values."""
	agent = Agent(task='test', llm=mock_llm)
	# The first action lets the detector associate the email; the second holds it inside a list.
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'test@example.com'}}, create_test_element(attributes={'type': 'email'})),
			({'some_action': {'items': ['test@example.com', 'other_value', 'test@example.com']}}, None),
		]
	)
	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	typed = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	listed = vars(updated.history[1].model_output.action[0])  # type: ignore[attr-defined]
	assert typed['input']['text'] == 'new@example.com'
	# Every matching list entry is rewritten; unrelated entries survive.
	assert listed['some_action']['items'] == ['new@example.com', 'other_value', 'new@example.com']
def test_substitute_preserves_original_history(mock_llm):
	"""Substitution must operate on a copy, never mutating the input history."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'}))]
	)
	source_action = history.history[0].model_output.action[0]
	before = vars(source_action)['input']['text']
	# Run the substitution but deliberately ignore its return value.
	agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	after = vars(source_action)['input']['text']
	assert after == before == 'old@example.com'
def test_substitute_empty_variables(mock_llm):
	"""An empty mapping should leave the history untouched."""
	agent = Agent(task='test', llm=mock_llm)
	history = create_mock_history(
		[({'input': {'index': 1, 'text': 'old@example.com'}}, create_test_element(attributes={'type': 'email'}))]
	)
	updated = agent._substitute_variables_in_history(history, {})  # type: ignore[arg-type]
	result = vars(updated.history[0].model_output.action[0])  # type: ignore[attr-defined]
	assert result['input']['text'] == 'old@example.com'
def test_substitute_same_value_multiple_times(mock_llm):
	"""A value appearing in several actions is replaced in every one of them."""
	agent = Agent(task='test', llm=mock_llm)
	element = create_test_element(attributes={'type': 'email'})
	history = create_mock_history(
		[
			({'input': {'index': 1, 'text': 'old@example.com'}}, element),
			({'input': {'index': 2, 'text': 'old@example.com'}}, element),
		]
	)
	updated = agent._substitute_variables_in_history(history, {'email': 'new@example.com'})  # type: ignore[arg-type]
	# Both occurrences of the old value must now be the replacement.
	for item in updated.history:  # type: ignore[attr-defined]
		assert vars(item.model_output.action[0])['input']['text'] == 'new@example.com'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_variable_substitution.py",
"license": "MIT License",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:examples/features/blocked_domains.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
# Model used by the agent for this demo.
llm = ChatOpenAI(model='gpt-4o-mini')

# Example task: the agent tries several sites, some of which are blocked below.
task = 'Navigate to example.com, then try to go to x.com, then facebook.com, and finally visit google.com. Tell me which sites you were able to access.'

# Domains the agent must never visit. Plain entries match the bare domain,
# '*.'-prefixed entries match every subdomain, and a full URL pins the scheme.
prohibited_domains = [
	'x.com',  # X (formerly Twitter) - "locked the f in"
	'twitter.com',  # redirects to x.com anyway
	'facebook.com',  # lock the F in Facebook too
	'*.meta.com',  # every Meta property (wildcard pattern)
	'*.adult-site.com',  # all subdomains of adult sites
	'https://explicit-content.org',  # specific protocol + domain
	'gambling-site.net',  # gambling sites
]

# Note: for lists with 100+ domains an automatic optimization kicks in:
# - the list is converted to a set for O(1) lookup
# - pattern matching (*.domain) is disabled for large lists
# - both www.example.com and example.com variants are checked automatically
# This makes large blocklists (e.g. 400k+ malware domains) practical.

profile = BrowserProfile(
	prohibited_domains=prohibited_domains,
	headless=False,  # set to True to run without a visible browser window
	user_data_dir='~/.config/browseruse/profiles/blocked-demo',
)
browser_session = BrowserSession(browser_profile=profile)

agent = Agent(task=task, llm=llm, browser_session=browser_session)


async def main():
	"""Run the demo, wait for the user, then shut the browser down."""
	print(
		'Demo: Blocked Domains Feature - "Lock the F in" Edition\n'
		"We're literally locking the F in Facebook and X!\n"
		f'Prohibited domains: {prohibited_domains}\n'
		'The agent will try to visit various sites, but blocked domains will be prevented.\n'
	)
	await agent.run(max_steps=10)
	# Keep the window open until the user confirms, then tear everything down.
	input('Press Enter to close the browser...')
	await browser_session.kill()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/blocked_domains.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/modelscope_example.py | """
Simple try of the agent.
@dev You need to add MODELSCOPE_API_KEY to your environment variables.
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
# dotenv
load_dotenv()
# Fail fast if the required credential is missing.
api_key = os.getenv('MODELSCOPE_API_KEY', '')
if not api_key:
	raise ValueError('MODELSCOPE_API_KEY is not set')


async def run_search():
	"""Run a short demo task against the ModelScope OpenAI-compatible endpoint."""
	llm = ChatOpenAI(
		base_url='https://api-inference.modelscope.cn/v1/',
		model='Qwen/Qwen2.5-VL-72B-Instruct',
		api_key=api_key,
	)
	agent = Agent(
		task='go to google, search for modelscope',
		llm=llm,
		use_vision=False,
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(run_search())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/modelscope_example.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/actor/element.py | """Element class for element operations."""
import asyncio
from typing import TYPE_CHECKING, Literal, Union
from cdp_use.client import logger
from typing_extensions import TypedDict
if TYPE_CHECKING:
from cdp_use.cdp.dom.commands import (
DescribeNodeParameters,
FocusParameters,
GetAttributesParameters,
GetBoxModelParameters,
PushNodesByBackendIdsToFrontendParameters,
RequestChildNodesParameters,
ResolveNodeParameters,
)
from cdp_use.cdp.input.commands import (
DispatchMouseEventParameters,
)
from cdp_use.cdp.input.types import MouseButton
from cdp_use.cdp.page.commands import CaptureScreenshotParameters
from cdp_use.cdp.page.types import Viewport
from cdp_use.cdp.runtime.commands import CallFunctionOnParameters
from browser_use.browser.session import BrowserSession
# Type definitions for element operations.
# Keyboard modifier names accepted by click(); mapped to the CDP modifier bitmask.
ModifierType = Literal['Alt', 'Control', 'Meta', 'Shift']


class Position(TypedDict):
	"""2D position coordinates (CSS pixels, viewport coordinate space)."""

	x: float
	y: float


class BoundingBox(TypedDict):
	"""Element bounding box with position and dimensions (CSS pixels)."""

	# (x, y) is the top-left corner of the box.
	x: float
	y: float
	width: float
	height: float


class ElementInfo(TypedDict):
	"""Basic information about a DOM element, mirroring the CDP naming (camelCase keys)."""

	# Stable CDP backend node id; survives across DOM agent sessions.
	backendNodeId: int
	# Frontend node id; only present once the node has been pushed to the frontend.
	nodeId: int | None
	nodeName: str
	nodeType: int
	nodeValue: str | None
	attributes: dict[str, str]
	# None when the element has no layout (e.g. display:none).
	boundingBox: BoundingBox | None
	error: str | None
class Element:
"""Element operations using BackendNodeId."""
def __init__(
self,
browser_session: 'BrowserSession',
backend_node_id: int,
session_id: str | None = None,
):
self._browser_session = browser_session
self._client = browser_session.cdp_client
self._backend_node_id = backend_node_id
self._session_id = session_id
async def _get_node_id(self) -> int:
"""Get DOM node ID from backend node ID."""
params: 'PushNodesByBackendIdsToFrontendParameters' = {'backendNodeIds': [self._backend_node_id]}
result = await self._client.send.DOM.pushNodesByBackendIdsToFrontend(params, session_id=self._session_id)
return result['nodeIds'][0]
async def _get_remote_object_id(self) -> str | None:
"""Get remote object ID for this element."""
node_id = await self._get_node_id()
params: 'ResolveNodeParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.resolveNode(params, session_id=self._session_id)
object_id = result['object'].get('objectId', None)
if not object_id:
return None
return object_id
async def click(
self,
button: 'MouseButton' = 'left',
click_count: int = 1,
modifiers: list[ModifierType] | None = None,
) -> None:
"""Click the element using the advanced watchdog implementation."""
try:
# Get viewport dimensions for visibility checks
layout_metrics = await self._client.send.Page.getLayoutMetrics(session_id=self._session_id)
viewport_width = layout_metrics['layoutViewport']['clientWidth']
viewport_height = layout_metrics['layoutViewport']['clientHeight']
# Try multiple methods to get element geometry
quads = []
# Method 1: Try DOM.getContentQuads first (best for inline elements and complex layouts)
try:
content_quads_result = await self._client.send.DOM.getContentQuads(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'quads' in content_quads_result and content_quads_result['quads']:
quads = content_quads_result['quads']
except Exception:
pass
# Method 2: Fall back to DOM.getBoxModel
if not quads:
try:
box_model = await self._client.send.DOM.getBoxModel(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'model' in box_model and 'content' in box_model['model']:
content_quad = box_model['model']['content']
if len(content_quad) >= 8:
# Convert box model format to quad format
quads = [
[
content_quad[0],
content_quad[1], # x1, y1
content_quad[2],
content_quad[3], # x2, y2
content_quad[4],
content_quad[5], # x3, y3
content_quad[6],
content_quad[7], # x4, y4
]
]
except Exception:
pass
# Method 3: Fall back to JavaScript getBoundingClientRect
if not quads:
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' in result and 'objectId' in result['object']:
object_id = result['object']['objectId']
# Get bounding rect via JavaScript
bounds_result = await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': """
function() {
const rect = this.getBoundingClientRect();
return {
x: rect.left,
y: rect.top,
width: rect.width,
height: rect.height
};
}
""",
'objectId': object_id,
'returnByValue': True,
},
session_id=self._session_id,
)
if 'result' in bounds_result and 'value' in bounds_result['result']:
rect = bounds_result['result']['value']
# Convert rect to quad format
x, y, w, h = rect['x'], rect['y'], rect['width'], rect['height']
quads = [
[
x,
y, # top-left
x + w,
y, # top-right
x + w,
y + h, # bottom-right
x,
y + h, # bottom-left
]
]
except Exception:
pass
# If we still don't have quads, fall back to JS click
if not quads:
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' not in result or 'objectId' not in result['object']:
raise Exception('Failed to find DOM element based on backendNodeId, maybe page content changed?')
object_id = result['object']['objectId']
await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { this.click(); }',
'objectId': object_id,
},
session_id=self._session_id,
)
await asyncio.sleep(0.05)
return
except Exception as js_e:
raise Exception(f'Failed to click element: {js_e}')
# Find the largest visible quad within the viewport
best_quad = None
best_area = 0
for quad in quads:
if len(quad) < 8:
continue
# Calculate quad bounds
xs = [quad[i] for i in range(0, 8, 2)]
ys = [quad[i] for i in range(1, 8, 2)]
min_x, max_x = min(xs), max(xs)
min_y, max_y = min(ys), max(ys)
# Check if quad intersects with viewport
if max_x < 0 or max_y < 0 or min_x > viewport_width or min_y > viewport_height:
continue # Quad is completely outside viewport
# Calculate visible area (intersection with viewport)
visible_min_x = max(0, min_x)
visible_max_x = min(viewport_width, max_x)
visible_min_y = max(0, min_y)
visible_max_y = min(viewport_height, max_y)
visible_width = visible_max_x - visible_min_x
visible_height = visible_max_y - visible_min_y
visible_area = visible_width * visible_height
if visible_area > best_area:
best_area = visible_area
best_quad = quad
if not best_quad:
# No visible quad found, use the first quad anyway
best_quad = quads[0]
# Calculate center point of the best quad
center_x = sum(best_quad[i] for i in range(0, 8, 2)) / 4
center_y = sum(best_quad[i] for i in range(1, 8, 2)) / 4
# Ensure click point is within viewport bounds
center_x = max(0, min(viewport_width - 1, center_x))
center_y = max(0, min(viewport_height - 1, center_y))
# Scroll element into view
try:
await self._client.send.DOM.scrollIntoViewIfNeeded(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
await asyncio.sleep(0.05) # Wait for scroll to complete
except Exception:
pass
# Calculate modifier bitmask for CDP
modifier_value = 0
if modifiers:
modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
for mod in modifiers:
modifier_value |= modifier_map.get(mod, 0)
# Perform the click using CDP
try:
# Move mouse to element
await self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseMoved',
'x': center_x,
'y': center_y,
},
session_id=self._session_id,
)
await asyncio.sleep(0.05)
# Mouse down
try:
await asyncio.wait_for(
self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mousePressed',
'x': center_x,
'y': center_y,
'button': button,
'clickCount': click_count,
'modifiers': modifier_value,
},
session_id=self._session_id,
),
timeout=1.0, # 1 second timeout for mousePressed
)
await asyncio.sleep(0.08)
except TimeoutError:
pass # Don't sleep if we timed out
# Mouse up
try:
await asyncio.wait_for(
self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseReleased',
'x': center_x,
'y': center_y,
'button': button,
'clickCount': click_count,
'modifiers': modifier_value,
},
session_id=self._session_id,
),
timeout=3.0, # 3 second timeout for mouseReleased
)
except TimeoutError:
pass
except Exception as e:
# Fall back to JavaScript click via CDP
try:
result = await self._client.send.DOM.resolveNode(
params={'backendNodeId': self._backend_node_id}, session_id=self._session_id
)
if 'object' not in result or 'objectId' not in result['object']:
raise Exception('Failed to find DOM element based on backendNodeId, maybe page content changed?')
object_id = result['object']['objectId']
await self._client.send.Runtime.callFunctionOn(
params={
'functionDeclaration': 'function() { this.click(); }',
'objectId': object_id,
},
session_id=self._session_id,
)
await asyncio.sleep(0.1)
return
except Exception as js_e:
raise Exception(f'Failed to click element: {e}')
except Exception as e:
# Extract key element info for error message
raise RuntimeError(f'Failed to click element: {e}')
	async def fill(self, value: str, clear: bool = True) -> None:
		"""Fill the input element using proper CDP methods with improved focus handling.

		The element is scrolled into view, focused, optionally cleared, and then
		the text is typed character by character as keyDown/char/keyUp event
		triples so the page sees realistic keyboard input. Newlines in *value*
		are sent as Enter key presses.

		Args:
			value: Text to type into the element.
			clear: When True, existing field content is cleared before typing.

		Raises:
			Exception: If the element cannot be resolved or typing fails.
		"""
		try:
			# Use the existing CDP client and session
			cdp_client = self._client
			session_id = self._session_id
			backend_node_id = self._backend_node_id
			# Track coordinates for metadata
			input_coordinates = None
			# Scroll element into view (best effort; typing may still work if this fails)
			try:
				await cdp_client.send.DOM.scrollIntoViewIfNeeded(params={'backendNodeId': backend_node_id}, session_id=session_id)
				await asyncio.sleep(0.01)
			except Exception as e:
				logger.warning(f'Failed to scroll element into view: {e}')
			# Get object ID for the element
			result = await cdp_client.send.DOM.resolveNode(
				params={'backendNodeId': backend_node_id},
				session_id=session_id,
			)
			if 'object' not in result or 'objectId' not in result['object']:
				raise RuntimeError('Failed to get object ID for element')
			object_id = result['object']['objectId']
			# Get element center coordinates for focus (best effort, used only as focus metadata)
			try:
				bounds_result = await cdp_client.send.Runtime.callFunctionOn(
					params={
						'functionDeclaration': 'function() { return this.getBoundingClientRect(); }',
						'objectId': object_id,
						'returnByValue': True,
					},
					session_id=session_id,
				)
				if bounds_result.get('result', {}).get('value'):
					bounds = bounds_result['result']['value']  # type: ignore
					center_x = bounds['x'] + bounds['width'] / 2
					center_y = bounds['y'] + bounds['height'] / 2
					input_coordinates = {'input_x': center_x, 'input_y': center_y}
					logger.debug(f'Using element coordinates: x={center_x:.1f}, y={center_y:.1f}')
			except Exception as e:
				logger.debug(f'Could not get element coordinates: {e}')
			# Ensure session_id is not None before the typed helper calls below
			if session_id is None:
				raise RuntimeError('Session ID is required for fill operation')
			# Step 1: Focus the element
			# NOTE(review): the result is currently unused - focus failure is tolerated
			# and typing is attempted anyway; confirm this is intentional.
			focused_successfully = await self._focus_element_simple(
				backend_node_id=backend_node_id,
				object_id=object_id,
				cdp_client=cdp_client,
				session_id=session_id,
				input_coordinates=input_coordinates,
			)
			# Step 2: Clear existing text if requested
			if clear:
				cleared_successfully = await self._clear_text_field(
					object_id=object_id, cdp_client=cdp_client, session_id=session_id
				)
				if not cleared_successfully:
					logger.warning('Text field clearing failed, typing may append to existing text')
			# Step 3: Type the text character by character using proper human-like key events
			logger.debug(f'Typing text character by character: "{value}"')
			for i, char in enumerate(value):
				# Handle newline characters as Enter key
				if char == '\n':
					# Send proper Enter key sequence: keyDown -> char('\r') -> keyUp
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'keyDown',
							'key': 'Enter',
							'code': 'Enter',
							'windowsVirtualKeyCode': 13,
						},
						session_id=session_id,
					)
					# Small delay to emulate human typing speed
					await asyncio.sleep(0.001)
					# Send char event with carriage return (what pages receive for Enter)
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'char',
							'text': '\r',
							'key': 'Enter',
						},
						session_id=session_id,
					)
					# Send keyUp event
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'keyUp',
							'key': 'Enter',
							'code': 'Enter',
							'windowsVirtualKeyCode': 13,
						},
						session_id=session_id,
					)
				else:
					# Handle regular characters
					# Get proper modifiers, VK code, and base key for the character
					modifiers, vk_code, base_key = self._get_char_modifiers_and_vk(char)
					key_code = self._get_key_code_for_char(base_key)
					# Step 1: Send keyDown event (NO text parameter)
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'keyDown',
							'key': base_key,
							'code': key_code,
							'modifiers': modifiers,
							'windowsVirtualKeyCode': vk_code,
						},
						session_id=session_id,
					)
					# Small delay to emulate human typing speed
					await asyncio.sleep(0.001)
					# Step 2: Send char event (WITH text parameter) - this is crucial for text input
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'char',
							'text': char,
							'key': char,
						},
						session_id=session_id,
					)
					# Step 3: Send keyUp event (NO text parameter)
					await cdp_client.send.Input.dispatchKeyEvent(
						params={
							'type': 'keyUp',
							'key': base_key,
							'code': key_code,
							'modifiers': modifiers,
							'windowsVirtualKeyCode': vk_code,
						},
						session_id=session_id,
					)
				# Add 18ms delay between keystrokes
				await asyncio.sleep(0.018)
		except Exception as e:
			raise Exception(f'Failed to fill element: {str(e)}')
async def hover(self) -> None:
"""Hover over the element."""
box = await self.get_bounding_box()
if not box:
raise RuntimeError('Element is not visible or has no bounding box')
x = box['x'] + box['width'] / 2
y = box['y'] + box['height'] / 2
params: 'DispatchMouseEventParameters' = {'type': 'mouseMoved', 'x': x, 'y': y}
await self._client.send.Input.dispatchMouseEvent(params, session_id=self._session_id)
async def focus(self) -> None:
"""Focus the element."""
node_id = await self._get_node_id()
params: 'FocusParameters' = {'nodeId': node_id}
await self._client.send.DOM.focus(params, session_id=self._session_id)
	async def check(self) -> None:
		"""Check or uncheck a checkbox/radio button.

		Implemented as a plain click; the browser toggles the control's state,
		so calling this twice on a checkbox returns it to its original state.
		"""
		await self.click()
async def select_option(self, values: str | list[str]) -> None:
"""Select option(s) in a select element."""
if isinstance(values, str):
values = [values]
# Focus the element first
try:
await self.focus()
except Exception:
logger.warning('Failed to focus element')
# For select elements, we need to find option elements and click them
# This is a simplified approach - in practice, you might need to handle
# different select types (single vs multi-select) differently
node_id = await self._get_node_id()
# Request child nodes to get the options
params: 'RequestChildNodesParameters' = {'nodeId': node_id, 'depth': 1}
await self._client.send.DOM.requestChildNodes(params, session_id=self._session_id)
# Get the updated node description with children
describe_params: 'DescribeNodeParameters' = {'nodeId': node_id, 'depth': 1}
describe_result = await self._client.send.DOM.describeNode(describe_params, session_id=self._session_id)
select_node = describe_result['node']
# Find and select matching options
for child in select_node.get('children', []):
if child.get('nodeName', '').lower() == 'option':
# Get option attributes
attrs = child.get('attributes', [])
option_attrs = {}
for i in range(0, len(attrs), 2):
if i + 1 < len(attrs):
option_attrs[attrs[i]] = attrs[i + 1]
option_value = option_attrs.get('value', '')
option_text = child.get('nodeValue', '')
# Check if this option should be selected
should_select = option_value in values or option_text in values
if should_select:
# Click the option to select it
option_node_id = child.get('nodeId')
if option_node_id:
# Get backend node ID for the option
option_describe_params: 'DescribeNodeParameters' = {'nodeId': option_node_id}
option_backend_result = await self._client.send.DOM.describeNode(
option_describe_params, session_id=self._session_id
)
option_backend_id = option_backend_result['node']['backendNodeId']
# Create an Element for the option and click it
option_element = Element(self._browser_session, option_backend_id, self._session_id)
await option_element.click()
async def drag_to(
self,
target: Union['Element', Position],
source_position: Position | None = None,
target_position: Position | None = None,
) -> None:
"""Drag this element to another element or position."""
# Get source coordinates
if source_position:
source_x = source_position['x']
source_y = source_position['y']
else:
source_box = await self.get_bounding_box()
if not source_box:
raise RuntimeError('Source element is not visible')
source_x = source_box['x'] + source_box['width'] / 2
source_y = source_box['y'] + source_box['height'] / 2
# Get target coordinates
if isinstance(target, dict) and 'x' in target and 'y' in target:
target_x = target['x']
target_y = target['y']
else:
if target_position:
target_box = await target.get_bounding_box()
if not target_box:
raise RuntimeError('Target element is not visible')
target_x = target_box['x'] + target_position['x']
target_y = target_box['y'] + target_position['y']
else:
target_box = await target.get_bounding_box()
if not target_box:
raise RuntimeError('Target element is not visible')
target_x = target_box['x'] + target_box['width'] / 2
target_y = target_box['y'] + target_box['height'] / 2
# Perform drag operation
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mousePressed', 'x': source_x, 'y': source_y, 'button': 'left'},
session_id=self._session_id,
)
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mouseMoved', 'x': target_x, 'y': target_y},
session_id=self._session_id,
)
await self._client.send.Input.dispatchMouseEvent(
{'type': 'mouseReleased', 'x': target_x, 'y': target_y, 'button': 'left'},
session_id=self._session_id,
)
# Element properties and queries
async def get_attribute(self, name: str) -> str | None:
"""Get an attribute value."""
node_id = await self._get_node_id()
params: 'GetAttributesParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.getAttributes(params, session_id=self._session_id)
attributes = result['attributes']
for i in range(0, len(attributes), 2):
if attributes[i] == name:
return attributes[i + 1]
return None
async def get_bounding_box(self) -> BoundingBox | None:
"""Get the bounding box of the element."""
try:
node_id = await self._get_node_id()
params: 'GetBoxModelParameters' = {'nodeId': node_id}
result = await self._client.send.DOM.getBoxModel(params, session_id=self._session_id)
if 'model' not in result:
return None
# Get content box (first 8 values are content quad: x1,y1,x2,y2,x3,y3,x4,y4)
content = result['model']['content']
if len(content) < 8:
return None
# Calculate bounding box from quad
x_coords = [content[i] for i in range(0, 8, 2)]
y_coords = [content[i] for i in range(1, 8, 2)]
x = min(x_coords)
y = min(y_coords)
width = max(x_coords) - x
height = max(y_coords) - y
return BoundingBox(x=x, y=y, width=width, height=height)
except Exception:
return None
async def screenshot(self, format: str = 'png', quality: int | None = None) -> str:
"""Take a screenshot of this element and return base64 encoded image.
Args:
format: Image format ('jpeg', 'png', 'webp')
quality: Quality 0-100 for JPEG format
Returns:
Base64-encoded image data
"""
# Get element's bounding box
box = await self.get_bounding_box()
if not box:
raise RuntimeError('Element is not visible or has no bounding box')
# Create viewport clip for the element
viewport: 'Viewport' = {'x': box['x'], 'y': box['y'], 'width': box['width'], 'height': box['height'], 'scale': 1.0}
# Prepare screenshot parameters
params: 'CaptureScreenshotParameters' = {'format': format, 'clip': viewport}
if quality is not None and format.lower() == 'jpeg':
params['quality'] = quality
# Take screenshot
result = await self._client.send.Page.captureScreenshot(params, session_id=self._session_id)
return result['data']
async def evaluate(self, page_function: str, *args) -> str:
    """Execute JavaScript code in the context of this element.

    The JavaScript code executes with 'this' bound to the element, allowing direct
    access to element properties and methods.

    Args:
        page_function: JavaScript code that MUST start with (...args) => format
        *args: Arguments to pass to the function

    Returns:
        String representation of the JavaScript execution result.
        Objects and arrays are JSON-stringified.

    Raises:
        RuntimeError: If the element is detached from the DOM or the script throws.
        ValueError: If page_function is not in (async) arrow-function form.

    Example:
        # Get element's text content
        text = await element.evaluate("() => this.textContent")
        # Set style with argument
        await element.evaluate("(color) => this.style.color = color", "red")
        # Get computed style
        color = await element.evaluate("() => getComputedStyle(this).color")
        # Async operations
        result = await element.evaluate("async () => { await new Promise(r => setTimeout(r, 100)); return this.id; }")
    """
    # Get remote object ID for this element
    object_id = await self._get_remote_object_id()
    if not object_id:
        raise RuntimeError('Element has no remote object ID (element may be detached from DOM)')
    # Validate arrow function format (allow async prefix)
    page_function = page_function.strip()
    # Check for arrow function with optional async prefix
    if not ('=>' in page_function and (page_function.startswith('(') or page_function.startswith('async'))):
        raise ValueError(
            f'JavaScript code must start with (...args) => or async (...args) => format. Got: {page_function[:50]}...'
        )
    # Convert arrow function to function declaration for CallFunctionOn
    # CallFunctionOn expects 'function(...args) { ... }' format, not arrow functions
    # We need to convert: '() => expression' to 'function() { return expression; }'
    # or: '(x, y) => { statements }' to 'function(x, y) { statements }'
    # Extract parameters and body from arrow function
    import re

    # Check if it's an async arrow function
    is_async = page_function.strip().startswith('async')
    async_prefix = 'async ' if is_async else ''
    # Match: (params) => body or async (params) => body
    # Strip 'async' prefix if present for parsing
    func_to_parse = page_function.strip()
    if is_async:
        func_to_parse = func_to_parse[5:].strip()  # Remove 'async' prefix
    # NOTE(review): '[^)]*' cannot handle nested parens in parameter defaults,
    # e.g. '(a = f(1)) => ...' — confirm callers never pass such signatures.
    arrow_match = re.match(r'\s*\(([^)]*)\)\s*=>\s*(.+)', func_to_parse, re.DOTALL)
    if not arrow_match:
        raise ValueError(f'Could not parse arrow function: {page_function[:50]}...')
    params_str = arrow_match.group(1).strip()  # e.g., '', 'x', 'x, y'
    body = arrow_match.group(2).strip()
    # If body doesn't start with {, it's an expression that needs implicit return
    if not body.startswith('{'):
        function_declaration = f'{async_prefix}function({params_str}) {{ return {body}; }}'
    else:
        # Body already has braces, use as-is
        function_declaration = f'{async_prefix}function({params_str}) {body}'
    # Build CallArgument list for args if provided
    call_arguments = []
    if args:
        from cdp_use.cdp.runtime.types import CallArgument

        for arg in args:
            # Convert Python values to CallArgument format
            call_arguments.append(CallArgument(value=arg))
    # Prepare CallFunctionOn parameters
    params: 'CallFunctionOnParameters' = {
        'functionDeclaration': function_declaration,
        'objectId': object_id,
        'returnByValue': True,
        # awaitPromise resolves promises so async arrows return their value
        'awaitPromise': True,
    }
    if call_arguments:
        params['arguments'] = call_arguments
    # Execute the function on the element
    result = await self._client.send.Runtime.callFunctionOn(
        params,
        session_id=self._session_id,
    )
    # Handle exceptions
    if 'exceptionDetails' in result:
        raise RuntimeError(f'JavaScript evaluation failed: {result["exceptionDetails"]}')
    # Extract and return value
    value = result.get('result', {}).get('value')
    # Return string representation (matching Page.evaluate behavior)
    if value is None:
        return ''
    elif isinstance(value, str):
        return value
    else:
        # Convert objects, numbers, booleans to string
        import json

        try:
            return json.dumps(value) if isinstance(value, (dict, list)) else str(value)
        except (TypeError, ValueError):
            return str(value)
# Helpers for modifiers etc
def _get_char_modifiers_and_vk(self, char: str) -> tuple[int, int, str]:
"""Get modifiers, virtual key code, and base key for a character.
Returns:
(modifiers, windowsVirtualKeyCode, base_key)
"""
# Characters that require Shift modifier
shift_chars = {
'!': ('1', 49),
'@': ('2', 50),
'#': ('3', 51),
'$': ('4', 52),
'%': ('5', 53),
'^': ('6', 54),
'&': ('7', 55),
'*': ('8', 56),
'(': ('9', 57),
')': ('0', 48),
'_': ('-', 189),
'+': ('=', 187),
'{': ('[', 219),
'}': (']', 221),
'|': ('\\', 220),
':': (';', 186),
'"': ("'", 222),
'<': (',', 188),
'>': ('.', 190),
'?': ('/', 191),
'~': ('`', 192),
}
# Check if character requires Shift
if char in shift_chars:
base_key, vk_code = shift_chars[char]
return (8, vk_code, base_key) # Shift=8
# Uppercase letters require Shift
if char.isupper():
return (8, ord(char), char.lower()) # Shift=8
# Lowercase letters
if char.islower():
return (0, ord(char.upper()), char)
# Numbers
if char.isdigit():
return (0, ord(char), char)
# Special characters without Shift
no_shift_chars = {
' ': 32,
'-': 189,
'=': 187,
'[': 219,
']': 221,
'\\': 220,
';': 186,
"'": 222,
',': 188,
'.': 190,
'/': 191,
'`': 192,
}
if char in no_shift_chars:
return (0, no_shift_chars[char], char)
# Fallback
return (0, ord(char.upper()) if char.isalpha() else ord(char), char)
def _get_key_code_for_char(self, char: str) -> str:
"""Get the proper key code for a character (like Playwright does)."""
# Key code mapping for common characters (using proper base keys + modifiers)
key_codes = {
' ': 'Space',
'.': 'Period',
',': 'Comma',
'-': 'Minus',
'_': 'Minus', # Underscore uses Minus with Shift
'@': 'Digit2', # @ uses Digit2 with Shift
'!': 'Digit1', # ! uses Digit1 with Shift (not 'Exclamation')
'?': 'Slash', # ? uses Slash with Shift
':': 'Semicolon', # : uses Semicolon with Shift
';': 'Semicolon',
'(': 'Digit9', # ( uses Digit9 with Shift
')': 'Digit0', # ) uses Digit0 with Shift
'[': 'BracketLeft',
']': 'BracketRight',
'{': 'BracketLeft', # { uses BracketLeft with Shift
'}': 'BracketRight', # } uses BracketRight with Shift
'/': 'Slash',
'\\': 'Backslash',
'=': 'Equal',
'+': 'Equal', # + uses Equal with Shift
'*': 'Digit8', # * uses Digit8 with Shift
'&': 'Digit7', # & uses Digit7 with Shift
'%': 'Digit5', # % uses Digit5 with Shift
'$': 'Digit4', # $ uses Digit4 with Shift
'#': 'Digit3', # # uses Digit3 with Shift
'^': 'Digit6', # ^ uses Digit6 with Shift
'~': 'Backquote', # ~ uses Backquote with Shift
'`': 'Backquote',
'"': 'Quote', # " uses Quote with Shift
"'": 'Quote',
'<': 'Comma', # < uses Comma with Shift
'>': 'Period', # > uses Period with Shift
'|': 'Backslash', # | uses Backslash with Shift
}
if char in key_codes:
return key_codes[char]
elif char.isalpha():
return f'Key{char.upper()}'
elif char.isdigit():
return f'Digit{char}'
else:
# Fallback for unknown characters
return f'Key{char.upper()}' if char.isascii() and char.isalpha() else 'Unidentified'
async def _clear_text_field(self, object_id: str, cdp_client, session_id: str) -> bool:
    """Clear text field using multiple strategies, starting with the most reliable.

    Args:
        object_id: Runtime remote object id of the input element ('this' in the JS below).
        cdp_client: CDP client used to issue Runtime/Input commands.
        session_id: CDP session the commands are scoped to.

    Returns:
        True if a strategy (apparently) cleared the field, False if all failed.
    """
    try:
        # Strategy 1: Direct JavaScript value setting (most reliable for modern web apps)
        logger.debug('Clearing text field using JavaScript value setting')
        await cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': """
                function() {
                    // Try to select all text first (only works on text-like inputs)
                    // This handles cases where cursor is in the middle of text
                    try {
                        this.select();
                    } catch (e) {
                        // Some input types (date, color, number, etc.) don't support select()
                        // That's fine, we'll just clear the value directly
                    }
                    // Set value to empty
                    this.value = "";
                    // Dispatch events to notify frameworks like React
                    this.dispatchEvent(new Event("input", { bubbles: true }));
                    this.dispatchEvent(new Event("change", { bubbles: true }));
                    return this.value;
                }
                """,
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=session_id,
        )
        # Verify clearing worked by checking the value
        verify_result = await cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': 'function() { return this.value; }',
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=session_id,
        )
        current_value = verify_result.get('result', {}).get('value', '')
        if not current_value:
            logger.debug('Text field cleared successfully using JavaScript')
            return True
        else:
            # Fall through to strategy 2 below instead of giving up.
            logger.debug(f'JavaScript clear partially failed, field still contains: "{current_value}"')
    except Exception as e:
        logger.debug(f'JavaScript clear failed: {e}')
    # Strategy 2: Triple-click + Delete (fallback for stubborn fields)
    try:
        logger.debug('Fallback: Clearing using triple-click + Delete')
        # Get element center coordinates for triple-click
        bounds_result = await cdp_client.send.Runtime.callFunctionOn(
            params={
                'functionDeclaration': 'function() { return this.getBoundingClientRect(); }',
                'objectId': object_id,
                'returnByValue': True,
            },
            session_id=session_id,
        )
        if bounds_result.get('result', {}).get('value'):
            bounds = bounds_result['result']['value']  # type: ignore
            center_x = bounds['x'] + bounds['width'] / 2
            center_y = bounds['y'] + bounds['height'] / 2
            # Triple-click to select all text
            await cdp_client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mousePressed',
                    'x': center_x,
                    'y': center_y,
                    'button': 'left',
                    'clickCount': 3,
                },
                session_id=session_id,
            )
            await cdp_client.send.Input.dispatchMouseEvent(
                params={
                    'type': 'mouseReleased',
                    'x': center_x,
                    'y': center_y,
                    'button': 'left',
                    'clickCount': 3,
                },
                session_id=session_id,
            )
            # Delete selected text
            await cdp_client.send.Input.dispatchKeyEvent(
                params={
                    'type': 'keyDown',
                    'key': 'Delete',
                    'code': 'Delete',
                },
                session_id=session_id,
            )
            await cdp_client.send.Input.dispatchKeyEvent(
                params={
                    'type': 'keyUp',
                    'key': 'Delete',
                    'code': 'Delete',
                },
                session_id=session_id,
            )
            # NOTE(review): success is assumed here without re-reading this.value — confirm acceptable.
            logger.debug('Text field cleared using triple-click + Delete')
            return True
    except Exception as e:
        logger.debug(f'Triple-click clear failed: {e}')
    # If all strategies failed
    logger.warning('All text clearing strategies failed')
    return False
async def _focus_element_simple(
    self, backend_node_id: int, object_id: str, cdp_client, session_id: str, input_coordinates=None
) -> bool:
    """Focus element using multiple strategies with robust fallbacks.

    Tries, in order: DOM.focus, JavaScript this.focus(), and finally a synthetic
    click at `input_coordinates` (expects keys 'input_x' / 'input_y').

    Returns:
        True as soon as one strategy succeeds, False if all fail.
    """
    try:
        # Strategy 1: CDP focus (most reliable)
        logger.debug('Focusing element using CDP focus')
        await cdp_client.send.DOM.focus(params={'backendNodeId': backend_node_id}, session_id=session_id)
        logger.debug('Element focused successfully using CDP focus')
        return True
    except Exception as e:
        logger.debug(f'CDP focus failed: {e}, trying JavaScript focus')
        try:
            # Strategy 2: JavaScript focus (fallback)
            logger.debug('Focusing element using JavaScript focus')
            await cdp_client.send.Runtime.callFunctionOn(
                params={
                    'functionDeclaration': 'function() { this.focus(); }',
                    'objectId': object_id,
                },
                session_id=session_id,
            )
            logger.debug('Element focused successfully using JavaScript')
            return True
        except Exception as e:
            logger.debug(f'JavaScript focus failed: {e}, trying click focus')
            try:
                # Strategy 3: Click to focus (last resort)
                if input_coordinates:
                    logger.debug(f'Focusing element by clicking at coordinates: {input_coordinates}')
                    center_x = input_coordinates['input_x']
                    center_y = input_coordinates['input_y']
                    # Click on the element to focus it
                    await cdp_client.send.Input.dispatchMouseEvent(
                        params={
                            'type': 'mousePressed',
                            'x': center_x,
                            'y': center_y,
                            'button': 'left',
                            'clickCount': 1,
                        },
                        session_id=session_id,
                    )
                    await cdp_client.send.Input.dispatchMouseEvent(
                        params={
                            'type': 'mouseReleased',
                            'x': center_x,
                            'y': center_y,
                            'button': 'left',
                            'clickCount': 1,
                        },
                        session_id=session_id,
                    )
                    logger.debug('Element focused using click')
                    return True
                else:
                    # No coordinates: fall through to the final `return False`.
                    logger.debug('No coordinates available for click focus')
            except Exception as e:
                logger.warning(f'All focus strategies failed: {e}')
    return False
async def get_basic_info(self) -> ElementInfo:
    """Get basic information about the element including coordinates and properties.

    Never raises: any CDP failure is reported via the `error` field of the
    returned ElementInfo instead.
    """
    try:
        # Get basic node information
        node_id = await self._get_node_id()
        describe_result = await self._client.send.DOM.describeNode({'nodeId': node_id}, session_id=self._session_id)
        node_info = describe_result['node']
        # Get bounding box
        bounding_box = await self.get_bounding_box()
        # Get attributes as a proper dict
        # (CDP returns attributes as a flat [name, value, name, value, ...] list)
        attributes_list = node_info.get('attributes', [])
        attributes_dict: dict[str, str] = {}
        for i in range(0, len(attributes_list), 2):
            if i + 1 < len(attributes_list):
                attributes_dict[attributes_list[i]] = attributes_list[i + 1]
        return ElementInfo(
            backendNodeId=self._backend_node_id,
            nodeId=node_id,
            nodeName=node_info.get('nodeName', ''),
            nodeType=node_info.get('nodeType', 0),
            nodeValue=node_info.get('nodeValue'),
            attributes=attributes_dict,
            boundingBox=bounding_box,
            error=None,
        )
    except Exception as e:
        # Degrade gracefully: return a stub record carrying the error message.
        return ElementInfo(
            backendNodeId=self._backend_node_id,
            nodeId=None,
            nodeName='',
            nodeType=0,
            nodeValue=None,
            attributes={},
            boundingBox=None,
            error=str(e),
        )
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/element.py",
"license": "MIT License",
"lines": 1011,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/actor/mouse.py | """Mouse class for mouse operations."""
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from cdp_use.cdp.input.commands import DispatchMouseEventParameters, SynthesizeScrollGestureParameters
from cdp_use.cdp.input.types import MouseButton
from browser_use.browser.session import BrowserSession
class Mouse:
"""Mouse operations for a target."""
def __init__(self, browser_session: 'BrowserSession', session_id: str | None = None, target_id: str | None = None):
self._browser_session = browser_session
self._client = browser_session.cdp_client
self._session_id = session_id
self._target_id = target_id
async def click(self, x: int, y: int, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
"""Click at the specified coordinates."""
# Mouse press
press_params: 'DispatchMouseEventParameters' = {
'type': 'mousePressed',
'x': x,
'y': y,
'button': button,
'clickCount': click_count,
}
await self._client.send.Input.dispatchMouseEvent(
press_params,
session_id=self._session_id,
)
# Mouse release
release_params: 'DispatchMouseEventParameters' = {
'type': 'mouseReleased',
'x': x,
'y': y,
'button': button,
'clickCount': click_count,
}
await self._client.send.Input.dispatchMouseEvent(
release_params,
session_id=self._session_id,
)
async def down(self, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
"""Press mouse button down."""
params: 'DispatchMouseEventParameters' = {
'type': 'mousePressed',
'x': 0, # Will use last mouse position
'y': 0,
'button': button,
'clickCount': click_count,
}
await self._client.send.Input.dispatchMouseEvent(
params,
session_id=self._session_id,
)
async def up(self, button: 'MouseButton' = 'left', click_count: int = 1) -> None:
"""Release mouse button."""
params: 'DispatchMouseEventParameters' = {
'type': 'mouseReleased',
'x': 0, # Will use last mouse position
'y': 0,
'button': button,
'clickCount': click_count,
}
await self._client.send.Input.dispatchMouseEvent(
params,
session_id=self._session_id,
)
async def move(self, x: int, y: int, steps: int = 1) -> None:
"""Move mouse to the specified coordinates."""
# TODO: Implement smooth movement with multiple steps if needed
_ = steps # Acknowledge parameter for future use
params: 'DispatchMouseEventParameters' = {'type': 'mouseMoved', 'x': x, 'y': y}
await self._client.send.Input.dispatchMouseEvent(params, session_id=self._session_id)
async def scroll(self, x: int = 0, y: int = 0, delta_x: int | None = None, delta_y: int | None = None) -> None:
"""Scroll the page using robust CDP methods."""
if not self._session_id:
raise RuntimeError('Session ID is required for scroll operations')
# Method 1: Try mouse wheel event (most reliable)
try:
# Get viewport dimensions
layout_metrics = await self._client.send.Page.getLayoutMetrics(session_id=self._session_id)
viewport_width = layout_metrics['layoutViewport']['clientWidth']
viewport_height = layout_metrics['layoutViewport']['clientHeight']
# Use provided coordinates or center of viewport
scroll_x = x if x > 0 else viewport_width / 2
scroll_y = y if y > 0 else viewport_height / 2
# Calculate scroll deltas (positive = down/right)
scroll_delta_x = delta_x or 0
scroll_delta_y = delta_y or 0
# Dispatch mouse wheel event
await self._client.send.Input.dispatchMouseEvent(
params={
'type': 'mouseWheel',
'x': scroll_x,
'y': scroll_y,
'deltaX': scroll_delta_x,
'deltaY': scroll_delta_y,
},
session_id=self._session_id,
)
return
except Exception:
pass
# Method 2: Fallback to synthesizeScrollGesture
try:
params: 'SynthesizeScrollGestureParameters' = {'x': x, 'y': y, 'xDistance': delta_x or 0, 'yDistance': delta_y or 0}
await self._client.send.Input.synthesizeScrollGesture(
params,
session_id=self._session_id,
)
except Exception:
# Method 3: JavaScript fallback
scroll_js = f'window.scrollBy({delta_x or 0}, {delta_y or 0})'
await self._client.send.Runtime.evaluate(
params={'expression': scroll_js, 'returnByValue': True},
session_id=self._session_id,
)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/mouse.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/actor/page.py | """Page class for page-level operations."""
from typing import TYPE_CHECKING, TypeVar
from pydantic import BaseModel
from browser_use import logger
from browser_use.actor.utils import get_key_info
from browser_use.dom.serializer.serializer import DOMTreeSerializer
from browser_use.dom.service import DomService
from browser_use.llm.messages import SystemMessage, UserMessage
T = TypeVar('T', bound=BaseModel)
if TYPE_CHECKING:
from cdp_use.cdp.dom.commands import (
DescribeNodeParameters,
QuerySelectorAllParameters,
)
from cdp_use.cdp.emulation.commands import SetDeviceMetricsOverrideParameters
from cdp_use.cdp.input.commands import (
DispatchKeyEventParameters,
)
from cdp_use.cdp.page.commands import CaptureScreenshotParameters, NavigateParameters, NavigateToHistoryEntryParameters
from cdp_use.cdp.runtime.commands import EvaluateParameters
from cdp_use.cdp.target.commands import (
AttachToTargetParameters,
GetTargetInfoParameters,
)
from cdp_use.cdp.target.types import TargetInfo
from browser_use.browser.session import BrowserSession
from browser_use.llm.base import BaseChatModel
from .element import Element
from .mouse import Mouse
class Page:
"""Page operations (tab or iframe)."""
def __init__(
    self, browser_session: 'BrowserSession', target_id: str, session_id: str | None = None, llm: 'BaseChatModel | None' = None
):
    """Bind this Page facade to a CDP target.

    Args:
        browser_session: Owning browser session (supplies the CDP client).
        target_id: CDP target this page wraps (tab or iframe).
        session_id: Pre-attached CDP session, if any; otherwise attached lazily.
        llm: Default language model used by the AI helper methods.
    """
    self._browser_session = browser_session
    self._client = browser_session.cdp_client
    self._target_id = target_id
    self._session_id: str | None = session_id
    # Mouse facade is created lazily on first access.
    self._mouse: 'Mouse | None' = None
    self._llm = llm
async def _ensure_session(self) -> str:
    """Attach to the target lazily and cache the resulting CDP session ID.

    On first attachment the core CDP domains (Page, DOM, Runtime, Network) are
    enabled concurrently so subsequent commands can rely on them.
    """
    if not self._session_id:
        params: 'AttachToTargetParameters' = {'targetId': self._target_id, 'flatten': True}
        result = await self._client.send.Target.attachToTarget(params)
        self._session_id = result['sessionId']
        # Enable necessary domains
        import asyncio

        await asyncio.gather(
            self._client.send.Page.enable(session_id=self._session_id),
            self._client.send.DOM.enable(session_id=self._session_id),
            self._client.send.Runtime.enable(session_id=self._session_id),
            self._client.send.Network.enable(session_id=self._session_id),
        )
    return self._session_id
@property
async def session_id(self) -> str:
    """Get the session ID for this target, attaching on first use.

    @dev Pass this to an arbitrary CDP call
    """
    return await self._ensure_session()
@property
async def mouse(self) -> 'Mouse':
    """Get the mouse interface for this target (created lazily, then cached)."""
    if not self._mouse:
        session_id = await self._ensure_session()
        # Local import — presumably avoids a circular import at module load; confirm.
        from .mouse import Mouse

        self._mouse = Mouse(self._browser_session, session_id, self._target_id)
    return self._mouse
async def reload(self) -> None:
    """Reload the target's current page via CDP Page.reload."""
    session_id = await self._ensure_session()
    await self._client.send.Page.reload(session_id=session_id)
async def get_element(self, backend_node_id: int) -> 'Element':
    """Get an element handle by its stable CDP backend node ID."""
    session_id = await self._ensure_session()
    from .element import Element as Element_

    return Element_(self._browser_session, backend_node_id, session_id)
async def evaluate(self, page_function: str, *args) -> str:
    """Execute JavaScript in the target.

    Args:
        page_function: JavaScript code in arrow-function form, i.e.
            `(...args) => ...` or `async (...args) => ...`
        *args: Arguments passed to the function (serialized via JSON)

    Returns:
        String representation of the JavaScript execution result.
        Objects and arrays are JSON-stringified; None/undefined become ''.

    Raises:
        ValueError: If the code is not in arrow-function form.
        RuntimeError: If evaluation raises inside the page.
    """
    session_id = await self._ensure_session()
    # Clean and fix common JavaScript string parsing issues
    page_function = self._fix_javascript_string(page_function)
    # Enforce arrow function format; also accept an 'async' prefix for
    # consistency with Element.evaluate (awaitPromise below resolves the promise).
    if not ('=>' in page_function and (page_function.startswith('(') or page_function.startswith('async'))):
        raise ValueError(f'JavaScript code must start with (...args) => format. Got: {page_function[:50]}...')
    # Build the expression - call the arrow function with provided args
    if args:
        # Convert args to JSON representation for safe passing
        import json

        arg_strs = [json.dumps(arg) for arg in args]
        expression = f'({page_function})({", ".join(arg_strs)})'
    else:
        expression = f'({page_function})()'
    # Debug: log the actual expression being evaluated
    logger.debug(f'Evaluating JavaScript: {repr(expression)}')
    params: 'EvaluateParameters' = {'expression': expression, 'returnByValue': True, 'awaitPromise': True}
    result = await self._client.send.Runtime.evaluate(
        params,
        session_id=session_id,
    )
    if 'exceptionDetails' in result:
        raise RuntimeError(f'JavaScript evaluation failed: {result["exceptionDetails"]}')
    value = result.get('result', {}).get('value')
    # Always return string representation (mirrors Element.evaluate behaviour)
    if value is None:
        return ''
    elif isinstance(value, str):
        return value
    else:
        # Convert objects, numbers, booleans to string
        import json

        try:
            return json.dumps(value) if isinstance(value, (dict, list)) else str(value)
        except (TypeError, ValueError):
            return str(value)
def _fix_javascript_string(self, js_code: str) -> str:
"""Fix common JavaScript string parsing issues when written as Python string."""
# Just do minimal, safe cleaning
js_code = js_code.strip()
# Only fix the most common and safe issues:
# 1. Remove obvious Python string wrapper quotes if they exist
if (js_code.startswith('"') and js_code.endswith('"')) or (js_code.startswith("'") and js_code.endswith("'")):
# Check if it's a wrapped string (not part of JS syntax)
inner = js_code[1:-1]
if inner.count('"') + inner.count("'") == 0 or '() =>' in inner:
js_code = inner
# 2. Only fix clearly escaped quotes that shouldn't be
# But be very conservative - only if we're sure it's a Python string artifact
if '\\"' in js_code and js_code.count('\\"') > js_code.count('"'):
js_code = js_code.replace('\\"', '"')
if "\\'" in js_code and js_code.count("\\'") > js_code.count("'"):
js_code = js_code.replace("\\'", "'")
# 3. Basic whitespace normalization only
js_code = js_code.strip()
# Final validation - ensure it's not empty
if not js_code:
raise ValueError('JavaScript code is empty after cleaning')
return js_code
async def screenshot(self, format: str = 'png', quality: int | None = None) -> str:
    """Capture the page as a base64-encoded image.

    Args:
        format: Image format ('jpeg', 'png', 'webp')
        quality: Quality 0-100, honoured only for JPEG

    Returns:
        Base64-encoded image data
    """
    session_id = await self._ensure_session()
    capture: 'CaptureScreenshotParameters' = {'format': format}
    # CDP rejects 'quality' for non-JPEG captures, so only set it for JPEG.
    if quality is not None and format.lower() == 'jpeg':
        capture['quality'] = quality
    response = await self._client.send.Page.captureScreenshot(capture, session_id=session_id)
    return response['data']
async def press(self, key: str) -> None:
    """Press a key on the page (sends keyboard input to the focused element or page).

    Args:
        key: A single key name (e.g. 'Enter') or a '+'-joined chord such as
            'Control+A' (modifiers first, main key last).
    """
    session_id = await self._ensure_session()
    # Handle key combinations like "Control+A"
    if '+' in key:
        parts = key.split('+')
        modifiers = parts[:-1]
        main_key = parts[-1]
        # Calculate modifier bitmask (CDP: Alt=1, Control=2, Meta=4, Shift=8)
        modifier_value = 0
        modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
        for mod in modifiers:
            modifier_value |= modifier_map.get(mod, 0)
        # Press modifier keys
        for mod in modifiers:
            code, vk_code = get_key_info(mod)
            params: 'DispatchKeyEventParameters' = {'type': 'keyDown', 'key': mod, 'code': code}
            if vk_code is not None:
                params['windowsVirtualKeyCode'] = vk_code
            await self._client.send.Input.dispatchKeyEvent(params, session_id=session_id)
        # Press main key with modifiers bitmask
        main_code, main_vk_code = get_key_info(main_key)
        main_down_params: 'DispatchKeyEventParameters' = {
            'type': 'keyDown',
            'key': main_key,
            'code': main_code,
            'modifiers': modifier_value,
        }
        if main_vk_code is not None:
            main_down_params['windowsVirtualKeyCode'] = main_vk_code
        await self._client.send.Input.dispatchKeyEvent(main_down_params, session_id=session_id)
        main_up_params: 'DispatchKeyEventParameters' = {
            'type': 'keyUp',
            'key': main_key,
            'code': main_code,
            'modifiers': modifier_value,
        }
        if main_vk_code is not None:
            main_up_params['windowsVirtualKeyCode'] = main_vk_code
        await self._client.send.Input.dispatchKeyEvent(main_up_params, session_id=session_id)
        # Release modifier keys (in reverse order, mirroring a human releasing a chord)
        for mod in reversed(modifiers):
            code, vk_code = get_key_info(mod)
            release_params: 'DispatchKeyEventParameters' = {'type': 'keyUp', 'key': mod, 'code': code}
            if vk_code is not None:
                release_params['windowsVirtualKeyCode'] = vk_code
            await self._client.send.Input.dispatchKeyEvent(release_params, session_id=session_id)
    else:
        # Simple key press
        code, vk_code = get_key_info(key)
        key_down_params: 'DispatchKeyEventParameters' = {'type': 'keyDown', 'key': key, 'code': code}
        if vk_code is not None:
            key_down_params['windowsVirtualKeyCode'] = vk_code
        await self._client.send.Input.dispatchKeyEvent(key_down_params, session_id=session_id)
        key_up_params: 'DispatchKeyEventParameters' = {'type': 'keyUp', 'key': key, 'code': code}
        if vk_code is not None:
            key_up_params['windowsVirtualKeyCode'] = vk_code
        await self._client.send.Input.dispatchKeyEvent(key_up_params, session_id=session_id)
async def set_viewport_size(self, width: int, height: int) -> None:
    """Override the viewport to the given dimensions (non-mobile, scale 1.0)."""
    session_id = await self._ensure_session()
    override: 'SetDeviceMetricsOverrideParameters' = {
        'width': width,
        'height': height,
        'deviceScaleFactor': 1.0,
        'mobile': False,
    }
    await self._client.send.Emulation.setDeviceMetricsOverride(override, session_id=session_id)
# Target properties (from CDP getTargetInfo)
async def get_target_info(self) -> 'TargetInfo':
    """Fetch the raw CDP TargetInfo record for this target (url, title, ...)."""
    params: 'GetTargetInfoParameters' = {'targetId': self._target_id}
    result = await self._client.send.Target.getTargetInfo(params)
    return result['targetInfo']
async def get_url(self) -> str:
    """Return the target's current URL ('' if the target reports none)."""
    return (await self.get_target_info()).get('url', '')
async def get_title(self) -> str:
    """Return the target's current title ('' if the target reports none)."""
    return (await self.get_target_info()).get('title', '')
async def goto(self, url: str) -> None:
    """Navigate this target to a URL.

    NOTE(review): this returns once CDP accepts the navigation — it does not
    wait for the page load to finish; confirm callers expect that.
    """
    session_id = await self._ensure_session()
    params: 'NavigateParameters' = {'url': url}
    await self._client.send.Page.navigate(params, session_id=session_id)
async def navigate(self, url: str) -> None:
    """Alias for goto."""
    await self.goto(url)
async def go_back(self) -> None:
    """Navigate back in history.

    Raises:
        RuntimeError: If there is no previous history entry or navigation fails.
    """
    session_id = await self._ensure_session()
    try:
        # Get navigation history
        history = await self._client.send.Page.getNavigationHistory(session_id=session_id)
        current_index = history['currentIndex']
        entries = history['entries']
        # Check if we can go back
        if current_index <= 0:
            raise RuntimeError('Cannot go back - no previous entry in history')
        # Navigate to the previous entry
        previous_entry_id = entries[current_index - 1]['id']
        params: 'NavigateToHistoryEntryParameters' = {'entryId': previous_entry_id}
        await self._client.send.Page.navigateToHistoryEntry(params, session_id=session_id)
    except Exception as e:
        # NOTE(review): the 'Cannot go back' RuntimeError raised above is also
        # caught here and re-wrapped, doubling the message prefix.
        raise RuntimeError(f'Failed to navigate back: {e}')
async def go_forward(self) -> None:
    """Navigate forward in history.

    Raises:
        RuntimeError: If there is no next history entry or navigation fails.
    """
    session_id = await self._ensure_session()
    try:
        # Get navigation history
        history = await self._client.send.Page.getNavigationHistory(session_id=session_id)
        current_index = history['currentIndex']
        entries = history['entries']
        # Check if we can go forward
        if current_index >= len(entries) - 1:
            raise RuntimeError('Cannot go forward - no next entry in history')
        # Navigate to the next entry
        next_entry_id = entries[current_index + 1]['id']
        params: 'NavigateToHistoryEntryParameters' = {'entryId': next_entry_id}
        await self._client.send.Page.navigateToHistoryEntry(params, session_id=session_id)
    except Exception as e:
        # NOTE(review): the 'Cannot go forward' RuntimeError raised above is also
        # caught here and re-wrapped, doubling the message prefix.
        raise RuntimeError(f'Failed to navigate forward: {e}')
# Element finding methods (these would need to be implemented based on DOM queries)
async def get_elements_by_css_selector(self, selector: str) -> list['Element']:
    """Return an Element handle for every node matching the CSS selector."""
    session_id = await self._ensure_session()
    from .element import Element as Element_

    # Resolve the document root so the selector is evaluated page-wide.
    document = await self._client.send.DOM.getDocument(session_id=session_id)
    root_node_id = document['root']['nodeId']
    query: 'QuerySelectorAllParameters' = {'nodeId': root_node_id, 'selector': selector}
    matches = await self._client.send.DOM.querySelectorAll(query, session_id=session_id)
    handles: list['Element'] = []
    for node_id in matches['nodeIds']:
        # Element handles are keyed by the stable backendNodeId, so resolve it
        # for each transient nodeId returned by the query.
        describe: 'DescribeNodeParameters' = {'nodeId': node_id}
        described = await self._client.send.DOM.describeNode(describe, session_id=session_id)
        handles.append(Element_(self._browser_session, described['node']['backendNodeId'], session_id))
    return handles
# AI METHODS
@property
def dom_service(self) -> 'DomService':
    """Get a DOM service bound to this page's browser session.

    NOTE(review): a new DomService is constructed on every access — confirm
    the lack of caching is intentional.
    """
    return DomService(self._browser_session)
async def get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None' = None) -> 'Element | None':
    """Find an interactive element matching a natural-language prompt.

    Serializes the page's interactive DOM, asks the LLM to pick the matching
    highlight index, and returns a handle for it — or None if the LLM finds no
    match (or returns an index not present in the selector map).

    Raises:
        ValueError: If no LLM was passed and none was set on the Page.
    """
    await self._ensure_session()
    llm = llm or self._llm
    if not llm:
        raise ValueError('LLM not provided')
    dom_service = self.dom_service
    # Lazy fetch all_frames inside get_dom_tree if needed (for cross-origin iframes)
    enhanced_dom_tree, _ = await dom_service.get_dom_tree(target_id=self._target_id, all_frames=None)
    session_id = self._browser_session.id
    serialized_dom_state, _ = DOMTreeSerializer(
        enhanced_dom_tree, None, paint_order_filtering=True, session_id=session_id
    ).serialize_accessible_elements()
    llm_representation = serialized_dom_state.llm_representation()
    system_message = SystemMessage(
        content="""You are an AI created to find an element on a page by a prompt.
<browser_state>
Interactive Elements: All interactive elements will be provided in format as [index]<type>text</type> where
- index: Numeric identifier for interaction
- type: HTML element type (button, input, etc.)
- text: Element description
Examples:
[33]<div>User form</div>
[35]<button aria-label='Submit form'>Submit</button>
Note that:
- Only elements with numeric indexes in [] are interactive
- (stacked) indentation (with \t) is important and means that the element is a (html) child of the element above (with a lower index)
- Pure text elements without [] are not interactive.
</browser_state>
Your task is to find an element index (if any) that matches the prompt (written in <prompt> tag).
If non of the elements matches the, return None.
Before you return the element index, reason about the state and elements for a sentence or two."""
    )
    state_message = UserMessage(
        content=f"""
<browser_state>
{llm_representation}
</browser_state>
<prompt>
{prompt}
</prompt>
"""
    )

    # Structured output schema: the LLM returns just the chosen index (or None).
    class ElementResponse(BaseModel):
        # thinking: str
        element_highlight_index: int | None

    llm_response = await llm.ainvoke(
        [
            system_message,
            state_message,
        ],
        output_format=ElementResponse,
    )
    element_highlight_index = llm_response.completion.element_highlight_index
    # Guard against hallucinated indexes that are not in the selector map.
    if element_highlight_index is None or element_highlight_index not in serialized_dom_state.selector_map:
        return None
    element = serialized_dom_state.selector_map[element_highlight_index]
    from .element import Element as Element_

    return Element_(self._browser_session, element.backend_node_id, self._session_id)
async def must_get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None' = None) -> 'Element':
    """Resolve *prompt* to an element, raising instead of returning None.

    @dev LLM can still return None, this just raises an error if the element is not found.
    """
    found = await self.get_element_by_prompt(prompt, llm)
    if found is not None:
        return found
    raise ValueError(f'No element found for prompt: {prompt}')
async def extract_content(self, prompt: str, structured_output: type[T], llm: 'BaseChatModel | None' = None) -> T:
    """Extract structured content from the current page using LLM.

    Extracts clean markdown from the page and sends it to LLM for structured data extraction.

    Args:
        prompt: Description of what content to extract
        structured_output: Pydantic BaseModel class defining the expected output structure
        llm: Language model to use for extraction

    Returns:
        The structured BaseModel instance with extracted content

    Raises:
        ValueError: If no LLM is available.
        RuntimeError: If markdown extraction or the LLM call fails (cause chained).
    """
    llm = llm or self._llm
    if not llm:
        raise ValueError('LLM not provided')
    # Extract clean markdown using the same method as in tools/service.py
    try:
        content, _ = await self._extract_clean_markdown()
    except Exception as e:
        # Bug fix: previously only the exception *type name* was reported and the
        # cause was not chained, making extraction failures hard to debug.
        raise RuntimeError(f'Could not extract clean markdown: {type(e).__name__}: {e}') from e
    # System prompt for structured extraction
    system_prompt = """
You are an expert at extracting structured data from the markdown of a webpage.
<input>
You will be given a query and the markdown of a webpage that has been filtered to remove noise and advertising content.
</input>
<instructions>
- You are tasked to extract information from the webpage that is relevant to the query.
- You should ONLY use the information available in the webpage to answer the query. Do not make up information or provide guess from your own knowledge.
- If the information relevant to the query is not available in the page, your response should mention that.
- If the query asks for all items, products, etc., make sure to directly list all of them.
- Return the extracted content in the exact structured format specified.
</instructions>
<output>
- Your output should present ALL the information relevant to the query in the specified structured format.
- Do not answer in conversational format - directly output the relevant information in the structured format.
</output>
""".strip()
    # Build prompt with just query and content
    prompt_content = f'<query>\n{prompt}\n</query>\n\n<webpage_content>\n{content}\n</webpage_content>'
    # Send to LLM with structured output
    import asyncio

    try:
        response = await asyncio.wait_for(
            llm.ainvoke(
                [SystemMessage(content=system_prompt), UserMessage(content=prompt_content)], output_format=structured_output
            ),
            timeout=120.0,  # hard cap so a hung LLM call cannot stall the caller forever
        )
        # Return the structured output BaseModel instance
        return response.completion
    except Exception as e:
        # Chain the cause so the original traceback is preserved for callers.
        raise RuntimeError(str(e)) from e
async def _extract_clean_markdown(self, extract_links: bool = False) -> tuple[str, dict]:
    """Extract clean markdown from the current page using enhanced DOM tree.

    Uses the shared markdown extractor for consistency with tools/service.py.

    Args:
        extract_links: Whether hyperlinks are kept in the markdown output.

    Returns:
        Tuple of (markdown content, extraction stats dict).
    """
    # Imported lazily — presumably to avoid a module-level import cycle; TODO confirm.
    from browser_use.dom.markdown_extractor import extract_clean_markdown

    dom_service = self.dom_service
    return await extract_clean_markdown(dom_service=dom_service, target_id=self._target_id, extract_links=extract_links)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/page.py",
"license": "MIT License",
"lines": 437,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/actor/playground/flights.py | import asyncio
from browser_use import Agent, Browser, ChatOpenAI
llm = ChatOpenAI('gpt-4.1-mini')
async def main():
    """
    Main function demonstrating mixed automation with Browser-Use and Playwright.

    Flow: open Google Flights, switch the trip type via prompt-based element
    lookup, then hand the actual flight search over to an Agent.
    """
    print('🚀 Mixed Automation with Browser-Use and Actor API')
    browser = Browser(keep_alive=True)
    await browser.start()
    page = await browser.get_current_page() or await browser.new_page()
    # Open Google Flights (previous comment said "apple wikipedia page" — stale copy/paste)
    await page.goto('https://www.google.com/travel/flights')
    await asyncio.sleep(1)
    # Prompt-based lookup: toggle round-trip, then one-way
    round_trip_button = await page.must_get_element_by_prompt('round trip button', llm)
    await round_trip_button.click()
    one_way_button = await page.must_get_element_by_prompt('one way button', llm)
    await one_way_button.click()
    await asyncio.sleep(1)
    # Hand over to the agent for the actual flight search
    agent = Agent(task='Find the cheapest flight from London to Paris on 2025-10-15', llm=llm, browser_session=browser)
    await agent.run()
    input('Press Enter to continue...')
    await browser.stop()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/playground/flights.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/actor/playground/mixed_automation.py | import asyncio
from pydantic import BaseModel
from browser_use import Browser, ChatOpenAI
TASK = """
On the current wikipedia page, find the latest huge edit and tell me what is was about.
"""
class LatestEditFinder(BaseModel):
    """Find the latest huge edit on the current wikipedia page."""

    # NOTE(review): this model (and the TASK constant above) is not referenced by
    # main() below — presumably left over from an earlier structured-output version
    # of this playground script; confirm before removing.
    latest_edit: str
    edit_time: str
    edit_author: str
    edit_summary: str
    edit_url: str
llm = ChatOpenAI('gpt-4.1-mini')
async def main():
    """
    Main function demonstrating mixed automation with Browser-Use and Playwright.

    Opens a form stress-test page and resolves an input field via a
    natural-language prompt, clicking it when found.
    """
    print('🚀 Mixed Automation with Browser-Use and Actor API')
    browser = Browser(keep_alive=True)
    await browser.start()
    page = await browser.get_current_page() or await browser.new_page()
    # Open the AngularJS form stress-test page (old comment said "apple wikipedia page" — stale)
    await page.goto('https://browser-use.github.io/stress-tests/challenges/angularjs-form.html')
    await asyncio.sleep(1)
    element = await page.get_element_by_prompt('zip code input', llm)
    print('Element found', element)
    if element:
        await element.click()
    else:
        print('No element found')
    await browser.stop()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/playground/mixed_automation.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/actor/playground/playground.py | #!/usr/bin/env python3
"""
Playground script to test the browser-use actor API.
This script demonstrates:
- Starting a browser session
- Using the actor API to navigate and interact
- Finding elements, clicking, scrolling, JavaScript evaluation
- Testing most of the available methods
"""
import asyncio
import json
import logging
from browser_use import Browser
# Configure logging to see what's happening
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def main():
    """Main playground function.

    End-to-end tour of the actor API: navigation, screenshots, JS evaluation,
    element lookup and interaction, mouse input, multi-page handling and cleanup.
    Failures are logged rather than raised so the tour continues as far as possible.
    """
    logger.info('🚀 Starting browser actor playground')
    # Create browser session
    browser = Browser()
    try:
        # Start the browser
        await browser.start()
        logger.info('✅ Browser session started')
        # Navigate to Wikipedia using integrated methods
        logger.info('📖 Navigating to Wikipedia...')
        page = await browser.new_page('https://en.wikipedia.org')
        # Get basic page info
        url = await page.get_url()
        title = await page.get_title()
        logger.info(f'📄 Page loaded: {title} ({url})')
        # Take a screenshot
        logger.info('📸 Taking initial screenshot...')
        screenshot_b64 = await page.screenshot()
        logger.info(f'📸 Screenshot captured: {len(screenshot_b64)} bytes')
        # Set viewport size
        logger.info('🖥️ Setting viewport to 1920x1080...')
        await page.set_viewport_size(1920, 1080)
        # Execute some JavaScript to count links
        logger.info('🔍 Counting article links using JavaScript...')
        js_code = """() => {
// Find all article links on the page
const links = Array.from(document.querySelectorAll('a[href*="/wiki/"]:not([href*=":"])'))
.filter(link => !link.href.includes('Main_Page') && !link.href.includes('Special:'));
return {
total: links.length,
sample: links.slice(0, 3).map(link => ({
href: link.href,
text: link.textContent.trim()
}))
};
}"""
        link_info = json.loads(await page.evaluate(js_code))
        logger.info(f'🔗 Found {link_info["total"]} article links')
        # Try to find and interact with links using CSS selector
        try:
            # Find article links on the page
            links = await page.get_elements_by_css_selector('a[href*="/wiki/"]:not([href*=":"])')
            if links:
                logger.info(f'📋 Found {len(links)} wiki links via CSS selector')
                # Pick the first link
                link_element = links[0]
                # Get link info using available methods
                basic_info = await link_element.get_basic_info()
                link_href = await link_element.get_attribute('href')
                logger.info(f'🎯 Selected element: <{basic_info["nodeName"]}>')
                logger.info(f'🔗 Link href: {link_href}')
                if basic_info['boundingBox']:
                    bbox = basic_info['boundingBox']
                    logger.info(f'📏 Position: ({bbox["x"]}, {bbox["y"]}) Size: {bbox["width"]}x{bbox["height"]}')
                # Test element interactions with robust implementations
                logger.info('👆 Hovering over the element...')
                await link_element.hover()
                await asyncio.sleep(1)
                logger.info('🔍 Focusing the element...')
                await link_element.focus()
                await asyncio.sleep(0.5)
                # Click the link using robust click method
                logger.info('🖱️ Clicking the link with robust fallbacks...')
                await link_element.click()
                # Wait for navigation
                await asyncio.sleep(3)
                # Get new page info
                new_url = await page.get_url()
                new_title = await page.get_title()
                logger.info(f'📄 Navigated to: {new_title}')
                logger.info(f'🌐 New URL: {new_url}')
            else:
                logger.warning('❌ No links found to interact with')
        except Exception as e:
            logger.warning(f'⚠️ Link interaction failed: {e}')
        # Scroll down the page
        logger.info('📜 Scrolling down the page...')
        mouse = await page.mouse
        await mouse.scroll(x=0, y=100, delta_y=500)
        await asyncio.sleep(1)
        # Test mouse operations
        logger.info('🖱️ Testing mouse operations...')
        await mouse.move(x=100, y=200)
        await mouse.click(x=150, y=250)
        # Execute more JavaScript examples
        logger.info('🧪 Testing JavaScript evaluation...')
        # Simple expressions
        page_height = await page.evaluate('() => document.body.scrollHeight')
        current_scroll = await page.evaluate('() => window.pageYOffset')
        logger.info(f'📏 Page height: {page_height}px, current scroll: {current_scroll}px')
        # JavaScript with arguments
        result = await page.evaluate('(x) => x * 2', 21)
        logger.info(f'🧮 JavaScript with args: 21 * 2 = {result}')
        # More complex JavaScript
        page_stats = json.loads(
            await page.evaluate("""() => {
return {
url: window.location.href,
title: document.title,
links: document.querySelectorAll('a').length,
images: document.querySelectorAll('img').length,
scrollTop: window.pageYOffset,
viewportHeight: window.innerHeight
};
}""")
        )
        logger.info(f'📊 Page stats: {page_stats}')
        # Get page title using different methods
        title_via_js = await page.evaluate('() => document.title')
        title_via_api = await page.get_title()
        logger.info(f'📝 Title via JS: "{title_via_js}"')
        logger.info(f'📝 Title via API: "{title_via_api}"')
        # Take a final screenshot
        logger.info('📸 Taking final screenshot...')
        final_screenshot = await page.screenshot()
        logger.info(f'📸 Final screenshot: {len(final_screenshot)} bytes')
        # Test browser navigation with error handling
        logger.info('⬅️ Testing browser back navigation...')
        try:
            await page.go_back()
            await asyncio.sleep(2)
            back_url = await page.get_url()
            back_title = await page.get_title()
            logger.info(f'📄 After going back: {back_title}')
            logger.info(f'🌐 Back URL: {back_url}')
        except RuntimeError as e:
            logger.info(f'ℹ️ Navigation back failed as expected: {e}')
        # Test creating new page
        logger.info('🆕 Creating new blank page...')
        new_page = await browser.new_page()
        new_page_url = await new_page.get_url()
        logger.info(f'🆕 New page created with URL: {new_page_url}')
        # Get all pages
        all_pages = await browser.get_pages()
        logger.info(f'📑 Total pages: {len(all_pages)}')
        # Test form interaction if we can find a form
        try:
            # Look for search input on the page
            search_inputs = await page.get_elements_by_css_selector('input[type="search"], input[name*="search"]')
            if search_inputs:
                search_input = search_inputs[0]
                logger.info('🔍 Found search input, testing form interaction...')
                await search_input.focus()
                await search_input.fill('test search query')
                await page.press('Enter')
                logger.info('✅ Form interaction test completed')
            else:
                logger.info('ℹ️ No search inputs found for form testing')
        except Exception as e:
            logger.info(f'ℹ️ Form interaction test skipped: {e}')
        # wait 2 seconds before closing the new page
        logger.info('🕒 Waiting 2 seconds before closing the new page...')
        await asyncio.sleep(2)
        logger.info('🗑️ Closing new page...')
        await browser.close_page(new_page)
        logger.info('✅ Playground completed successfully!')
        input('Press Enter to continue...')
    except Exception as e:
        logger.error(f'❌ Error in playground: {e}', exc_info=True)
    finally:
        # Clean up
        logger.info('🧹 Cleaning up...')
        try:
            await browser.stop()
            logger.info('✅ Browser session stopped')
        except Exception as e:
            logger.error(f'❌ Error stopping browser: {e}')


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/actor/playground/playground.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/apps/ad-use/ad_generator.py | import argparse
import asyncio
import logging
import os
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from browser_use.utils import create_task_with_error_handling
def setup_environment(debug: bool):
    """Configure browser-use logging env vars; silence all logging unless debugging."""
    if debug:
        os.environ['BROWSER_USE_SETUP_LOGGING'] = 'true'
        os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info'
    else:
        os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
        os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
        logging.getLogger().setLevel(logging.CRITICAL)
# CLI definition. Parsing happens at import time so that setup_environment() can
# silence logging BEFORE browser_use is imported below.
parser = argparse.ArgumentParser(description='Generate ads from landing pages using browser-use + 🍌')
parser.add_argument('--url', nargs='?', help='Landing page URL to analyze')
parser.add_argument('--debug', action='store_true', default=False, help='Enable debug mode (show browser, verbose logs)')
parser.add_argument('--count', type=int, default=1, help='Number of ads to generate in parallel (default: 1)')
group = parser.add_mutually_exclusive_group()
group.add_argument('--instagram', action='store_true', default=False, help='Generate Instagram image ad (default)')
group.add_argument('--tiktok', action='store_true', default=False, help='Generate TikTok video ad using Veo3')
args = parser.parse_args()
# Default to Instagram when neither platform flag was given.
if not args.instagram and not args.tiktok:
    args.instagram = True
setup_environment(args.debug)
# Deliberately late imports: must run after setup_environment() has set the env vars.
from typing import Any, cast
import aiofiles
from google import genai
from PIL import Image
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
class LandingPageAnalyzer:
    """Drives a browser-use Agent to extract key brand info from a landing page."""

    def __init__(self, debug: bool = False):
        self.debug = debug
        self.llm = ChatGoogle(model='gemini-2.0-flash-exp', api_key=GOOGLE_API_KEY)
        self.output_dir = Path('output')
        self.output_dir.mkdir(exist_ok=True)

    async def analyze_landing_page(self, url: str, mode: str = 'instagram') -> dict:
        """Run the agent against *url*; return analysis text, screenshot path and timestamp.

        NOTE(review): the ``mode`` parameter is currently unused in this method —
        confirm whether the task prompt was meant to vary per platform.
        """
        browser_session = BrowserSession(
            headless=not self.debug,
        )
        agent = Agent(
            task=f"""Go to {url} and quickly extract key brand information for Instagram ad creation.
Steps:
1. Navigate to the website
2. From the initial view, extract ONLY these essentials:
- Brand/Product name
- Main tagline or value proposition (one sentence)
- Primary call-to-action text
- Any visible pricing or special offer
3. Scroll down half a page, twice (0.5 pages each) to check for any key info
4. Done - keep it simple and focused on the brand
Return ONLY the key brand info, not page structure details.""",
            llm=self.llm,
            browser_session=browser_session,
            max_actions_per_step=2,
            step_timeout=30,
            use_thinking=False,
            vision_detail_level='high',
        )
        screenshot_path = None
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        async def screenshot_callback(agent_instance):
            # Grab a viewport screenshot a few seconds in, while the agent is on the page.
            nonlocal screenshot_path
            await asyncio.sleep(4)
            screenshot_path = self.output_dir / f'landing_page_{timestamp}.png'
            await agent_instance.browser_session.take_screenshot(path=str(screenshot_path), full_page=False)

        # Runs concurrently with the agent; exceptions are suppressed by the wrapper.
        screenshot_task = create_task_with_error_handling(
            screenshot_callback(agent), name='screenshot_callback', suppress_exceptions=True
        )
        history = await agent.run()
        try:
            await screenshot_task
        except Exception as e:
            print(f'Screenshot task failed: {e}')
        analysis = history.final_result() or 'No analysis content extracted'
        return {'url': url, 'analysis': analysis, 'screenshot_path': screenshot_path, 'timestamp': timestamp}
class AdGenerator:
    """Generates ad creatives (Instagram images or TikTok videos) via Google GenAI."""

    def __init__(self, api_key: str | None = GOOGLE_API_KEY, mode: str = 'instagram'):
        # Fail fast: the genai client would otherwise fail later with a confusing error.
        if not api_key:
            raise ValueError('GOOGLE_API_KEY is missing or empty – set the environment variable or pass api_key explicitly')
        self.client = genai.Client(api_key=api_key)
        self.output_dir = Path('output')
        self.output_dir.mkdir(exist_ok=True)  # idempotent; same dir as LandingPageAnalyzer
        self.mode = mode  # 'instagram' or 'tiktok'
async def create_video_concept(self, browser_analysis: str, ad_id: int) -> str:
    """Generate a unique creative concept for each video ad.

    Returns an empty string for non-TikTok modes or when the model gives no text.
    """
    if self.mode != 'tiktok':
        return ''
    concept_prompt = f"""Based on this brand analysis:
{browser_analysis}
Create a UNIQUE and SPECIFIC TikTok video concept #{ad_id}.
Be creative and different! Consider various approaches like:
- Different visual metaphors and storytelling angles
- Various trending TikTok formats (transitions, reveals, transformations)
- Different emotional appeals (funny, inspiring, surprising, relatable)
- Unique visual styles (neon, retro, minimalist, maximalist, surreal)
- Different perspectives (first-person, aerial, macro, time-lapse)
Return a 2-3 sentence description of a specific, unique video concept that would work for this brand.
Make it visually interesting and different from typical ads. Be specific about visual elements, transitions, and mood."""
    # NOTE(review): synchronous generate_content inside an async method blocks the
    # event loop — confirm this is acceptable for this script.
    response = self.client.models.generate_content(model='gemini-2.0-flash-exp', contents=concept_prompt)
    return response.text if response and response.text else ''
def create_ad_prompt(self, browser_analysis: str, video_concept: str = '') -> str:
    """Build the generation prompt for the configured mode.

    Args:
        browser_analysis: Brand summary produced by LandingPageAnalyzer.
        video_concept: Optional pre-generated TikTok concept (used in tiktok mode only).

    Returns:
        Prompt string for the image/video generation model.
    """
    if self.mode == 'instagram':
        prompt = f"""Create an Instagram ad for this brand:
{browser_analysis}
Create a vibrant, eye-catching Instagram ad image with:
- Try to use the colors and style of the logo or brand, else:
- Bold, modern gradient background with bright colors
- Large, playful sans-serif text with the product/service name from the analysis
- Trendy design elements: geometric shapes, sparkles, emojis
- Fun bubbles or badges for any pricing or special offers mentioned
- Call-to-action button with text from the analysis
- Emphasizes the key value proposition from the analysis
- Uses visual elements that match the brand personality
- Square format (1:1 ratio)
- Use color psychology to drive action
Style: Modern Instagram advertisement, (1:1), scroll-stopping, professional but playful, conversion-focused"""
    else:  # tiktok
        if video_concept:
            prompt = f"""Create a TikTok video ad based on this specific concept:
{video_concept}
Brand context: {browser_analysis}
Requirements:
- Vertical 9:16 format
- High quality, professional execution
- Bring the concept to life exactly as described
- No text overlays, pure visual storytelling"""
        else:
            prompt = f"""Create a viral TikTok video ad for this brand:
{browser_analysis}
Create a dynamic, engaging vertical video with:
- Quick hook opening that grabs attention immediately
- Minimal text overlays (focus on visual storytelling)
- Fast-paced but not overwhelming editing
- Authentic, relatable energy that appeals to Gen Z
- Vertical 9:16 format optimized for mobile
- High energy but professional execution
Style: Modern TikTok advertisement, viral potential, authentic energy, minimal text, maximum visual impact"""
    return prompt
async def generate_ad_image(self, prompt: str, screenshot_path: Path | None = None) -> bytes | None:
    """Generate ad image bytes using Gemini. Returns None on failure."""
    try:
        from typing import Any  # NOTE(review): redundant — Any is already imported at module level

        contents: list[Any] = [prompt]
        if screenshot_path and screenshot_path.exists():
            # Center-crop the landing-page screenshot to a square before attaching it.
            img = Image.open(screenshot_path)
            w, h = img.size
            side = min(w, h)
            img = img.crop(((w - side) // 2, (h - side) // 2, (w + side) // 2, (h + side) // 2))
            contents = [prompt + '\n\nHere is the actual landing page screenshot to reference for design inspiration:', img]
        response = await self.client.aio.models.generate_content(
            model='gemini-2.5-flash-image-preview',
            contents=contents,
        )
        # Return the first inline image payload from the first candidate, if any.
        cand = getattr(response, 'candidates', None)
        if cand:
            for part in getattr(cand[0].content, 'parts', []):
                inline = getattr(part, 'inline_data', None)
                if inline:
                    return inline.data
    except Exception as e:
        # Best-effort: failures are reported to the console and signalled via None.
        print(f'❌ Image generation failed: {e}')
    return None
async def generate_ad_video(self, prompt: str, screenshot_path: Path | None = None, ad_id: int = 1) -> bytes:
    """Generate ad video using Veo3.

    Args:
        prompt: Video generation prompt.
        screenshot_path: Currently unused — the screenshot-as-first-frame input is disabled.
        ad_id: Ad number (kept for signature symmetry with generate_ad_image; unused here).

    Returns:
        Raw video bytes of the generated clip.

    Raises:
        RuntimeError: If the Veo3 operation yields no videos, file, or bytes.
    """
    sync_client = genai.Client(api_key=GOOGLE_API_KEY)
    # NOTE: the image-input path (using the landing-page screenshot as the first
    # frame) was removed for now; restore from version control history if needed.
    operation = sync_client.models.generate_videos(
        model='veo-3.0-generate-001',
        prompt=prompt,
        config=cast(Any, {'aspectRatio': '9:16', 'resolution': '720p'}),
    )
    # Poll the long-running operation until completion.
    while not operation.done:
        await asyncio.sleep(10)
        operation = sync_client.operations.get(operation)
    if not operation.response or not operation.response.generated_videos:
        raise RuntimeError('No videos generated')
    videos = operation.response.generated_videos
    video = videos[0]
    video_file = getattr(video, 'video', None)
    if not video_file:
        raise RuntimeError('No video file in response')
    # Download populates video_bytes on the file object in place.
    sync_client.files.download(file=video_file)
    video_bytes = getattr(video_file, 'video_bytes', None)
    if not video_bytes:
        raise RuntimeError('No video bytes in response')
    return video_bytes
async def save_results(self, ad_content: bytes, prompt: str, analysis: str, url: str, timestamp: str) -> str:
    """Write the generated creative plus a text sidecar (analysis + prompt) to output/.

    Returns the saved creative's path as a string.
    """
    if self.mode == 'instagram':
        content_path = self.output_dir / f'ad_{timestamp}.png'
    else:  # tiktok
        content_path = self.output_dir / f'ad_{timestamp}.mp4'
    async with aiofiles.open(content_path, 'wb') as f:
        await f.write(ad_content)
    # Assemble the sidecar text once and write it in a single call.
    sidecar_text = f'URL: {url}\n\n' + 'BROWSER-USE ANALYSIS:\n' + analysis + '\n\nGENERATED PROMPT:\n' + prompt
    analysis_path = self.output_dir / f'analysis_{timestamp}.txt'
    async with aiofiles.open(analysis_path, 'w', encoding='utf-8') as f:
        await f.write(sidecar_text)
    return str(content_path)
def open_file(file_path: str):
    """Open file with default system viewer"""
    try:
        # Pick the platform-specific opener command, then launch it.
        if sys.platform.startswith('darwin'):
            opener = ['open', file_path]
        elif sys.platform.startswith('win'):
            opener = ['cmd', '/c', 'start', '', file_path]
        else:
            opener = ['xdg-open', file_path]
        subprocess.run(opener, check=True)
    except Exception as e:
        print(f'❌ Could not open file: {e}')
async def create_ad_from_landing_page(url: str, debug: bool = False, mode: str = 'instagram', ad_id: int = 1):
    """Analyze a landing page and generate a single ad for it.

    Args:
        url: Landing page URL to analyze.
        debug: Show the browser and verbose logs.
        mode: 'instagram' (image) or 'tiktok' (video).
        ad_id: 1-based id used for console messages.

    Returns:
        Path (str) of the saved ad creative.

    Raises:
        RuntimeError: If image generation fails.
        Exception: Any analysis/generation error is logged and re-raised.
    """
    analyzer = LandingPageAnalyzer(debug=debug)
    # Bug fix: page_data was previously assigned only inside the try block, so the
    # finally block raised NameError (masking the real error) when analysis failed.
    page_data: dict = {}
    try:
        if ad_id == 1:
            print(f'🚀 Analyzing {url} for {mode.capitalize()} ad...')
        # A single analyzer instance suffices for every ad_id (the old code built a
        # redundant second LandingPageAnalyzer for ad_id != 1).
        page_data = await analyzer.analyze_landing_page(url, mode=mode)
        generator = AdGenerator(mode=mode)
        if mode == 'instagram':
            prompt = generator.create_ad_prompt(page_data['analysis'])
            ad_content = await generator.generate_ad_image(prompt, page_data.get('screenshot_path'))
            if ad_content is None:
                raise RuntimeError(f'Ad image generation failed for ad #{ad_id}')
        else:  # tiktok
            video_concept = await generator.create_video_concept(page_data['analysis'], ad_id)
            prompt = generator.create_ad_prompt(page_data['analysis'], video_concept)
            ad_content = await generator.generate_ad_video(prompt, page_data.get('screenshot_path'), ad_id)
        result_path = await generator.save_results(ad_content, prompt, page_data['analysis'], url, page_data['timestamp'])
        if mode == 'instagram':
            print(f'🎨 Generated image ad #{ad_id}: {result_path}')
        else:
            print(f'🎬 Generated video ad #{ad_id}: {result_path}')
        open_file(result_path)
        return result_path
    except Exception as e:
        print(f'❌ Error for ad #{ad_id}: {e}')
        raise
    finally:
        if ad_id == 1 and page_data.get('screenshot_path'):
            print(f'📸 Page screenshot: {page_data["screenshot_path"]}')
async def generate_single_ad(page_data: dict, mode: str, ad_id: int):
    """Generate a single ad using pre-analyzed page data.

    Args:
        page_data: Result dict from LandingPageAnalyzer.analyze_landing_page.
        mode: 'instagram' (image) or 'tiktok' (video).
        ad_id: 1-based id, used in messages and in the output filename suffix.

    Returns:
        Path (str) of the saved ad; re-raises any generation error after logging it.
    """
    generator = AdGenerator(mode=mode)
    try:
        if mode == 'instagram':
            prompt = generator.create_ad_prompt(page_data['analysis'])
            ad_content = await generator.generate_ad_image(prompt, page_data.get('screenshot_path'))
            if ad_content is None:
                raise RuntimeError(f'Ad image generation failed for ad #{ad_id}')
        else:  # tiktok
            video_concept = await generator.create_video_concept(page_data['analysis'], ad_id)
            prompt = generator.create_ad_prompt(page_data['analysis'], video_concept)
            ad_content = await generator.generate_ad_video(prompt, page_data.get('screenshot_path'), ad_id)
        # Create unique timestamp for each ad
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + f'_{ad_id}'
        result_path = await generator.save_results(ad_content, prompt, page_data['analysis'], page_data['url'], timestamp)
        if mode == 'instagram':
            print(f'🎨 Generated image ad #{ad_id}: {result_path}')
        else:
            print(f'🎬 Generated video ad #{ad_id}: {result_path}')
        return result_path
    except Exception as e:
        print(f'❌ Error for ad #{ad_id}: {e}')
        raise
async def create_multiple_ads(url: str, debug: bool = False, mode: str = 'instagram', count: int = 1):
    """Generate multiple ads in parallel using asyncio concurrency.

    Analyzes the landing page once, then fans out ``count`` generation tasks.
    Returns the list of successfully generated ad paths.
    """
    if count == 1:
        return await create_ad_from_landing_page(url, debug, mode, 1)
    print(f'🚀 Analyzing {url} for {count} {mode} ads...')
    analyzer = LandingPageAnalyzer(debug=debug)
    page_data = await analyzer.analyze_landing_page(url, mode=mode)
    print(f'🎯 Generating {count} {mode} ads in parallel...')
    tasks = [
        create_task_with_error_handling(generate_single_ad(page_data, mode, idx), name=f'generate_ad_{idx}')
        for idx in range(1, count + 1)
    ]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    successful = [r for r in results if not isinstance(r, Exception)]
    failed = [idx for idx, r in enumerate(results, start=1) if isinstance(r, Exception)]
    print(f'\n✅ Successfully generated {len(successful)}/{count} ads')
    if failed:
        print(f'❌ Failed ads: {failed}')
    if page_data.get('screenshot_path'):
        print(f'📸 Page screenshot: {page_data["screenshot_path"]}')
    for ad_path in successful:
        open_file(ad_path)
    return successful
if __name__ == '__main__':
    # Fall back to interactive input (with a demo default URL) when --url was omitted.
    url = args.url
    if not url:
        url = input('🔗 Enter URL: ').strip() or 'https://www.apple.com/iphone-17-pro/'
    if args.tiktok:
        mode = 'tiktok'
    else:
        mode = 'instagram'
    asyncio.run(create_multiple_ads(url, debug=args.debug, mode=mode, count=args.count))
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/apps/ad-use/ad_generator.py",
"license": "MIT License",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/apps/msg-use/login.py | import asyncio
import os
from pathlib import Path
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
# Browser profile directory for persistence (same as main script)
USER_DATA_DIR = Path.home() / '.config' / 'whatsapp_scheduler' / 'browser_profile'
USER_DATA_DIR.mkdir(parents=True, exist_ok=True)
# Storage state file for cookies
STORAGE_STATE_FILE = USER_DATA_DIR / 'storage_state.json'
async def login_to_whatsapp():
    """Open WhatsApp Web and wait for user to scan QR code.

    Interactive helper: requires GOOGLE_API_KEY, shows a visible browser using the
    persistent profile directory, and blocks on user input before closing.
    """
    # Hard requirement: the agent's LLM needs a Google API key.
    if not GOOGLE_API_KEY:
        print('❌ Error: GOOGLE_API_KEY environment variable is required')
        print("Please set it with: export GOOGLE_API_KEY='your-api-key-here'")
        return
    print('WhatsApp Login Setup')
    print('=' * 50)
    print(f'Browser profile directory: {USER_DATA_DIR}')
    print(f'Storage state file: {STORAGE_STATE_FILE}')
    print('=' * 50)
    try:
        llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
        task = """
You are helping a user log into WhatsApp Web. Follow these steps:
1. Navigate to https://web.whatsapp.com
2. Wait for the page to load completely
3. If you see a QR code, tell the user to scan it with their phone
4. Wait patiently for the login to complete
5. Once you see the WhatsApp chat interface, confirm successful login
Take your time and be patient with page loads.
"""
        print('\nOpening WhatsApp Web...')
        print('Please scan the QR code when it appears.\n')
        browser_session = BrowserSession(
            headless=False,  # Show browser
            user_data_dir=str(USER_DATA_DIR),  # Use persistent profile directory
            storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None,  # Use saved cookies/session
        )
        agent = Agent(task=task, llm=llm, browser_session=browser_session)
        result = await agent.run()  # NOTE(review): result is unused — kept for debugging?
        print('\n✅ Login completed!')
        print("Note: For now, you'll need to scan the QR code each time.")
        print("We'll improve session persistence in a future update.")
        print('\nPress Enter to close the browser...')
        input()
    except Exception as e:
        print(f'\n❌ Error during login: {str(e)}')
        print('Please try again.')


if __name__ == '__main__':
    asyncio.run(login_to_whatsapp())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/apps/msg-use/login.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/apps/msg-use/scheduler.py | #!/usr/bin/env python3
"""
WhatsApp Message Scheduler - Send scheduled messages via WhatsApp Web
"""
import argparse
import asyncio
import json
import logging
import os
import random
import re
from datetime import datetime, timedelta
from pathlib import Path
def setup_environment(debug: bool):
    """Silence browser-use logging unless debug mode is requested."""
    verbose = bool(debug)
    os.environ['BROWSER_USE_SETUP_LOGGING'] = 'true' if verbose else 'false'
    os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info' if verbose else 'critical'
    if not verbose:
        logging.getLogger().setLevel(logging.CRITICAL)
# CLI flags are parsed at import time; logging env vars must be set via
# setup_environment() BEFORE browser_use is imported below, which is why the
# browser_use imports are deliberately placed after parse_args().
parser = argparse.ArgumentParser(description='WhatsApp Scheduler - Send scheduled messages via WhatsApp Web')
parser.add_argument('--debug', action='store_true', help='Debug mode: show browser and verbose logs')
parser.add_argument('--test', action='store_true', help='Test mode: show what messages would be sent without sending them')
parser.add_argument('--auto', action='store_true', help='Auto mode: respond to unread messages every 30 minutes')
args = parser.parse_args()
setup_environment(args.debug)
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle

# Either env var works; GOOGLE_API_KEY takes precedence
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') or os.getenv('GEMINI_API_KEY')
# Persistent browser profile so the WhatsApp Web session survives restarts
USER_DATA_DIR = Path.home() / '.config' / 'whatsapp_scheduler' / 'browser_profile'
USER_DATA_DIR.mkdir(parents=True, exist_ok=True)
STORAGE_STATE_FILE = USER_DATA_DIR / 'storage_state.json'
async def parse_messages():
	"""Parse messages.txt and extract scheduling info.

	Uses Gemini to turn free-form instructions into a normalized list of
	{'contact', 'message', 'datetime'} dicts. Returns [] when the file is
	missing or the model response cannot be parsed as JSON.
	"""
	messages_file = Path('messages.txt')
	if not messages_file.exists():
		print('❌ messages.txt not found!')
		return []
	# Local import keeps aiofiles optional for modes that never read the file
	import aiofiles
	async with aiofiles.open(messages_file) as f:
		content = await f.read()
	# Low temperature: we want deterministic extraction, not creativity
	llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.1, api_key=GOOGLE_API_KEY)
	now = datetime.now()
	prompt = f"""
	Parse these WhatsApp message instructions and extract:
	1. Contact name (extract just the name, not descriptions)
	2. Message content (what to send)
	3. Date and time (when to send)
	Current date/time: {now.strftime('%Y-%m-%d %H:%M')}
	Today is: {now.strftime('%Y-%m-%d')}
	Current time is: {now.strftime('%H:%M')}
	Instructions:
	{content}
	Return ONLY a JSON array with format:
	[{{"contact": "name", "message": "text", "datetime": "YYYY-MM-DD HH:MM"}}]
	CRITICAL: Transform instructions into actual messages:
	QUOTED TEXT → Use exactly as-is:
	- Text in "quotes" becomes the exact message
	UNQUOTED INSTRUCTIONS → Generate actual content:
	- If it's an instruction to write something → write the actual thing
	- If it's an instruction to tell someone something → write what to tell them
	- If it's an instruction to remind someone → write the actual reminder
	- For multi-line content like poems: use single line with spacing, not line breaks
	DO NOT copy the instruction - create the actual message content!
	Time Rules:
	- If only time given (like "at 15:30"), use TODAY
	- If no date specified, assume TODAY
	- If no year given, use current year
	- Default time is 9:00 if not specified
	- Extract names from parentheses: "hinge date (Camila)" → "Camila"
	- "tomorrow" means {(now + timedelta(days=1)).strftime('%Y-%m-%d')}
	- "next tuesday" or similar means the next occurrence of that day
	"""
	from browser_use.llm.messages import UserMessage
	response = await llm.ainvoke([UserMessage(content=prompt)])
	response_text = response.completion if hasattr(response, 'completion') else str(response)
	# Extract JSON: grab the first bracketed array in the model reply
	json_match = re.search(r'\[.*?\]', response_text, re.DOTALL)
	if json_match:
		try:
			messages = json.loads(json_match.group())
			for msg in messages:
				if 'message' in msg:
					# Normalize whitespace: collapse newlines to ' • ' separators
					# and squeeze runs of spaces into one
					msg['message'] = re.sub(r'\n+', ' • ', msg['message'])
					msg['message'] = re.sub(r'\s+', ' ', msg['message']).strip()
			return messages
		except json.JSONDecodeError:
			pass
	return []
async def send_message(contact, message):
	"""Drive WhatsApp Web in the persistent profile and deliver *message* to *contact*."""
	print(f'\n📱 Sending to {contact}: {message}')
	task = f"""
	Send WhatsApp message:
	1. Go to https://web.whatsapp.com
	2. Search for contact: {contact}
	3. Click on the contact
	4. Type message: {message}
	5. Press Enter to send
	6. Confirm sent
	"""
	session = BrowserSession(
		headless=not args.debug,  # headless=False only when debug=True
		user_data_dir=str(USER_DATA_DIR),
		storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None,
	)
	model = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
	await Agent(task=task, llm=model, browser_session=session).run()
	print(f'✅ Sent to {contact}')
async def auto_respond_to_unread():
	"""Open the WhatsApp Web 'Unread' filter and let the agent reply to each unread chat."""
	print('\nAuto-responding to unread messages...')
	task = """
	1. Go to https://web.whatsapp.com
	2. Wait for page to load
	3. Click on the "Unread" filter tab
	4. If there are unread messages:
	   - Click on each unread chat
	   - Read the last message
	   - Generate and send a friendly, contextual response
	   - Move to next unread chat
	5. Report how many messages were responded to
	"""
	session = BrowserSession(
		headless=not args.debug,
		user_data_dir=str(USER_DATA_DIR),
		storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None,
	)
	model = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
	outcome = await Agent(task=task, llm=model, browser_session=session).run()
	print('✅ Auto-response complete')
	return outcome
async def main():
	"""Entry point: dispatch to auto-respond mode or scheduled-message mode.

	Modes (selected by CLI flags):
	- --auto: reply to unread chats roughly every 30 minutes, forever.
	- --test: preview which messages would be sent, without sending.
	- default: send past-due messages immediately, then poll every 30s
	  until all future-dated messages have been delivered.
	"""
	if not GOOGLE_API_KEY:
		print('❌ Set GOOGLE_API_KEY or GEMINI_API_KEY environment variable')
		return
	print('WhatsApp Scheduler')
	print(f'Profile: {USER_DATA_DIR}')
	print()
	# Auto mode - respond to unread messages periodically
	if args.auto:
		print('AUTO MODE - Responding to unread messages every ~30 minutes')
		print('Press Ctrl+C to stop.\n')
		while True:
			try:
				await auto_respond_to_unread()
				# Wait 30 minutes +/- 5 minutes randomly (jitter avoids a fixed cadence)
				wait_minutes = 30 + random.randint(-5, 5)
				print(f'\n⏰ Next check in {wait_minutes} minutes...')
				await asyncio.sleep(wait_minutes * 60)
			except KeyboardInterrupt:
				# NOTE(review): Ctrl+C during an await may surface as CancelledError
				# rather than KeyboardInterrupt inside a coroutine — verify this path.
				print('\n\nAuto mode stopped by user')
				break
			except Exception as e:
				# Best-effort loop: log the failure and retry after a cool-down
				print(f'\n❌ Error in auto mode: {e}')
				print('Waiting 5 minutes before retry...')
				await asyncio.sleep(300)
		return
	# Parse messages
	print('Parsing messages.txt...')
	messages = await parse_messages()
	if not messages:
		print('No messages found')
		return
	print(f'\nFound {len(messages)} messages:')
	for msg in messages:
		print(f'  • {msg["datetime"]}: {msg["message"][:30]}... to {msg["contact"]}')
	# Split into messages that are already due and ones scheduled for later
	now = datetime.now()
	immediate = []
	future = []
	for msg in messages:
		msg_time = datetime.strptime(msg['datetime'], '%Y-%m-%d %H:%M')
		if msg_time <= now:
			immediate.append(msg)
		else:
			future.append(msg)
	if args.test:
		# Dry run: print what would happen, then exit without sending anything
		print('\n=== TEST MODE - Preview ===')
		if immediate:
			print(f'\nWould send {len(immediate)} past-due messages NOW:')
			for msg in immediate:
				print(f'  📱 To {msg["contact"]}: {msg["message"]}')
		if future:
			print(f'\nWould monitor {len(future)} future messages:')
			for msg in future:
				print(f'  ⏰ {msg["datetime"]}: To {msg["contact"]}: {msg["message"]}')
		print('\nTest mode complete. No messages sent.')
		return
	if immediate:
		print(f'\nSending {len(immediate)} past-due messages NOW...')
		for msg in immediate:
			await send_message(msg['contact'], msg['message'])
	if future:
		print(f'\n⏰ Monitoring {len(future)} future messages...')
		print('Press Ctrl+C to stop.\n')
		last_status = None
		while future:
			# Each pass: move now-due messages out of `future` and send them
			now = datetime.now()
			due = []
			remaining = []
			for msg in future:
				msg_time = datetime.strptime(msg['datetime'], '%Y-%m-%d %H:%M')
				if msg_time <= now:
					due.append(msg)
				else:
					remaining.append(msg)
			for msg in due:
				print(f'\n⏰ Time reached for {msg["contact"]}')
				await send_message(msg['contact'], msg['message'])
			future = remaining
			if future:
				# Only print the "next up" status line when it changes
				next_msg = min(future, key=lambda x: datetime.strptime(x['datetime'], '%Y-%m-%d %H:%M'))
				current_status = f'Next: {next_msg["datetime"]} to {next_msg["contact"]}'
				if current_status != last_status:
					print(current_status)
					last_status = current_status
			await asyncio.sleep(30)  # Check every 30 seconds
	print('\n✅ All messages processed!')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/apps/msg-use/scheduler.py",
"license": "MIT License",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/apps/news-use/news_monitor.py | #!/usr/bin/env python3
"""
News monitoring agent with browser-use + Gemini Flash.
Automatically extracts and analyzes the latest articles from any news website.
"""
import argparse
import asyncio
import hashlib
import json
import logging
import os
import time
from datetime import datetime
from typing import Literal
from dateutil import parser as dtparser
from pydantic import BaseModel
def setup_environment(debug: bool):
	"""Set browser-use logging env vars before the library is imported."""
	setup_flag, level = ('true', 'info') if debug else ('false', 'critical')
	os.environ['BROWSER_USE_SETUP_LOGGING'] = setup_flag
	os.environ['BROWSER_USE_LOGGING_LEVEL'] = level
	if not debug:
		# Quiet runs also mute the root logger
		logging.getLogger().setLevel(logging.CRITICAL)
# CLI parsing and environment setup happen at import time; browser_use must be
# imported only AFTER setup_environment() so its logging config takes effect.
parser = argparse.ArgumentParser(description='News extractor using Browser-Use + Gemini')
parser.add_argument('--url', default='https://www.techcrunch.com', help='News site root URL')
parser.add_argument('--interval', type=int, default=300, help='Seconds between checks in monitor mode')
parser.add_argument('--once', action='store_true', help='Run a single extraction and exit')
parser.add_argument('--output', default='news_data.json', help='Path to JSON file where articles are stored')
parser.add_argument('--debug', action='store_true', help='Verbose console output and non-headless browser')
args = parser.parse_args()
setup_environment(args.debug)
from browser_use import Agent, BrowserSession, ChatGoogle

# 'xxxx' is a sentinel meaning "not configured"
GEMINI_API_KEY = os.getenv('GOOGLE_API_KEY') or 'xxxx'
if GEMINI_API_KEY == 'xxxx':
	print('⚠️ WARNING: Please set GOOGLE_API_KEY environment variable')
	print('   You can get an API key at: https://makersuite.google.com/app/apikey')
	# Bug fix: the hint previously said GEMINI_API_KEY, but only GOOGLE_API_KEY
	# is actually read above — exporting GEMINI_API_KEY would have no effect.
	print("   Then run: export GOOGLE_API_KEY='your-api-key-here'")
	print()
class NewsArticle(BaseModel):
	"""Structured result extracted from a single news article page."""

	title: str  # Article headline
	url: str  # Full URL of the article page
	posting_time: str  # Publication date/time exactly as shown on the page (free-form)
	short_summary: str  # ~10-word overview
	long_summary: str  # ~100-word detailed summary
	sentiment: Literal['positive', 'neutral', 'negative']  # Overall tone classification
# ---------------------------------------------------------
# Core extractor
# ---------------------------------------------------------
async def extract_latest_article(site_url: str, debug: bool = False) -> dict:
	"""Open site_url, navigate to the newest article and return structured JSON.

	Returns {'status': 'success', 'data': {...}} when a JSON object could be
	recovered from the agent output, else {'status': 'error', 'error': '...'}.
	"""
	prompt = (
		f'Navigate to {site_url} and find the most recent headline article (usually at the top). '
		f'Click on it to open the full article page. Once loaded, scroll & extract ALL required information: '
		f'1. title: The article headline '
		f'2. url: The full URL of the article page '
		f'3. posting_time: The publication date/time as shown on the page '
		f"4. short_summary: A 10-word overview of the article's content "
		f'5. long_summary: A 100-word detailed summary of the article '
		f"6. sentiment: Classify as 'positive', 'neutral', or 'negative' based on the article tone. "
		f'When done, call the done action with success=True and put ALL extracted data in the text field '
		f'as valid JSON in this exact format: '
		f'{{"title": "...", "url": "...", "posting_time": "...", "short_summary": "...", "long_summary": "...", "sentiment": "positive|neutral|negative"}}'
	)
	llm = ChatGoogle(model='gemini-2.0-flash', temperature=0.1, api_key=GEMINI_API_KEY)
	browser_session = BrowserSession(headless=not debug)
	agent = Agent(task=prompt, llm=llm, browser_session=browser_session, use_vision=False)
	if debug:
		print(f'[DEBUG] Starting extraction from {site_url}')
	start = time.time()
	result = await agent.run(max_steps=25)
	raw = result.final_result() if result else None
	if debug:
		print(f'[DEBUG] Raw result type: {type(raw)}')
		print(f'[DEBUG] Raw result: {raw[:500] if isinstance(raw, str) else raw}')
		print(f'[DEBUG] Extraction time: {time.time() - start:.2f}s')
	# Best case: the agent already returned a dict
	if isinstance(raw, dict):
		return {'status': 'success', 'data': raw}
	# Otherwise strip common LLM wrappers surrounding the JSON payload
	text = str(raw).strip() if raw else ''
	# <json>...</json> tags
	if '<json>' in text and '</json>' in text:
		text = text.split('<json>', 1)[1].split('</json>', 1)[0].strip()
	# Leading "here is ..." chatter before the opening brace
	if text.lower().startswith('here is'):
		brace = text.find('{')
		if brace != -1:
			text = text[brace:]
	# Markdown code fences, optionally tagged "json"
	if text.startswith('```'):
		text = text.lstrip('`\n ')
		if text.lower().startswith('json'):
			text = text[4:].lstrip()

	def _escape_newlines(src: str) -> str:
		# Replace raw newlines *inside JSON string values* with \n escapes;
		# models often emit literal line breaks in summaries, breaking json.loads
		out, in_str, esc = [], False, False
		for ch in src:
			if in_str:
				if esc:
					esc = False
				elif ch == '\\':
					esc = True
				elif ch == '"':
					in_str = False
				elif ch == '\n':
					out.append('\\n')
					continue
				elif ch == '\r':
					continue
			else:
				if ch == '"':
					in_str = True
			out.append(ch)
		return ''.join(out)

	cleaned = _escape_newlines(text)

	def _try_parse(txt: str):
		# json.loads that returns None instead of raising
		try:
			return json.loads(txt)
		except Exception:
			return None

	data = _try_parse(cleaned)
	# Fallback: grab first balanced JSON object
	if data is None:
		brace = 0
		start = None
		for i, ch in enumerate(text):
			if ch == '{':
				if brace == 0:
					start = i
				brace += 1
			elif ch == '}':
				brace -= 1
				if brace == 0 and start is not None:
					candidate = _escape_newlines(text[start : i + 1])
					data = _try_parse(candidate)
					if data is not None:
						break
	if isinstance(data, dict):
		return {'status': 'success', 'data': data}
	return {'status': 'error', 'error': f'JSON parse failed. Raw head: {text[:200]}'}
# ---------------------------------------------------------
# Persistence helpers
# ---------------------------------------------------------
def load_seen_hashes(file_path: str = 'news_data.json') -> set:
	"""Load already-saved article URL hashes from disk for dedup across restarts.

	Returns an empty set when the file is missing or unreadable.
	"""
	if not os.path.exists(file_path):
		return set()
	hashes: set = set()
	try:
		with open(file_path) as fh:
			for entry in json.load(fh):
				if 'hash' in entry:
					hashes.add(entry['hash'])
	except Exception:
		# Corrupt/unreadable store: behave as if nothing was seen yet
		return set()
	return hashes
def save_article(article: dict, file_path: str = 'news_data.json'):
	"""Append article to disk with an md5-of-URL hash for future dedup.

	The store is a JSON list capped at the 100 most recent records.
	"""
	record = {
		'hash': hashlib.md5(article['url'].encode()).hexdigest(),
		'pulled_at': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
		'data': article,
	}
	stored: list = []
	if os.path.exists(file_path):
		try:
			with open(file_path) as fh:
				stored = json.load(fh)
		except Exception:
			# Unreadable store: start fresh rather than crash
			stored = []
	stored.append(record)
	with open(file_path, 'w') as fh:
		# Keep last 100
		json.dump(stored[-100:], fh, ensure_ascii=False, indent=2)
# ---------------------------------------------------------
# CLI functions
# ---------------------------------------------------------
def _fmt(ts_raw: str) -> str:
"""Format timestamp string"""
try:
return dtparser.parse(ts_raw).strftime('%Y-%m-%d %H:%M:%S')
except Exception:
return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
async def run_once(url: str, output_path: str, debug: bool):
	"""Run a single extraction and exit.

	Returns the md5 hash of the article URL on success (the same key monitor
	mode uses for dedup), or None when extraction failed or gave no URL.
	"""
	res = await extract_latest_article(url, debug)
	if res['status'] == 'success':
		art = res['data']
		url_val = art.get('url', '')
		hash_ = hashlib.md5(url_val.encode()).hexdigest() if url_val else None
		if url_val:
			save_article(art, output_path)
		ts = _fmt(art.get('posting_time', ''))
		sentiment = art.get('sentiment', 'neutral')
		# Default to the neutral marker for unknown sentiment values
		emoji = {'positive': '🟢', 'negative': '🔴', 'neutral': '🟡'}.get(sentiment, '🟡')
		# Prefer short_summary, then legacy 'summary', then the title
		summary = art.get('short_summary', art.get('summary', art.get('title', '')))
		if debug:
			print(json.dumps(art, ensure_ascii=False, indent=2))
			print()
		print(f'[{ts}] - {emoji} - {summary}')
		if not debug:
			print()  # Only add spacing in non-debug mode
		return hash_
	else:
		print(f'Error: {res["error"]}')
		return None
async def monitor(url: str, interval: int, output_path: str, debug: bool):
	"""Continuous monitoring mode.

	Polls *url* every *interval* seconds, printing and persisting each article
	the first time it is seen (dedup by md5 of the article URL, reloaded from
	*output_path* across restarts). Runs until interrupted.
	"""
	seen = load_seen_hashes(output_path)
	print(f'Monitoring {url} every {interval}s')
	print()
	while True:
		try:
			res = await extract_latest_article(url, debug)
			if res['status'] == 'success':
				art = res['data']
				url_val = art.get('url', '')
				hash_ = hashlib.md5(url_val.encode()).hexdigest() if url_val else None
				if hash_ and hash_ not in seen:
					# New article: remember it, persist it, report it
					seen.add(hash_)
					ts = _fmt(art.get('posting_time', ''))
					sentiment = art.get('sentiment', 'neutral')
					emoji = {'positive': '🟢', 'negative': '🔴', 'neutral': '🟡'}.get(sentiment, '🟡')
					summary = art.get('short_summary', art.get('title', ''))
					save_article(art, output_path)
					if debug:
						print(json.dumps(art, ensure_ascii=False, indent=2))
					print(f'[{ts}] - {emoji} - {summary}')
					if not debug:
						print()  # Add spacing between articles in non-debug mode
			elif debug:
				print(f'Error: {res["error"]}')
		except Exception as e:
			# Never let a single failed cycle kill the monitoring loop
			if debug:
				import traceback

				traceback.print_exc()
			else:
				print(f'Unhandled error: {e}')
		await asyncio.sleep(interval)
def main():
	"""Main entry point"""
	if args.once:
		# One-shot mode: extract a single article, then exit
		asyncio.run(run_once(args.url, args.output, args.debug))
		return
	try:
		asyncio.run(monitor(args.url, args.interval, args.output, args.debug))
	except KeyboardInterrupt:
		print('\nStopped by user')


if __name__ == '__main__':
	main()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/apps/news-use/news_monitor.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/browser/cloud_browser.py | """
Examples of using Browser-Use cloud browser service.
Prerequisites:
1. Set BROWSER_USE_API_KEY environment variable
2. Active subscription at https://cloud.browser-use.com
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, Browser, ChatBrowserUse
load_dotenv()
async def basic():
	"""Minimal cloud-browser usage: a single flag enables the cloud."""
	cloud_browser = Browser(use_cloud=True)
	agent = Agent(
		task='Go to github.com/browser-use/browser-use and tell me the star count',
		llm=ChatBrowserUse(model='bu-2-0'),
		browser=cloud_browser,
	)
	result = await agent.run()
	print(f'Result: {result}')
async def full_config():
	"""Cloud browser with explicit configuration (proxy country, timeout)."""
	browser = Browser(
		# cloud_profile_id='21182245-590f-4712-8888-9611651a024c',
		cloud_proxy_country_code='jp',
		cloud_timeout=60,
	)
	result = await Agent(
		task='go and check my ip address and the location',
		llm=ChatBrowserUse(model='bu-2-0'),
		browser=browser,
	).run()
	print(f'Result: {result}')
async def main():
	"""Run the selected example; report failures instead of crashing."""
	try:
		# await basic()
		await full_config()
	except Exception as exc:
		print(f'Error: {exc}')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/browser/cloud_browser.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/custom-functions/actor_use.py | import asyncio
import os
import sys
from browser_use.browser.session import BrowserSession
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import ActionResult, Agent, ChatOpenAI, Tools
tools = Tools()
llm = ChatOpenAI(model='gpt-4.1-mini')
@tools.registry.action('Click on submit button')
async def click_submit_button(browser_session: BrowserSession):
	"""Custom agent action: resolve 'submit button' on the current page via the LLM and click it."""
	page = await browser_session.must_get_current_page()
	# must_get_element_by_prompt maps a natural-language description to a concrete element
	submit_button = await page.must_get_element_by_prompt('submit button', llm)
	await submit_button.click()
	return ActionResult(is_done=True, extracted_content='Submit button clicked!')
async def main():
	"""Run the agent with the custom click_submit_button action registered."""
	# Fixed typo in the target domain: 'brower-use.com' -> 'browser-use.com'
	task = 'go to browser-use.com and then click on the submit button'
	agent = Agent(task=task, llm=llm, tools=tools)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/custom-functions/actor_use.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/rerun_history.py | """
Example: Rerunning saved agent history with variable detection and substitution
This example shows how to:
1. Run an agent and save its history (including initial URL navigation)
2. Detect variables in the saved history (emails, names, dates, etc.)
3. Rerun the history with substituted values (different data)
4. Get AI-generated summary of rerun completion (with screenshot analysis)
Useful for:
- Debugging agent behavior
- Testing changes with consistent scenarios
- Replaying successful workflows with different data
- Understanding what values can be substituted in reruns
- Getting automated verification of rerun success
Note: Initial actions (like opening URLs from tasks) are now automatically
saved to history and will be replayed during rerun, so you don't need to
worry about manually specifying URLs when rerunning.
AI Features During Rerun:
1. AI Step for Extract Actions:
When an 'extract' action is replayed, the rerun automatically uses AI to
re-analyze the current page content (since it may have changed with new data).
This ensures the extracted content reflects the current state, not cached results.
2. AI Summary:
At the end of the rerun, an AI summary analyzes the final screenshot and
execution statistics to determine success/failure.
Custom LLM Usage:
# Option 1: Use agent's LLM (default)
results = await agent.load_and_rerun(history_file)
# Option 2: Use custom LLMs for AI steps and summary
from browser_use.llm import ChatOpenAI
custom_llm = ChatOpenAI(model='gpt-4.1-mini')
results = await agent.load_and_rerun(
history_file,
ai_step_llm=custom_llm, # For extract action re-evaluation
summary_llm=custom_llm, # For final summary
)
The AI summary will be the last item in results and will have:
- extracted_content: The summary text
- success: Whether rerun was successful
- is_done: Always True for summary
"""
import asyncio
from pathlib import Path
from browser_use import Agent
from browser_use.llm import ChatBrowserUse
async def main():
	"""Demonstrate the save → detect-variables → rerun-with-substitution workflow."""
	# Example task to demonstrate history saving and rerunning
	history_file = Path('agent_history.json')
	# Fixed typo in the prompt: 'refernence' -> 'reference'
	task = 'Go to https://browser-use.github.io/stress-tests/challenges/reference-number-form.html and fill the form with example data and submit and extract the reference number.'
	llm = ChatBrowserUse(model='bu-2-0')
	# Optional: Use custom LLMs for AI features during rerun
	# Uncomment to use a custom LLM:
	# from browser_use.llm import ChatOpenAI
	# custom_llm = ChatOpenAI(model='gpt-4.1-mini')
	# ai_step_llm = custom_llm  # For re-evaluating extract actions
	# summary_llm = custom_llm  # For final summary
	ai_step_llm = None  # Set to None to use agent's LLM (default)
	summary_llm = None  # Set to None to use agent's LLM (default)
	# Step 1: Run the agent and save history
	print('=== Running Agent ===')
	agent = Agent(task=task, llm=llm, max_actions_per_step=1)
	await agent.run(max_steps=10)
	agent.save_history(history_file)
	print(f'✓ History saved to {history_file}')
	# Step 2: Detect variables in the saved history
	print('\n=== Detecting Variables ===')
	variables = agent.detect_variables()
	if variables:
		print(f'Found {len(variables)} variable(s):')
		for var_name, var_info in variables.items():
			format_info = f' (format: {var_info.format})' if var_info.format else ''
			print(f'  • {var_name}: "{var_info.original_value}"{format_info}')
	else:
		print('No variables detected in history')
	# Step 3: Rerun the history with substituted values
	if variables:
		print('\n=== Rerunning History (Substituted Values) ===')
		# Create new values for the detected variables
		new_values = {}
		for var_name, var_info in variables.items():
			# Map detected variables to new values
			if var_name == 'email':
				new_values[var_name] = 'jane.smith@example.com'
			elif var_name == 'full_name':
				new_values[var_name] = 'Jane Smith'
			elif var_name.startswith('full_name_'):
				new_values[var_name] = 'General Information'
			elif var_name == 'first_name':
				new_values[var_name] = 'Jane'
			elif var_name == 'date':
				new_values[var_name] = '1995-05-15'
			elif var_name == 'country':
				new_values[var_name] = 'Canada'
			# You can add more variable substitutions as needed
		if new_values:
			print(f'Substituting {len(new_values)} variable(s):')
			for var_name, new_value in new_values.items():
				old_value = variables[var_name].original_value
				print(f'  • {var_name}: "{old_value}" → "{new_value}"')
			# Rerun with substituted values and optional custom LLMs
			substitute_agent = Agent(task='', llm=llm)
			results = await substitute_agent.load_and_rerun(
				history_file,
				variables=new_values,
				ai_step_llm=ai_step_llm,  # For extract action re-evaluation
				summary_llm=summary_llm,  # For final summary
				max_step_interval=20,
				delay_between_actions=1,
			)
			# Display AI-generated summary (last result)
			if results and results[-1].is_done:
				summary = results[-1]
				print('\n📊 AI Summary:')
				print(f'   Summary: {summary.extracted_content}')
				print(f'   Success: {summary.success}')
			print('✓ History rerun with substituted values complete')
	else:
		print('\n⚠️ No variables detected, skipping substitution rerun')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/rerun_history.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/dom/serializer/paint_order.py | from collections import defaultdict
from dataclasses import dataclass
from browser_use.dom.views import SimplifiedNode
"""
Helper class for maintaining a union of rectangles (used for order of elements calculation)
"""
@dataclass(frozen=True, slots=True)
class Rect:
"""Closed axis-aligned rectangle with (x1,y1) bottom-left, (x2,y2) top-right."""
x1: float
y1: float
x2: float
y2: float
def __post_init__(self):
if not (self.x1 <= self.x2 and self.y1 <= self.y2):
return False
# --- fast relations ----------------------------------------------------
def area(self) -> float:
return (self.x2 - self.x1) * (self.y2 - self.y1)
def intersects(self, other: 'Rect') -> bool:
return not (self.x2 <= other.x1 or other.x2 <= self.x1 or self.y2 <= other.y1 or other.y2 <= self.y1)
def contains(self, other: 'Rect') -> bool:
return self.x1 <= other.x1 and self.y1 <= other.y1 and self.x2 >= other.x2 and self.y2 >= other.y2
class RectUnionPure:
"""
Maintains a *disjoint* set of rectangles.
No external dependencies - fine for a few thousand rectangles.
"""
__slots__ = ('_rects',)
def __init__(self):
self._rects: list[Rect] = []
# -----------------------------------------------------------------
def _split_diff(self, a: Rect, b: Rect) -> list[Rect]:
r"""
Return list of up to 4 rectangles = a \ b.
Assumes a intersects b.
"""
parts = []
# Bottom slice
if a.y1 < b.y1:
parts.append(Rect(a.x1, a.y1, a.x2, b.y1))
# Top slice
if b.y2 < a.y2:
parts.append(Rect(a.x1, b.y2, a.x2, a.y2))
# Middle (vertical) strip: y overlap is [max(a.y1,b.y1), min(a.y2,b.y2)]
y_lo = max(a.y1, b.y1)
y_hi = min(a.y2, b.y2)
# Left slice
if a.x1 < b.x1:
parts.append(Rect(a.x1, y_lo, b.x1, y_hi))
# Right slice
if b.x2 < a.x2:
parts.append(Rect(b.x2, y_lo, a.x2, y_hi))
return parts
# -----------------------------------------------------------------
def contains(self, r: Rect) -> bool:
"""
True iff r is fully covered by the current union.
"""
if not self._rects:
return False
stack = [r]
for s in self._rects:
new_stack = []
for piece in stack:
if s.contains(piece):
# piece completely gone
continue
if piece.intersects(s):
new_stack.extend(self._split_diff(piece, s))
else:
new_stack.append(piece)
if not new_stack: # everything eaten – covered
return True
stack = new_stack
return False # something survived
# -----------------------------------------------------------------
def add(self, r: Rect) -> bool:
"""
Insert r unless it is already covered.
Returns True if the union grew.
"""
if self.contains(r):
return False
pending = [r]
i = 0
while i < len(self._rects):
s = self._rects[i]
new_pending = []
changed = False
for piece in pending:
if piece.intersects(s):
new_pending.extend(self._split_diff(piece, s))
changed = True
else:
new_pending.append(piece)
pending = new_pending
if changed:
# s unchanged; proceed with next existing rectangle
i += 1
else:
i += 1
# Any left‑over pieces are new, non‑overlapping areas
self._rects.extend(pending)
return True
class PaintOrderRemover:
	"""
	Calculates which elements should be removed based on the paint order parameter.
	"""

	def __init__(self, root: SimplifiedNode):
		# Root of the simplified DOM tree to analyze
		self.root = root

	def calculate_paint_order(self) -> None:
		"""Mark nodes fully covered by elements painted above them.

		Collects every node with both a paint order and bounds, groups them by
		paint order, then sweeps from the highest (topmost) order downwards:
		any node whose bounds are already covered by the union of
		higher-painted rectangles gets `ignored_by_paint_order = True`.
		"""
		all_simplified_nodes_with_paint_order: list[SimplifiedNode] = []

		def collect_paint_order(node: SimplifiedNode) -> None:
			# Depth-first collection of nodes carrying paint order AND bounds
			if (
				node.original_node.snapshot_node
				and node.original_node.snapshot_node.paint_order is not None
				and node.original_node.snapshot_node.bounds is not None
			):
				all_simplified_nodes_with_paint_order.append(node)
			for child in node.children:
				collect_paint_order(child)

		collect_paint_order(self.root)
		grouped_by_paint_order: defaultdict[int, list[SimplifiedNode]] = defaultdict(list)
		for node in all_simplified_nodes_with_paint_order:
			if node.original_node.snapshot_node and node.original_node.snapshot_node.paint_order is not None:
				grouped_by_paint_order[node.original_node.snapshot_node.paint_order].append(node)
		rect_union = RectUnionPure()
		# Sweep from highest paint order (painted last, i.e. on top) to lowest
		for paint_order, nodes in sorted(grouped_by_paint_order.items(), key=lambda x: -x[0]):
			rects_to_add = []
			for node in nodes:
				if not node.original_node.snapshot_node or not node.original_node.snapshot_node.bounds:
					continue  # shouldn't happen by how we filter them out in the first place
				rect = Rect(
					x1=node.original_node.snapshot_node.bounds.x,
					y1=node.original_node.snapshot_node.bounds.y,
					x2=node.original_node.snapshot_node.bounds.x + node.original_node.snapshot_node.bounds.width,
					y2=node.original_node.snapshot_node.bounds.y + node.original_node.snapshot_node.bounds.height,
				)
				if rect_union.contains(rect):
					node.ignored_by_paint_order = True
				# Don't treat this node as an occluder when its background-color is
				# fully transparent or its opacity is below 0.8
				if (
					node.original_node.snapshot_node.computed_styles
					and node.original_node.snapshot_node.computed_styles.get('background-color', 'rgba(0, 0, 0, 0)')
					== 'rgba(0, 0, 0, 0)'
				) or (
					node.original_node.snapshot_node.computed_styles
					and float(node.original_node.snapshot_node.computed_styles.get('opacity', '1'))
					< 0.8  # empirically chosen ("highly vibes based") threshold
				):
					continue
				rects_to_add.append(rect)
			# Add this layer's occluders only after the whole layer was checked,
			# so nodes sharing a paint order never occlude each other
			for rect in rects_to_add:
				rect_union.add(rect)
		return None
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/dom/serializer/paint_order.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/follow_up_task.py | from dotenv import load_dotenv
from browser_use import Agent, Browser
load_dotenv()
import asyncio
async def main():
	"""Run a task, then a follow-up task, in one persistent browser session."""
	session = Browser(keep_alive=True)
	await session.start()
	agent = Agent(task='search for browser-use.', browser_session=session)
	await agent.run(max_steps=2)
	# Queue a follow-up task that reuses the same agent state and browser
	agent.add_new_task('return the title of first result')
	await agent.run()
	await session.kill()


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/follow_up_task.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/agent/cloud_events.py | import base64
import os
from datetime import datetime, timezone
from pathlib import Path
import anyio
from bubus import BaseEvent
from pydantic import Field, field_validator
from uuid_extensions import uuid7str
# Size caps enforced by the Field(max_length=...) validators below.
MAX_STRING_LENGTH = 500000  # 500K chars (stale comment previously said 100K; the value is 500000)
MAX_URL_LENGTH = 100000
MAX_TASK_LENGTH = 100000
MAX_COMMENT_LENGTH = 2000
MAX_FILE_CONTENT_SIZE = 50 * 1024 * 1024  # 50MB
class UpdateAgentTaskEvent(BaseEvent):
	"""Cloud-sync event carrying updated state for an existing agent task.

	`id` and `user_id` (plus optional `device_id`) identify the task; the
	remaining fields are all optional and update the stored task when set.
	"""

	# Required fields for identification
	id: str  # The task ID to update
	user_id: str = Field(max_length=255)  # For authorization
	device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
	# Optional fields that can be updated
	stopped: bool | None = None
	paused: bool | None = None
	done_output: str | None = Field(None, max_length=MAX_STRING_LENGTH)
	finished_at: datetime | None = None
	agent_state: dict | None = None
	user_feedback_type: str | None = Field(None, max_length=10)  # UserFeedbackType enum value as string
	user_comment: str | None = Field(None, max_length=MAX_COMMENT_LENGTH)
	gif_url: str | None = Field(None, max_length=MAX_URL_LENGTH)

	@classmethod
	def from_agent(cls, agent) -> 'UpdateAgentTaskEvent':
		"""Create an UpdateAgentTaskEvent from an Agent instance"""
		if not hasattr(agent, '_task_start_time'):
			raise ValueError('Agent must have _task_start_time attribute')
		done_output = agent.history.final_result() if agent.history else None
		# Truncate oversized outputs so the max_length validator passes
		if done_output and len(done_output) > MAX_STRING_LENGTH:
			done_output = done_output[:MAX_STRING_LENGTH]
		return cls(
			id=str(agent.task_id),
			user_id='',  # To be filled by cloud handler
			device_id=agent.cloud_sync.auth_client.device_id
			if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
			else None,
			stopped=agent.state.stopped if hasattr(agent.state, 'stopped') else False,
			paused=agent.state.paused if hasattr(agent.state, 'paused') else False,
			done_output=done_output,
			finished_at=datetime.now(timezone.utc) if agent.history and agent.history.is_done() else None,
			agent_state=agent.state.model_dump() if hasattr(agent.state, 'model_dump') else {},
			user_feedback_type=None,
			user_comment=None,
			gif_url=None,
			# user_feedback_type and user_comment would be set by the API/frontend
			# gif_url would be set after GIF generation if needed
		)
class CreateAgentOutputFileEvent(BaseEvent):
    """Cloud-sync event that uploads an output artifact (e.g. the recording GIF) for a task."""

    # Model fields
    id: str = Field(default_factory=uuid7str)
    user_id: str = Field(max_length=255)
    device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
    task_id: str
    file_name: str = Field(max_length=255)
    file_content: str | None = None  # Base64 encoded file content
    content_type: str | None = Field(None, max_length=100)  # MIME type for file uploads
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))

    @field_validator('file_content')
    @classmethod
    def validate_file_size(cls, v: str | None) -> str | None:
        """Validate base64 file content size.

        Raises:
            ValueError: if the decoded payload would exceed MAX_FILE_CONTENT_SIZE.
        """
        if v is None:
            return v
        # Remove data URL prefix if present
        if ',' in v:
            v = v.split(',')[1]
        # Estimate decoded size (base64 is ~33% larger)
        estimated_size = len(v) * 3 / 4
        if estimated_size > MAX_FILE_CONTENT_SIZE:
            raise ValueError(f'File content exceeds maximum size of {MAX_FILE_CONTENT_SIZE / 1024 / 1024}MB')
        return v

    @classmethod
    async def from_agent_and_file(cls, agent, output_path: str) -> 'CreateAgentOutputFileEvent':
        """Create a CreateAgentOutputFileEvent from a file path

        Raises:
            FileNotFoundError: if ``output_path`` does not exist.
        """
        gif_path = Path(output_path)
        if not gif_path.exists():
            raise FileNotFoundError(f'File not found: {output_path}')
        gif_size = os.path.getsize(gif_path)
        # Read GIF content for base64 encoding if needed.
        # Use the shared MAX_FILE_CONTENT_SIZE constant (was a duplicated `50 * 1024 * 1024`
        # literal) so this guard cannot drift out of sync with validate_file_size above.
        gif_content = None
        if gif_size < MAX_FILE_CONTENT_SIZE:
            async with await anyio.open_file(gif_path, 'rb') as f:
                gif_bytes = await f.read()
                gif_content = base64.b64encode(gif_bytes).decode('utf-8')
        return cls(
            user_id='',  # To be filled by cloud handler
            device_id=agent.cloud_sync.auth_client.device_id
            if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
            else None,
            task_id=str(agent.task_id),
            file_name=gif_path.name,
            file_content=gif_content,  # Base64 encoded
            content_type='image/gif',
        )
class CreateAgentStepEvent(BaseEvent):
    """Cloud-sync event capturing a single agent step (goals, memory, actions, screenshot)."""

    # Model fields
    id: str = Field(default_factory=uuid7str)
    user_id: str = Field(max_length=255)  # Added for authorization checks
    device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    agent_task_id: str
    step: int
    evaluation_previous_goal: str = Field(max_length=MAX_STRING_LENGTH)
    memory: str = Field(max_length=MAX_STRING_LENGTH)
    next_goal: str = Field(max_length=MAX_STRING_LENGTH)
    actions: list[dict]
    screenshot_url: str | None = Field(None, max_length=MAX_FILE_CONTENT_SIZE)  # ~50MB for base64 images
    url: str = Field(default='', max_length=MAX_URL_LENGTH)

    @field_validator('screenshot_url')
    @classmethod
    def validate_screenshot_size(cls, v: str | None) -> str | None:
        """Validate screenshot URL or base64 content size.

        Plain URLs pass through untouched; only inline ``data:`` payloads are size-checked.

        Raises:
            ValueError: if base64 screenshot data would exceed MAX_FILE_CONTENT_SIZE.
        """
        if v is None or not v.startswith('data:'):
            return v
        # It's base64 data, check size
        if ',' in v:
            base64_part = v.split(',')[1]
            estimated_size = len(base64_part) * 3 / 4
            if estimated_size > MAX_FILE_CONTENT_SIZE:
                raise ValueError(f'Screenshot content exceeds maximum size of {MAX_FILE_CONTENT_SIZE / 1024 / 1024}MB')
        return v

    @classmethod
    def from_agent_step(
        cls, agent, model_output, result: list, actions_data: list[dict], browser_state_summary
    ) -> 'CreateAgentStepEvent':
        """Create a CreateAgentStepEvent from agent step data"""
        # Local import: logging is not imported at module top level.
        # (Previously duplicated in both branches below; hoisted here once.)
        import logging

        logger = logging.getLogger(__name__)
        # Extract current state from model output
        current_state = model_output.current_state if hasattr(model_output, 'current_state') else None
        # Capture screenshot as base64 data URL if available
        screenshot_url = None
        if browser_state_summary.screenshot:
            screenshot_url = f'data:image/png;base64,{browser_state_summary.screenshot}'
            logger.debug(f'📸 Including screenshot in CreateAgentStepEvent, length: {len(browser_state_summary.screenshot)}')
        else:
            logger.debug('📸 No screenshot in browser_state_summary for CreateAgentStepEvent')
        return cls(
            user_id='',  # To be filled by cloud handler
            device_id=agent.cloud_sync.auth_client.device_id
            if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
            else None,
            agent_task_id=str(agent.task_id),
            step=agent.state.n_steps,
            evaluation_previous_goal=current_state.evaluation_previous_goal if current_state else '',
            memory=current_state.memory if current_state else '',
            next_goal=current_state.next_goal if current_state else '',
            actions=actions_data,  # List of action dicts
            url=browser_state_summary.url,
            screenshot_url=screenshot_url,
        )
class CreateAgentTaskEvent(BaseEvent):
    """Cloud-sync event that registers a newly started agent task."""

    # Model fields
    id: str = Field(default_factory=uuid7str)
    user_id: str = Field(max_length=255)  # Added for authorization checks
    device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
    agent_session_id: str
    llm_model: str = Field(max_length=200)  # LLMModel enum value as string
    stopped: bool = False
    paused: bool = False
    task: str = Field(max_length=MAX_TASK_LENGTH)
    done_output: str | None = Field(None, max_length=MAX_STRING_LENGTH)
    scheduled_task_id: str | None = None
    started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    finished_at: datetime | None = None
    agent_state: dict = Field(default_factory=dict)
    user_feedback_type: str | None = Field(None, max_length=10)  # UserFeedbackType enum value as string
    user_comment: str | None = Field(None, max_length=MAX_COMMENT_LENGTH)
    gif_url: str | None = Field(None, max_length=MAX_URL_LENGTH)

    @classmethod
    def from_agent(cls, agent) -> 'CreateAgentTaskEvent':
        """Create a CreateAgentTaskEvent from an Agent instance

        NOTE(review): unlike UpdateAgentTaskEvent.from_agent, this does not check that
        agent._task_start_time exists before reading it — presumably always set by run().
        """
        return cls(
            id=str(agent.task_id),
            user_id='',  # To be filled by cloud handler
            device_id=agent.cloud_sync.auth_client.device_id
            if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
            else None,
            agent_session_id=str(agent.session_id),
            task=agent.task,
            llm_model=agent.llm.model_name,
            agent_state=agent.state.model_dump() if hasattr(agent.state, 'model_dump') else {},
            stopped=False,
            paused=False,
            done_output=None,
            # _task_start_time is a unix timestamp; convert to an aware UTC datetime
            started_at=datetime.fromtimestamp(agent._task_start_time, tz=timezone.utc),
            finished_at=None,
            user_feedback_type=None,
            user_comment=None,
            gif_url=None,
        )
class CreateAgentSessionEvent(BaseEvent):
    """Cloud-sync event that registers a new agent browser session."""

    # Model fields
    id: str = Field(default_factory=uuid7str)
    user_id: str = Field(max_length=255)
    device_id: str | None = Field(None, max_length=255)  # Device ID for auth lookup
    browser_session_id: str = Field(max_length=255)
    browser_session_live_url: str = Field(max_length=MAX_URL_LENGTH)
    browser_session_cdp_url: str = Field(max_length=MAX_URL_LENGTH)
    browser_session_stopped: bool = False
    browser_session_stopped_at: datetime | None = None
    is_source_api: bool | None = None
    browser_state: dict = Field(default_factory=dict)
    browser_session_data: dict | None = None

    @classmethod
    def from_agent(cls, agent) -> 'CreateAgentSessionEvent':
        """Create a CreateAgentSessionEvent from an Agent instance"""
        return cls(
            id=str(agent.session_id),
            user_id='',  # To be filled by cloud handler
            device_id=agent.cloud_sync.auth_client.device_id
            if hasattr(agent, 'cloud_sync') and agent.cloud_sync and agent.cloud_sync.auth_client
            else None,
            browser_session_id=agent.browser_session.id,
            browser_session_live_url='',  # To be filled by cloud handler
            browser_session_cdp_url='',  # To be filled by cloud handler
            # Snapshot of the browser configuration; several fields are placeholders
            # that the cloud side updates as the session progresses.
            browser_state={
                'viewport': agent.browser_profile.viewport if agent.browser_profile else {'width': 1280, 'height': 720},
                'user_agent': agent.browser_profile.user_agent if agent.browser_profile else None,
                'headless': agent.browser_profile.headless if agent.browser_profile else True,
                'initial_url': None,  # Will be updated during execution
                'final_url': None,  # Will be updated during execution
                'total_pages_visited': 0,  # Will be updated during execution
                'session_duration_seconds': 0,  # Will be updated during execution
            },
            browser_session_data={
                'cookies': [],
                'secrets': {},
                # TODO: send secrets safely so tasks can be replayed on cloud seamlessly
                # 'secrets': dict(agent.sensitive_data) if agent.sensitive_data else {},
                'allowed_domains': agent.browser_profile.allowed_domains if agent.browser_profile else [],
            },
        )
class UpdateAgentSessionEvent(BaseEvent):
    """Event to update an existing agent session"""

    # Model fields
    id: str  # Session ID to update
    user_id: str = Field(max_length=255)
    device_id: str | None = Field(None, max_length=255)
    # Fields left as None are not updated
    browser_session_stopped: bool | None = None
    browser_session_stopped_at: datetime | None = None
    end_reason: str | None = Field(None, max_length=100)  # Why the session ended
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/agent/cloud_events.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/events.py | """Event definitions for browser communication."""
import inspect
import os
from typing import Any, Literal
from bubus import BaseEvent
from bubus.models import T_EventResultType
from cdp_use.cdp.target import TargetID
from pydantic import BaseModel, Field, field_validator
from browser_use.browser.views import BrowserStateSummary
from browser_use.dom.views import EnhancedDOMTreeNode
def _get_timeout(env_var: str, default: float) -> float | None:
"""
Safely parse environment variable timeout values with robust error handling.
Args:
env_var: Environment variable name (e.g. 'TIMEOUT_NavigateToUrlEvent')
default: Default timeout value as float (e.g. 15.0)
Returns:
Parsed float value or the default if parsing fails
Raises:
ValueError: Only if both env_var and default are invalid (should not happen with valid defaults)
"""
# Try environment variable first
env_value = os.getenv(env_var)
if env_value:
try:
parsed = float(env_value)
if parsed < 0:
print(f'Warning: {env_var}={env_value} is negative, using default {default}')
return default
return parsed
except (ValueError, TypeError):
print(f'Warning: {env_var}={env_value} is not a valid number, using default {default}')
# Fall back to default
return default
# ============================================================================
# Agent/Tools -> BrowserSession Events (High-level browser actions)
# ============================================================================
class ElementSelectedEvent(BaseEvent[T_EventResultType]):
    """An element was selected."""

    # Base class for all events that target a specific DOM node
    node: EnhancedDOMTreeNode

    @field_validator('node', mode='before')
    @classmethod
    def serialize_node(cls, data: EnhancedDOMTreeNode | None) -> EnhancedDOMTreeNode | None:
        # Rebuild the node with its circular-reference fields nulled out so the
        # event payload is serializable; handlers only need the flat node data.
        if data is None:
            return None
        return EnhancedDOMTreeNode(
            node_id=data.node_id,
            backend_node_id=data.backend_node_id,
            session_id=data.session_id,
            frame_id=data.frame_id,
            target_id=data.target_id,
            node_type=data.node_type,
            node_name=data.node_name,
            node_value=data.node_value,
            attributes=data.attributes,
            is_scrollable=data.is_scrollable,
            is_visible=data.is_visible,
            absolute_position=data.absolute_position,
            # override the circular reference fields in EnhancedDOMTreeNode as they cant be serialized and aren't needed by event handlers
            # only used internally by the DOM service during DOM tree building process, not intended public API use
            content_document=None,
            shadow_root_type=None,
            shadow_roots=[],
            parent_node=None,
            children_nodes=[],
            ax_node=None,
            snapshot_node=None,
        )
# TODO: add page handle to events
# class PageHandle(share a base with browser.session.CDPSession?):
# url: str
# target_id: TargetID
# @classmethod
# def from_target_id(cls, target_id: TargetID) -> Self:
# return cls(target_id=target_id)
# @classmethod
# def from_target_id(cls, target_id: TargetID) -> Self:
# return cls(target_id=target_id)
# @classmethod
# def from_url(cls, url: str) -> Self:
# @property
# def root_frame_id(self) -> str:
# return self.target_id
# @property
# def session_id(self) -> str:
# return browser_session.get_or_create_cdp_session(self.target_id).session_id
# class PageSelectedEvent(BaseEvent[T_EventResultType]):
# """An event like SwitchToTabEvent(page=PageHandle) or CloseTabEvent(page=PageHandle)"""
# page: PageHandle
class NavigateToUrlEvent(BaseEvent[None]):
    """Navigate to a specific URL."""

    url: str
    wait_until: Literal['load', 'domcontentloaded', 'networkidle', 'commit'] = 'load'
    timeout_ms: int | None = None
    new_tab: bool = Field(
        default=False, description='Set True to leave the current tab alone and open a new tab in the foreground for the new URL'
    )
    # existing_tab: PageHandle | None = None # TODO
    # time limits enforced by bubus, not exposed to LLM:
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigateToUrlEvent', 30.0))  # seconds


class ClickElementEvent(ElementSelectedEvent[dict[str, Any] | None]):
    """Click an element."""

    node: 'EnhancedDOMTreeNode'
    button: Literal['left', 'right', 'middle'] = 'left'
    # click_count: int = 1 # TODO
    # expect_download: bool = False # moved to downloads_watchdog.py
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ClickElementEvent', 15.0))  # seconds


class ClickCoordinateEvent(BaseEvent[dict]):
    """Click at specific coordinates."""

    coordinate_x: int
    coordinate_y: int
    button: Literal['left', 'right', 'middle'] = 'left'
    force: bool = False  # If True, skip safety checks (file input, print, select)
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ClickCoordinateEvent', 15.0))  # seconds


class TypeTextEvent(ElementSelectedEvent[dict | None]):
    """Type text into an element."""

    node: 'EnhancedDOMTreeNode'
    text: str
    clear: bool = True  # clear existing value before typing
    is_sensitive: bool = False  # Flag to indicate if text contains sensitive data
    sensitive_key_name: str | None = None  # Name of the sensitive key being typed (e.g., 'username', 'password')
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TypeTextEvent', 60.0))  # seconds


class ScrollEvent(ElementSelectedEvent[None]):
    """Scroll the page or element."""

    direction: Literal['up', 'down', 'left', 'right']
    amount: int  # pixels
    node: 'EnhancedDOMTreeNode | None' = None  # None means scroll page
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScrollEvent', 8.0))  # seconds


class SwitchTabEvent(BaseEvent[TargetID]):
    """Switch to a different tab."""

    target_id: TargetID | None = Field(default=None, description='None means switch to the most recently opened tab')
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SwitchTabEvent', 10.0))  # seconds


class CloseTabEvent(BaseEvent[None]):
    """Close a tab."""

    target_id: TargetID
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_CloseTabEvent', 10.0))  # seconds


class ScreenshotEvent(BaseEvent[str]):
    """Request to take a screenshot."""

    full_page: bool = False
    clip: dict[str, float] | None = None  # {x, y, width, height}
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScreenshotEvent', 15.0))  # seconds


class BrowserStateRequestEvent(BaseEvent[BrowserStateSummary]):
    """Request current browser state."""

    include_dom: bool = True
    include_screenshot: bool = True
    include_recent_events: bool = False
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStateRequestEvent', 30.0))  # seconds


# class WaitForConditionEvent(BaseEvent):
#     """Wait for a condition."""
#     condition: Literal['navigation', 'selector', 'timeout', 'load_state']
#     timeout: float = 30000
#     selector: str | None = None
#     state: Literal['attached', 'detached', 'visible', 'hidden'] | None = None


class GoBackEvent(BaseEvent[None]):
    """Navigate back in browser history."""

    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_GoBackEvent', 15.0))  # seconds


class GoForwardEvent(BaseEvent[None]):
    """Navigate forward in browser history."""

    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_GoForwardEvent', 15.0))  # seconds


class RefreshEvent(BaseEvent[None]):
    """Refresh/reload the current page."""

    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_RefreshEvent', 15.0))  # seconds


class WaitEvent(BaseEvent[None]):
    """Wait for a specified number of seconds."""

    seconds: float = 3.0
    max_seconds: float = 10.0  # Safety cap
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_WaitEvent', 60.0))  # seconds


class SendKeysEvent(BaseEvent[None]):
    """Send keyboard keys/shortcuts."""

    keys: str  # e.g., "ctrl+a", "cmd+c", "Enter"
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SendKeysEvent', 60.0))  # seconds


class UploadFileEvent(ElementSelectedEvent[None]):
    """Upload a file to an element."""

    node: 'EnhancedDOMTreeNode'
    file_path: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_UploadFileEvent', 30.0))  # seconds


class GetDropdownOptionsEvent(ElementSelectedEvent[dict[str, str]]):
    """Get all options from any dropdown (native <select>, ARIA menus, or custom dropdowns).
    Returns a dict containing dropdown type, options list, and element metadata."""

    node: 'EnhancedDOMTreeNode'
    event_timeout: float | None = Field(
        default_factory=lambda: _get_timeout('TIMEOUT_GetDropdownOptionsEvent', 15.0)
    )  # some dropdowns lazy-load the list of options on first interaction, so we need to wait for them to load (e.g. table filter lists can have thousands of options)


class SelectDropdownOptionEvent(ElementSelectedEvent[dict[str, str]]):
    """Select a dropdown option by exact text from any dropdown type.
    Returns a dict containing success status and selection details."""

    node: 'EnhancedDOMTreeNode'
    text: str  # The option text to select
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SelectDropdownOptionEvent', 8.0))  # seconds


class ScrollToTextEvent(BaseEvent[None]):
    """Scroll to specific text on the page. Raises exception if text not found."""

    text: str
    direction: Literal['up', 'down'] = 'down'
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_ScrollToTextEvent', 15.0))  # seconds
# ============================================================================
class BrowserStartEvent(BaseEvent):
    """Start/connect to browser."""

    cdp_url: str | None = None  # None means launch a local browser instead of connecting
    launch_options: dict[str, Any] = Field(default_factory=dict)
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStartEvent', 30.0))  # seconds


class BrowserStopEvent(BaseEvent):
    """Stop/disconnect from browser."""

    force: bool = False
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStopEvent', 45.0))  # seconds


class BrowserLaunchResult(BaseModel):
    """Result of launching a browser."""

    # TODO: add browser executable_path, pid, version, latency, user_data_dir, X11 $DISPLAY, host IP address, etc.
    cdp_url: str


class BrowserLaunchEvent(BaseEvent[BrowserLaunchResult]):
    """Launch a local browser process."""

    # TODO: add executable_path, proxy settings, preferences, extra launch args, etc.
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserLaunchEvent', 30.0))  # seconds


class BrowserKillEvent(BaseEvent):
    """Kill local browser subprocess."""

    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserKillEvent', 30.0))  # seconds
# TODO: replace all Runtime.evaluate() calls with this event
# class ExecuteJavaScriptEvent(BaseEvent):
# """Execute JavaScript in page context."""
# target_id: TargetID
# expression: str
# await_promise: bool = True
# event_timeout: float | None = 60.0 # seconds
# TODO: add this and use the old BrowserProfile.viewport options to set it
# class SetViewportEvent(BaseEvent):
# """Set the viewport size."""
# width: int
# height: int
# device_scale_factor: float = 1.0
# event_timeout: float | None = 15.0 # seconds
# Moved to storage state
# class SetCookiesEvent(BaseEvent):
# """Set browser cookies."""
# cookies: list[dict[str, Any]]
# event_timeout: float | None = (
# 30.0 # only long to support the edge case of restoring a big localStorage / on many origins (has to O(n) visit each origin to restore)
# )
# class GetCookiesEvent(BaseEvent):
# """Get browser cookies."""
# urls: list[str] | None = None
# event_timeout: float | None = 30.0 # seconds
# ============================================================================
# DOM-related Events
# ============================================================================
class BrowserConnectedEvent(BaseEvent):
    """Browser has started/connected."""

    cdp_url: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserConnectedEvent', 30.0))  # seconds


class BrowserStoppedEvent(BaseEvent):
    """Browser has stopped/disconnected."""

    reason: str | None = None
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserStoppedEvent', 30.0))  # seconds


class TabCreatedEvent(BaseEvent):
    """A new tab was created."""

    target_id: TargetID
    url: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TabCreatedEvent', 30.0))  # seconds


class TabClosedEvent(BaseEvent):
    """A tab was closed."""

    target_id: TargetID
    # TODO:
    # new_focus_target_id: int | None = None
    # new_focus_url: str | None = None
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TabClosedEvent', 3.0))  # seconds


# TODO: emit this when DOM changes significantly, inner frame navigates, form submits, history.pushState(), etc.
# class TabUpdatedEvent(BaseEvent):
#     """Tab information updated (URL changed, etc.)."""
#     target_id: TargetID
#     url: str


class AgentFocusChangedEvent(BaseEvent):
    """Agent focus changed to a different tab."""

    target_id: TargetID
    url: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_AgentFocusChangedEvent', 10.0))  # seconds


class TargetCrashedEvent(BaseEvent):
    """A target has crashed."""

    target_id: TargetID
    error: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_TargetCrashedEvent', 10.0))  # seconds


class NavigationStartedEvent(BaseEvent):
    """Navigation started."""

    target_id: TargetID
    url: str
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigationStartedEvent', 30.0))  # seconds


class NavigationCompleteEvent(BaseEvent):
    """Navigation completed."""

    target_id: TargetID
    url: str
    status: int | None = None  # HTTP status code, when known
    error_message: str | None = None  # Error/timeout message if navigation had issues
    loading_status: str | None = None  # Detailed loading status (e.g., network timeout info)
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_NavigationCompleteEvent', 30.0))  # seconds
# ============================================================================
# Error Events
# ============================================================================
class BrowserErrorEvent(BaseEvent):
    """An error occurred in the browser layer."""

    error_type: str
    message: str
    details: dict[str, Any] = Field(default_factory=dict)
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserErrorEvent', 30.0))  # seconds


class BrowserReconnectingEvent(BaseEvent):
    """WebSocket reconnection attempt is starting."""

    cdp_url: str
    attempt: int
    max_attempts: int
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserReconnectingEvent', 30.0))  # seconds


class BrowserReconnectedEvent(BaseEvent):
    """WebSocket reconnection succeeded."""

    cdp_url: str
    attempt: int
    downtime_seconds: float
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_BrowserReconnectedEvent', 30.0))  # seconds


# ============================================================================
# Storage State Events
# ============================================================================


class SaveStorageStateEvent(BaseEvent):
    """Request to save browser storage state."""

    path: str | None = None  # Optional path, uses profile default if not provided
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_SaveStorageStateEvent', 45.0))  # seconds


class StorageStateSavedEvent(BaseEvent):
    """Notification that storage state was saved."""

    path: str
    cookies_count: int
    origins_count: int
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_StorageStateSavedEvent', 30.0))  # seconds


class LoadStorageStateEvent(BaseEvent):
    """Request to load browser storage state."""

    path: str | None = None  # Optional path, uses profile default if not provided
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_LoadStorageStateEvent', 45.0))  # seconds


# TODO: refactor this to:
# - on_BrowserConnectedEvent() -> dispatch(LoadStorageStateEvent()) -> _copy_storage_state_from_json_to_browser(json_file, new_cdp_session) + return storage_state from handler
# - on_BrowserStopEvent() -> dispatch(SaveStorageStateEvent()) -> _copy_storage_state_from_browser_to_json(new_cdp_session, json_file)
# and get rid of StorageStateSavedEvent and StorageStateLoadedEvent, have the original events + provide handler return values for any results


class StorageStateLoadedEvent(BaseEvent):
    """Notification that storage state was loaded."""

    path: str
    cookies_count: int
    origins_count: int
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_StorageStateLoadedEvent', 30.0))  # seconds
# ============================================================================
# File Download Events
# ============================================================================
class DownloadStartedEvent(BaseEvent):
    """A file download has started (CDP downloadWillBegin received)."""

    guid: str  # CDP download GUID to correlate with FileDownloadedEvent
    url: str
    suggested_filename: str
    auto_download: bool = False  # Whether this was triggered automatically
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_DownloadStartedEvent', 5.0))  # seconds


class DownloadProgressEvent(BaseEvent):
    """A file download progress update (CDP downloadProgress received)."""

    guid: str  # CDP download GUID to correlate with other download events
    received_bytes: int
    total_bytes: int  # 0 if unknown
    state: str  # 'inProgress', 'completed', or 'canceled'
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_DownloadProgressEvent', 5.0))  # seconds


class FileDownloadedEvent(BaseEvent):
    """A file has been downloaded."""

    guid: str | None = None  # CDP download GUID to correlate with DownloadStartedEvent
    url: str
    path: str
    file_name: str
    file_size: int
    file_type: str | None = None  # e.g., 'pdf', 'zip', 'docx', etc.
    mime_type: str | None = None  # e.g., 'application/pdf'
    from_cache: bool = False
    auto_download: bool = False  # Whether this was an automatic download (e.g., PDF auto-download)
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_FileDownloadedEvent', 30.0))  # seconds


class AboutBlankDVDScreensaverShownEvent(BaseEvent):
    """AboutBlankWatchdog has shown DVD screensaver animation on an about:blank tab."""

    target_id: TargetID
    error: str | None = None


class DialogOpenedEvent(BaseEvent):
    """Event dispatched when a JavaScript dialog is opened and handled."""

    dialog_type: str  # 'alert', 'confirm', 'prompt', or 'beforeunload'
    message: str
    url: str
    frame_id: str | None = None  # Can be None when frameId is not provided by CDP
    # target_id: TargetID # TODO: add this to avoid needing target_id_from_frame() later
# target_id: TargetID # TODO: add this to avoid needing target_id_from_frame() later
# ============================================================================
# Captcha Solver Events
# ============================================================================
class CaptchaSolverStartedEvent(BaseEvent):
    """Captcha solving started by the browser proxy.

    Emitted when the browser proxy detects a CAPTCHA and begins solving it.
    The agent should wait for a corresponding CaptchaSolverFinishedEvent before proceeding.
    """

    target_id: TargetID
    vendor: str  # e.g. 'cloudflare', 'recaptcha', 'hcaptcha', 'datadome', 'perimeterx', 'geetest'
    url: str
    started_at: int  # Unix millis
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_CaptchaSolverStartedEvent', 5.0))


class CaptchaSolverFinishedEvent(BaseEvent):
    """Captcha solving finished by the browser proxy.

    Emitted when the browser proxy finishes solving a CAPTCHA (successfully or not).
    """

    target_id: TargetID
    vendor: str  # same vendor string as the matching CaptchaSolverStartedEvent
    url: str
    duration_ms: int
    finished_at: int  # Unix millis
    success: bool  # Whether the captcha was solved successfully
    event_timeout: float | None = Field(default_factory=lambda: _get_timeout('TIMEOUT_CaptchaSolverFinishedEvent', 5.0))
# Note: Model rebuilding for forward references is handled in the importing modules
# Events with 'EnhancedDOMTreeNode' forward references (ClickElementEvent, TypeTextEvent,
# ScrollEvent, UploadFileEvent) need model_rebuild() called after imports are complete
def _check_event_names_dont_overlap():
    """
    Verify that every event class defined in this module has a valid name and
    that no event name is a substring of another.
    (Naive O(n^2) pairwise scan — acceptable for now; optimize when >20 events.)
    """
    event_names = set()
    for key, value in globals().items():
        if key.startswith('_') or key == 'BaseEvent':
            continue
        if inspect.isclass(value) and issubclass(value, BaseEvent):
            # strip any parametrized-generic suffix like "Foo[int]" down to "Foo"
            event_names.add(key.split('[')[0])
    for name_a in event_names:
        assert name_a.endswith('Event'), f'Event with name {name_a} does not end with "Event"'
        for name_b in event_names:
            if name_a == name_b:  # Skip self-comparison
                continue
            assert name_a not in name_b, (
                f'Event with name {name_a} is a substring of {name_b}, all events must be completely unique to avoid find-and-replace accidents'
            )
            # overlapping event names are a nightmare to trace and rename later, dont do it!
            # e.g. ClickEvent and FailedClickEvent are terrible names because one is a substring of the other;
            # they must be ClickEvent and ClickFailedEvent to preserve the usefulness of codebase grep/sed/awk as refactoring tools.


# at import time, do a quick check that all event names defined above are valid and non-overlapping.
# this is hand written in blood by a human! not LLM slop. feel free to optimize but do not remove it without a good reason.
_check_event_names_dont_overlap()
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/events.py",
"license": "MIT License",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/python_highlights.py | """Python-based highlighting system for drawing bounding boxes on screenshots.
This module replaces JavaScript-based highlighting with fast Python image processing
to draw bounding boxes around interactive elements directly on screenshots.
"""
import asyncio
import base64
import io
import logging
import os
from PIL import Image, ImageDraw, ImageFont
from browser_use.dom.views import DOMSelectorMap, EnhancedDOMTreeNode
from browser_use.observability import observe_debug
from browser_use.utils import time_execution_async
logger = logging.getLogger(__name__)
# Font cache to prevent repeated font loading and reduce memory usage.
# Keyed by ('system_font', size); a cached None means "lookup already failed,
# don't re-scan the filesystem for this size".
_FONT_CACHE: dict[tuple[str, int], ImageFont.FreeTypeFont | None] = {}
# Cross-platform font paths, tried in order until one loads successfully
_FONT_PATHS = [
	'/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf',  # Linux (Debian/Ubuntu)
	'/usr/share/fonts/TTF/DejaVuSans-Bold.ttf',  # Linux (Arch/Fedora)
	'/System/Library/Fonts/Arial.ttf',  # macOS
	'C:\\Windows\\Fonts\\arial.ttf',  # Windows
	'arial.ttf',  # Windows (system path)
	'Arial Bold.ttf',  # macOS alternative
	'/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf',  # Linux alternative
]
def get_cross_platform_font(font_size: int) -> ImageFont.FreeTypeFont | None:
	"""Load a system font at the requested size, caching the result.

	The first loadable path in _FONT_PATHS wins. Failures are cached as None
	so the filesystem is never re-scanned for the same size.

	Args:
		font_size: Point size of the font to load.

	Returns:
		A FreeTypeFont instance, or None when no known system font exists.
	"""
	cache_key = ('system_font', font_size)
	if cache_key not in _FONT_CACHE:
		loaded = None
		for candidate_path in _FONT_PATHS:
			try:
				loaded = ImageFont.truetype(candidate_path, font_size)
			except OSError:
				continue
			break
		# Cache even a failed (None) lookup to avoid repeated attempts
		_FONT_CACHE[cache_key] = loaded
	return _FONT_CACHE[cache_key]
def cleanup_font_cache() -> None:
	"""Release all cached fonts to prevent memory leaks in long-running applications.

	The cache repopulates lazily on the next get_cross_platform_font() call.
	"""
	# dict.clear() mutates in place, so no `global` declaration is required
	_FONT_CACHE.clear()
# Color scheme for different element types
ELEMENT_COLORS = {
'button': '#FF6B6B', # Red for buttons
'input': '#4ECDC4', # Teal for inputs
'select': '#45B7D1', # Blue for dropdowns
'a': '#96CEB4', # Green for links
'textarea': '#FF8C42', # Orange for text areas (was yellow, now more visible)
'default': '#DDA0DD', # Light purple for other interactive elements
}
# Element type mappings
ELEMENT_TYPE_MAP = {
'button': 'button',
'input': 'input',
'select': 'select',
'a': 'a',
'textarea': 'textarea',
}
def get_element_color(tag_name: str, element_type: str | None = None) -> str:
"""Get color for element based on tag name and type."""
# Check input type first
if tag_name == 'input' and element_type:
if element_type in ['button', 'submit']:
return ELEMENT_COLORS['button']
# Use tag-based color
return ELEMENT_COLORS.get(tag_name.lower(), ELEMENT_COLORS['default'])
def should_show_index_overlay(backend_node_id: int | None) -> bool:
"""Determine if index overlay should be shown."""
return backend_node_id is not None
def draw_enhanced_bounding_box_with_text(
draw, # ImageDraw.Draw - avoiding type annotation due to PIL typing issues
bbox: tuple[int, int, int, int],
color: str,
text: str | None = None,
font: ImageFont.FreeTypeFont | None = None,
element_type: str = 'div',
image_size: tuple[int, int] = (2000, 1500),
device_pixel_ratio: float = 1.0,
) -> None:
"""Draw an enhanced bounding box with much bigger index containers and dashed borders."""
x1, y1, x2, y2 = bbox
# Draw dashed bounding box with pattern: 1 line, 2 spaces, 1 line, 2 spaces...
dash_length = 4
gap_length = 8
line_width = 2
# Helper function to draw dashed line
def draw_dashed_line(start_x, start_y, end_x, end_y):
if start_x == end_x: # Vertical line
y = start_y
while y < end_y:
dash_end = min(y + dash_length, end_y)
draw.line([(start_x, y), (start_x, dash_end)], fill=color, width=line_width)
y += dash_length + gap_length
else: # Horizontal line
x = start_x
while x < end_x:
dash_end = min(x + dash_length, end_x)
draw.line([(x, start_y), (dash_end, start_y)], fill=color, width=line_width)
x += dash_length + gap_length
# Draw dashed rectangle
draw_dashed_line(x1, y1, x2, y1) # Top
draw_dashed_line(x2, y1, x2, y2) # Right
draw_dashed_line(x2, y2, x1, y2) # Bottom
draw_dashed_line(x1, y2, x1, y1) # Left
# Draw much bigger index overlay if we have index text
if text:
try:
# Scale font size for appropriate sizing across different resolutions
img_width, img_height = image_size
css_width = img_width # / device_pixel_ratio
# Much smaller scaling - 1% of CSS viewport width, max 16px to prevent huge highlights
base_font_size = max(10, min(20, int(css_width * 0.01)))
# Use shared font loading function with caching
big_font = get_cross_platform_font(base_font_size)
if big_font is None:
big_font = font # Fallback to original font if no system fonts found
# Get text size with bigger font
if big_font:
bbox_text = draw.textbbox((0, 0), text, font=big_font)
text_width = bbox_text[2] - bbox_text[0]
text_height = bbox_text[3] - bbox_text[1]
else:
# Fallback for default font
bbox_text = draw.textbbox((0, 0), text)
text_width = bbox_text[2] - bbox_text[0]
text_height = bbox_text[3] - bbox_text[1]
# Scale padding appropriately for different resolutions
padding = max(4, min(10, int(css_width * 0.005))) # 0.3% of CSS width, max 4px
element_width = x2 - x1
element_height = y2 - y1
# Container dimensions
container_width = text_width + padding * 2
container_height = text_height + padding * 2
# Position in top center - for small elements, place further up to avoid blocking content
# Center horizontally within the element
bg_x1 = x1 + (element_width - container_width) // 2
# Simple rule: if element is small, place index further up to avoid blocking icons
if element_width < 60 or element_height < 30:
# Small element: place well above to avoid blocking content
bg_y1 = max(0, y1 - container_height - 5)
else:
# Regular element: place inside with small offset
bg_y1 = y1 + 2
bg_x2 = bg_x1 + container_width
bg_y2 = bg_y1 + container_height
# Center the number within the index box with proper baseline handling
text_x = bg_x1 + (container_width - text_width) // 2
# Add extra vertical space to prevent clipping
text_y = bg_y1 + (container_height - text_height) // 2 - bbox_text[1] # Subtract top offset
# Ensure container stays within image bounds
img_width, img_height = image_size
if bg_x1 < 0:
offset = -bg_x1
bg_x1 += offset
bg_x2 += offset
text_x += offset
if bg_y1 < 0:
offset = -bg_y1
bg_y1 += offset
bg_y2 += offset
text_y += offset
if bg_x2 > img_width:
offset = bg_x2 - img_width
bg_x1 -= offset
bg_x2 -= offset
text_x -= offset
if bg_y2 > img_height:
offset = bg_y2 - img_height
bg_y1 -= offset
bg_y2 -= offset
text_y -= offset
# Draw bigger background rectangle with thicker border
draw.rectangle([bg_x1, bg_y1, bg_x2, bg_y2], fill=color, outline='white', width=2)
# Draw white text centered in the index box
draw.text((text_x, text_y), text, fill='white', font=big_font or font)
except Exception as e:
logger.debug(f'Failed to draw enhanced text overlay: {e}')
def draw_bounding_box_with_text(
draw, # ImageDraw.Draw - avoiding type annotation due to PIL typing issues
bbox: tuple[int, int, int, int],
color: str,
text: str | None = None,
font: ImageFont.FreeTypeFont | None = None,
) -> None:
"""Draw a bounding box with optional text overlay."""
x1, y1, x2, y2 = bbox
# Draw dashed bounding box
dash_length = 2
gap_length = 6
# Top edge
x = x1
while x < x2:
end_x = min(x + dash_length, x2)
draw.line([(x, y1), (end_x, y1)], fill=color, width=2)
draw.line([(x, y1 + 1), (end_x, y1 + 1)], fill=color, width=2)
x += dash_length + gap_length
# Bottom edge
x = x1
while x < x2:
end_x = min(x + dash_length, x2)
draw.line([(x, y2), (end_x, y2)], fill=color, width=2)
draw.line([(x, y2 - 1), (end_x, y2 - 1)], fill=color, width=2)
x += dash_length + gap_length
# Left edge
y = y1
while y < y2:
end_y = min(y + dash_length, y2)
draw.line([(x1, y), (x1, end_y)], fill=color, width=2)
draw.line([(x1 + 1, y), (x1 + 1, end_y)], fill=color, width=2)
y += dash_length + gap_length
# Right edge
y = y1
while y < y2:
end_y = min(y + dash_length, y2)
draw.line([(x2, y), (x2, end_y)], fill=color, width=2)
draw.line([(x2 - 1, y), (x2 - 1, end_y)], fill=color, width=2)
y += dash_length + gap_length
# Draw index overlay if we have index text
if text:
try:
# Get text size
if font:
bbox_text = draw.textbbox((0, 0), text, font=font)
text_width = bbox_text[2] - bbox_text[0]
text_height = bbox_text[3] - bbox_text[1]
else:
# Fallback for default font
bbox_text = draw.textbbox((0, 0), text)
text_width = bbox_text[2] - bbox_text[0]
text_height = bbox_text[3] - bbox_text[1]
# Smart positioning based on element size
padding = 5
element_width = x2 - x1
element_height = y2 - y1
element_area = element_width * element_height
index_box_area = (text_width + padding * 2) * (text_height + padding * 2)
# Calculate size ratio to determine positioning strategy
size_ratio = element_area / max(index_box_area, 1)
if size_ratio < 4:
# Very small elements: place outside in bottom-right corner
text_x = x2 + padding
text_y = y2 - text_height
# Ensure it doesn't go off screen
text_x = min(text_x, 1200 - text_width - padding)
text_y = max(text_y, 0)
elif size_ratio < 16:
# Medium elements: place in bottom-right corner inside
text_x = x2 - text_width - padding
text_y = y2 - text_height - padding
else:
# Large elements: place in center
text_x = x1 + (element_width - text_width) // 2
text_y = y1 + (element_height - text_height) // 2
# Ensure text stays within bounds
text_x = max(0, min(text_x, 1200 - text_width))
text_y = max(0, min(text_y, 800 - text_height))
# Draw background rectangle for maximum contrast
bg_x1 = text_x - padding
bg_y1 = text_y - padding
bg_x2 = text_x + text_width + padding
bg_y2 = text_y + text_height + padding
# Use white background with thick black border for maximum visibility
draw.rectangle([bg_x1, bg_y1, bg_x2, bg_y2], fill='white', outline='black', width=2)
# Draw bold dark text on light background for best contrast
draw.text((text_x, text_y), text, fill='black', font=font)
except Exception as e:
logger.debug(f'Failed to draw text overlay: {e}')
def process_element_highlight(
	element_id: int,
	element: EnhancedDOMTreeNode,
	draw,
	device_pixel_ratio: float,
	font,
	filter_highlight_ids: bool,
	image_size: tuple[int, int],
) -> None:
	"""Process a single element for highlighting.

	Draws a dashed box (and possibly an index label) for *element* onto *draw*.
	Any failure is logged at debug level and swallowed so one bad element does
	not abort the whole screenshot.

	Args:
		element_id: Key of the element in the selector map (used only in the error log).
		element: DOM node whose absolute_position is in CSS pixels.
		draw: PIL ImageDraw context for the screenshot.
		device_pixel_ratio: Scale factor from CSS pixels to screenshot pixels.
		font: Font for the index label (may be None).
		filter_highlight_ids: When True, only label elements whose meaningful
			text is too short to identify them.
		image_size: (width, height) of the screenshot in pixels.
	"""
	try:
		# Use absolute_position coordinates directly
		if not element.absolute_position:
			return
		bounds = element.absolute_position
		# Scale coordinates from CSS pixels to device pixels for screenshot
		# The screenshot is captured at device pixel resolution, but coordinates are in CSS pixels
		x1 = int(bounds.x * device_pixel_ratio)
		y1 = int(bounds.y * device_pixel_ratio)
		x2 = int((bounds.x + bounds.width) * device_pixel_ratio)
		y2 = int((bounds.y + bounds.height) * device_pixel_ratio)
		# Ensure coordinates are within image bounds
		img_width, img_height = image_size
		x1 = max(0, min(x1, img_width))
		y1 = max(0, min(y1, img_height))
		x2 = max(x1, min(x2, img_width))
		y2 = max(y1, min(y2, img_height))
		# Skip if bounding box is too small or invalid
		if x2 - x1 < 2 or y2 - y1 < 2:
			return
		# Get element color based on tag name and type attribute
		tag_name = element.tag_name if hasattr(element, 'tag_name') else 'div'
		element_type = None
		if hasattr(element, 'attributes') and element.attributes:
			element_type = element.attributes.get('type')
		color = get_element_color(tag_name, element_type)
		# Get element index for overlay and apply filtering
		backend_node_id = getattr(element, 'backend_node_id', None)
		index_text = None
		if backend_node_id is not None:
			if filter_highlight_ids:
				# Use the meaningful text that matches what the LLM sees
				meaningful_text = element.get_meaningful_text_for_llm()
				# Show ID only when the meaningful text is fewer than 3 characters
				# (too short for the LLM to identify the element by text alone)
				if len(meaningful_text) < 3:
					index_text = str(backend_node_id)
			else:
				# Always show ID when filter is disabled
				index_text = str(backend_node_id)
		# Draw enhanced bounding box with bigger index
		draw_enhanced_bounding_box_with_text(
			draw, (x1, y1, x2, y2), color, index_text, font, tag_name, image_size, device_pixel_ratio
		)
	except Exception as e:
		logger.debug(f'Failed to draw highlight for element {element_id}: {e}')
@observe_debug(ignore_input=True, ignore_output=True, name='create_highlighted_screenshot')
@time_execution_async('create_highlighted_screenshot')
async def create_highlighted_screenshot(
	screenshot_b64: str,
	selector_map: DOMSelectorMap,
	device_pixel_ratio: float = 1.0,
	viewport_offset_x: int = 0,
	viewport_offset_y: int = 0,
	filter_highlight_ids: bool = True,
) -> str:
	"""Create a highlighted screenshot with bounding boxes around interactive elements.

	Args:
		screenshot_b64: Base64 encoded screenshot
		selector_map: Map of interactive elements with their positions
		device_pixel_ratio: Device pixel ratio for scaling coordinates
		viewport_offset_x: X offset for viewport positioning (not referenced in this body)
		viewport_offset_y: Y offset for viewport positioning (not referenced in this body)
		filter_highlight_ids: Whether to suppress ID labels for elements with enough meaningful text

	Returns:
		Base64 encoded highlighted screenshot, or the original screenshot on any failure
	"""
	try:
		# Decode screenshot
		screenshot_data = base64.b64decode(screenshot_b64)
		image = Image.open(io.BytesIO(screenshot_data)).convert('RGBA')
		# Create drawing context
		draw = ImageDraw.Draw(image)
		# Load font using shared function with caching
		font = get_cross_platform_font(12)
		# If no system fonts found, font remains None and will use default font
		# Process elements sequentially to avoid ImageDraw thread safety issues
		# PIL ImageDraw is not thread-safe, so we process elements one by one
		for element_id, element in selector_map.items():
			process_element_highlight(element_id, element, draw, device_pixel_ratio, font, filter_highlight_ids, image.size)
		# Convert back to base64
		output_buffer = io.BytesIO()
		try:
			image.save(output_buffer, format='PNG')
			output_buffer.seek(0)
			highlighted_b64 = base64.b64encode(output_buffer.getvalue()).decode('utf-8')
			logger.debug(f'Successfully created highlighted screenshot with {len(selector_map)} elements')
			return highlighted_b64
		finally:
			# Explicit cleanup to prevent memory leaks (runs after the return value is computed)
			output_buffer.close()
			if 'image' in locals():
				image.close()
	except Exception as e:
		logger.error(f'Failed to create highlighted screenshot: {e}')
		# Clean up on error as well
		if 'image' in locals():
			image.close()
		# Return original screenshot on error
		return screenshot_b64
async def get_viewport_info_from_cdp(cdp_session) -> tuple[float, int, int]:
	"""Query CDP layout metrics for viewport scaling and scroll offsets.

	Returns:
		Tuple of (device_pixel_ratio, scroll_x, scroll_y); falls back to
		(1.0, 0, 0) when the CDP call or parsing fails.
	"""
	try:
		# Layout metrics include viewport geometry in both CSS and device pixels
		metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)
		visual = metrics.get('visualViewport', {})
		css_visual = metrics.get('cssVisualViewport', {})
		css_layout = metrics.get('cssLayoutViewport', {})
		# Device pixel ratio = device-pixel width / CSS-pixel width
		css_width = css_visual.get('clientWidth', css_layout.get('clientWidth', 1280.0))
		device_width = visual.get('clientWidth', css_width)
		ratio = device_width / css_width if css_width > 0 else 1.0
		# Scroll position is reported in CSS pixels
		return float(ratio), int(css_visual.get('pageX', 0)), int(css_visual.get('pageY', 0))
	except Exception as e:
		logger.debug(f'Failed to get viewport info from CDP: {e}')
		return 1.0, 0, 0
@time_execution_async('create_highlighted_screenshot_async')
async def create_highlighted_screenshot_async(
	screenshot_b64: str, selector_map: DOMSelectorMap, cdp_session=None, filter_highlight_ids: bool = True
) -> str:
	"""Async wrapper for creating highlighted screenshots.

	Args:
		screenshot_b64: Base64 encoded screenshot
		selector_map: Map of interactive elements
		cdp_session: CDP session for getting viewport info (scaling falls back to 1.0 without it)
		filter_highlight_ids: Whether to filter element IDs based on meaningful text

	Returns:
		Base64 encoded highlighted screenshot
	"""
	# Get viewport information if CDP session is available
	device_pixel_ratio = 1.0
	viewport_offset_x = 0
	viewport_offset_y = 0
	if cdp_session:
		try:
			device_pixel_ratio, viewport_offset_x, viewport_offset_y = await get_viewport_info_from_cdp(cdp_session)
		except Exception as e:
			logger.debug(f'Failed to get viewport info from CDP: {e}')
	# Create highlighted screenshot with async processing
	final_screenshot = await create_highlighted_screenshot(
		screenshot_b64, selector_map, device_pixel_ratio, viewport_offset_x, viewport_offset_y, filter_highlight_ids
	)
	# Optionally persist the result for debugging; the blocking file write is
	# moved off the event loop via asyncio.to_thread.
	filename = os.getenv('BROWSER_USE_SCREENSHOT_FILE')
	if filename:

		def _write_screenshot():
			try:
				with open(filename, 'wb') as f:
					f.write(base64.b64decode(final_screenshot))
				logger.debug(f'Saved screenshot to {filename}')
			except Exception as e:
				# Bug fix: the failure message previously logged the literal
				# text '(unknown)' instead of the actual target path.
				logger.warning(f'Failed to save screenshot to {filename}: {e}')

		await asyncio.to_thread(_write_screenshot)
	return final_screenshot
# Export the cleanup function for external use in long-running applications.
# NOTE(review): the per-element drawing helpers are not listed here - presumably
# internal; confirm before importing them via `from ... import *`.
__all__ = ['create_highlighted_screenshot', 'create_highlighted_screenshot_async', 'cleanup_font_cache']
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/python_highlights.py",
"license": "MIT License",
"lines": 449,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/session.py | """Event-driven browser session with backwards compatibility."""
import asyncio
import logging
import time
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal, Self, Union, cast, overload
from urllib.parse import urlparse, urlunparse
from uuid import UUID
import httpx
from bubus import EventBus
from cdp_use import CDPClient
from cdp_use.cdp.fetch import AuthRequiredEvent, RequestPausedEvent
from cdp_use.cdp.network import Cookie
from cdp_use.cdp.target import SessionID, TargetID
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from uuid_extensions import uuid7str
from browser_use.browser.cloud.cloud import CloudBrowserAuthError, CloudBrowserClient, CloudBrowserError
# CDP logging is now handled by setup_logging() in logging_config.py
# It automatically sets CDP logs to the same level as browser_use logs
from browser_use.browser.cloud.views import CloudBrowserParams, CreateBrowserRequest, ProxyCountryCode
from browser_use.browser.events import (
AgentFocusChangedEvent,
BrowserConnectedEvent,
BrowserErrorEvent,
BrowserLaunchEvent,
BrowserLaunchResult,
BrowserReconnectedEvent,
BrowserReconnectingEvent,
BrowserStartEvent,
BrowserStateRequestEvent,
BrowserStopEvent,
BrowserStoppedEvent,
CloseTabEvent,
FileDownloadedEvent,
NavigateToUrlEvent,
NavigationCompleteEvent,
NavigationStartedEvent,
SwitchTabEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.profile import BrowserProfile, ProxySettings
from browser_use.browser.views import BrowserStateSummary, TabInfo
from browser_use.dom.views import DOMRect, EnhancedDOMTreeNode, TargetInfo
from browser_use.observability import observe_debug
from browser_use.utils import _log_pretty_url, create_task_with_error_handling, is_new_tab_page
if TYPE_CHECKING:
from browser_use.actor.page import Page
from browser_use.browser.demo_mode import DemoMode
from browser_use.browser.watchdogs.captcha_watchdog import CaptchaWaitResult
# Shared default profile used when a session is constructed without an explicit one
DEFAULT_BROWSER_PROFILE = BrowserProfile()
_LOGGED_UNIQUE_SESSION_IDS = set()  # track unique session IDs that have been logged to make sure we always assign a unique enough id to new sessions and avoid ambiguity in logs
# ANSI escape codes used to colorize log output
red = '\033[91m'
reset = '\033[0m'
class Target(BaseModel):
	"""Browser target (page, iframe, worker) - the actual entity being controlled.

	A target represents a browsing context with its own URL, title, and type.
	Multiple CDP sessions can attach to the same target for communication.
	"""

	model_config = ConfigDict(arbitrary_types_allowed=True, revalidate_instances='never')

	target_id: TargetID  # CDP identifier for this target
	target_type: str  # 'page', 'iframe', 'worker', etc.
	url: str = 'about:blank'  # last known URL of the target
	title: str = 'Unknown title'  # last known page title
class CDPSession(BaseModel):
	"""CDP communication channel to a target.

	A session is a connection that allows sending CDP commands to a specific target.
	Multiple sessions can attach to the same target.
	"""

	model_config = ConfigDict(arbitrary_types_allowed=True, revalidate_instances='never')

	cdp_client: CDPClient  # root CDP client this session sends commands through
	target_id: TargetID  # target this session is attached to
	session_id: SessionID  # CDP session identifier used to route commands
	# Lifecycle monitoring (populated by SessionManager)
	# NOTE(review): types are opaque here (Any) - presumably an event buffer and
	# its lock, filled in externally; confirm against SessionManager.
	_lifecycle_events: Any = PrivateAttr(default=None)
	_lifecycle_lock: Any = PrivateAttr(default=None)
class BrowserSession(BaseModel):
"""Event-driven browser session with backwards compatibility.
This class provides a 2-layer architecture:
- High-level event handling for agents/tools
- Direct CDP/Playwright calls for browser operations
Supports both event-driven and imperative calling styles.
Browser configuration is stored in the browser_profile, session identity in direct fields:
```python
# Direct settings (recommended for most users)
session = BrowserSession(headless=True, user_data_dir='./profile')
# Or use a profile (for advanced use cases)
session = BrowserSession(browser_profile=BrowserProfile(...))
# Access session fields directly, browser settings via profile or property
print(session.id) # Session field
```
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
validate_assignment=True,
extra='forbid',
revalidate_instances='never', # resets private attrs on every model rebuild
)
# Overload 1: Cloud browser mode (use cloud-specific params)
@overload
def __init__(
self,
*,
# Cloud browser params - use these for cloud mode
cloud_profile_id: UUID | str | None = None,
cloud_proxy_country_code: ProxyCountryCode | None = None,
cloud_timeout: int | None = None,
# Backward compatibility aliases
profile_id: UUID | str | None = None,
proxy_country_code: ProxyCountryCode | None = None,
timeout: int | None = None,
use_cloud: bool | None = None,
cloud_browser: bool | None = None, # Backward compatibility alias
cloud_browser_params: CloudBrowserParams | None = None,
# Common params that work with cloud
id: str | None = None,
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
captcha_solver: bool | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
) -> None: ...
# Overload 2: Local browser mode (use local browser params)
@overload
def __init__(
self,
*,
# Core configuration for local
id: str | None = None,
cdp_url: str | None = None,
browser_profile: BrowserProfile | None = None,
# Local browser launch params
executable_path: str | Path | None = None,
headless: bool | None = None,
user_data_dir: str | Path | None = None,
args: list[str] | None = None,
downloads_path: str | Path | None = None,
# Common params
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
# All other local params
env: dict[str, str | float | bool] | None = None,
ignore_default_args: list[str] | Literal[True] | None = None,
channel: str | None = None,
chromium_sandbox: bool | None = None,
devtools: bool | None = None,
traces_dir: str | Path | None = None,
accept_downloads: bool | None = None,
permissions: list[str] | None = None,
user_agent: str | None = None,
screen: dict | None = None,
viewport: dict | None = None,
no_viewport: bool | None = None,
device_scale_factor: float | None = None,
record_har_content: str | None = None,
record_har_mode: str | None = None,
record_har_path: str | Path | None = None,
record_video_dir: str | Path | None = None,
record_video_framerate: int | None = None,
record_video_size: dict | None = None,
storage_state: str | Path | dict[str, Any] | None = None,
disable_security: bool | None = None,
deterministic_rendering: bool | None = None,
proxy: ProxySettings | None = None,
enable_default_extensions: bool | None = None,
captcha_solver: bool | None = None,
window_size: dict | None = None,
window_position: dict | None = None,
filter_highlight_ids: bool | None = None,
profile_directory: str | None = None,
) -> None: ...
def __init__(
self,
# Core configuration
id: str | None = None,
cdp_url: str | None = None,
is_local: bool = False,
browser_profile: BrowserProfile | None = None,
# Cloud browser params (don't mix with local browser params)
cloud_profile_id: UUID | str | None = None,
cloud_proxy_country_code: ProxyCountryCode | None = None,
cloud_timeout: int | None = None,
# Backward compatibility aliases for cloud params
profile_id: UUID | str | None = None,
proxy_country_code: ProxyCountryCode | None = None,
timeout: int | None = None,
# BrowserProfile fields that can be passed directly
# From BrowserConnectArgs
headers: dict[str, str] | None = None,
# From BrowserLaunchArgs
env: dict[str, str | float | bool] | None = None,
executable_path: str | Path | None = None,
headless: bool | None = None,
args: list[str] | None = None,
ignore_default_args: list[str] | Literal[True] | None = None,
channel: str | None = None,
chromium_sandbox: bool | None = None,
devtools: bool | None = None,
downloads_path: str | Path | None = None,
traces_dir: str | Path | None = None,
# From BrowserContextArgs
accept_downloads: bool | None = None,
permissions: list[str] | None = None,
user_agent: str | None = None,
screen: dict | None = None,
viewport: dict | None = None,
no_viewport: bool | None = None,
device_scale_factor: float | None = None,
record_har_content: str | None = None,
record_har_mode: str | None = None,
record_har_path: str | Path | None = None,
record_video_dir: str | Path | None = None,
record_video_framerate: int | None = None,
record_video_size: dict | None = None,
# From BrowserLaunchPersistentContextArgs
user_data_dir: str | Path | None = None,
# From BrowserNewContextArgs
storage_state: str | Path | dict[str, Any] | None = None,
# BrowserProfile specific fields
## Cloud Browser Fields
use_cloud: bool | None = None,
cloud_browser: bool | None = None, # Backward compatibility alias
cloud_browser_params: CloudBrowserParams | None = None,
## Other params
disable_security: bool | None = None,
deterministic_rendering: bool | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
proxy: ProxySettings | None = None,
enable_default_extensions: bool | None = None,
captcha_solver: bool | None = None,
window_size: dict | None = None,
window_position: dict | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
filter_highlight_ids: bool | None = None,
auto_download_pdfs: bool | None = None,
profile_directory: str | None = None,
cookie_whitelist_domains: list[str] | None = None,
# DOM extraction layer configuration
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
# Iframe processing limits
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
):
# Following the same pattern as AgentSettings in service.py
# Only pass non-None values to avoid validation errors
profile_kwargs = {
k: v
for k, v in locals().items()
if k
not in [
'self',
'browser_profile',
'id',
'cloud_profile_id',
'cloud_proxy_country_code',
'cloud_timeout',
'profile_id',
'proxy_country_code',
'timeout',
]
and v is not None
}
# Handle backward compatibility: prefer cloud_* params over old names
final_profile_id = cloud_profile_id if cloud_profile_id is not None else profile_id
final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code
final_timeout = cloud_timeout if cloud_timeout is not None else timeout
# If any cloud params are provided, create cloud_browser_params
if final_profile_id is not None or final_proxy_country_code is not None or final_timeout is not None:
cloud_params = CreateBrowserRequest(
cloud_profile_id=final_profile_id,
cloud_proxy_country_code=final_proxy_country_code,
cloud_timeout=final_timeout,
)
profile_kwargs['cloud_browser_params'] = cloud_params
profile_kwargs['use_cloud'] = True
# Handle backward compatibility: map cloud_browser to use_cloud
if 'cloud_browser' in profile_kwargs:
profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser')
# If cloud_browser_params is set, force use_cloud=True
if cloud_browser_params is not None:
profile_kwargs['use_cloud'] = True
# if is_local is False but executable_path is provided, set is_local to True
if is_local is False and executable_path is not None:
profile_kwargs['is_local'] = True
# Only set is_local=True when cdp_url is missing if we're not using cloud browser
# (cloud browser will provide cdp_url later)
use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser')
if not cdp_url and not use_cloud:
profile_kwargs['is_local'] = True
# Create browser profile from direct parameters or use provided one
if browser_profile is not None:
# Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence)
merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs}
resolved_browser_profile = BrowserProfile(**merged_kwargs)
else:
resolved_browser_profile = BrowserProfile(**profile_kwargs)
# Initialize the Pydantic model
super().__init__(
id=id or str(uuid7str()),
browser_profile=resolved_browser_profile,
)
# Session configuration (session identity only)
id: str = Field(default_factory=lambda: str(uuid7str()), description='Unique identifier for this browser session')
# Browser configuration (reusable profile)
browser_profile: BrowserProfile = Field(
default_factory=lambda: DEFAULT_BROWSER_PROFILE,
description='BrowserProfile() options to use for the session, otherwise a default profile will be used',
)
# LLM screenshot resizing configuration
llm_screenshot_size: tuple[int, int] | None = Field(
default=None,
description='Target size (width, height) to resize screenshots before sending to LLM. Coordinates from LLM will be scaled back to original viewport size.',
)
# Cache of original viewport size for coordinate conversion (set when browser state is captured)
_original_viewport_size: tuple[int, int] | None = PrivateAttr(default=None)
@classmethod
def from_system_chrome(cls, profile_directory: str | None = None, **kwargs: Any) -> Self:
	"""Create a BrowserSession that drives the system's installed Chrome with a real user profile.

	Args:
		profile_directory: Chrome profile folder name (e.g. 'Default', 'Profile 1').
			When None, the first detected profile is auto-selected, falling back to 'Default'.
		**kwargs: forwarded verbatim to the BrowserSession constructor.

	Raises:
		RuntimeError: when no Chrome binary or no Chrome user-data directory can be located.
	"""
	from browser_use.skill_cli.utils import find_chrome_executable, get_chrome_profile_path, list_chrome_profiles

	chrome_binary = find_chrome_executable()
	if chrome_binary is None:
		raise RuntimeError(
			'Chrome not found. Please install Chrome or use Browser() with explicit executable_path.\n'
			'Expected locations:\n'
			' macOS: /Applications/Google Chrome.app/Contents/MacOS/Google Chrome\n'
			' Linux: /usr/bin/google-chrome or /usr/bin/chromium\n'
			' Windows: C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'
		)
	data_dir = get_chrome_profile_path(None)
	if data_dir is None:
		raise RuntimeError(
			'Could not detect Chrome profile directory for your platform.\n'
			'Expected locations:\n'
			' macOS: ~/Library/Application Support/Google/Chrome\n'
			' Linux: ~/.config/google-chrome\n'
			' Windows: %LocalAppData%\\Google\\Chrome\\User Data'
		)
	# Enumerate profiles up front so auto-selection can log which one was picked
	known_profiles = list_chrome_profiles()
	if profile_directory is None:
		if known_profiles:
			# Use first available profile
			first = known_profiles[0]
			profile_directory = first['directory']
			logging.getLogger('browser_use').info(f'Auto-selected Chrome profile: {first["name"]} ({profile_directory})')
		else:
			profile_directory = 'Default'
	return cls(
		executable_path=chrome_binary,
		user_data_dir=data_dir,
		profile_directory=profile_directory,
		**kwargs,
	)
@classmethod
def list_chrome_profiles(cls) -> list[dict[str, str]]:
	"""Enumerate the Chrome user profiles installed on this machine."""
	from browser_use.skill_cli.utils import list_chrome_profiles as _scan_profiles

	return _scan_profiles()
# Convenience properties for common browser settings
@property
def cdp_url(self) -> str | None:
"""CDP URL from browser profile."""
return self.browser_profile.cdp_url
@property
def is_local(self) -> bool:
	"""Whether the session drives a locally launched browser (mirrors profile.is_local)."""
	profile = self.browser_profile
	return profile.is_local
@property
def is_cdp_connected(self) -> bool:
	"""Whether the root CDP WebSocket exists and is currently in the OPEN state.

	Anything other than a live OPEN socket — missing client, missing ws, a
	closing/closed state, or any error while probing — reports False, so
	handlers never dispatch CDP commands over a broken connection (those would
	just hang until timeout).
	"""
	client = self._cdp_client_root
	if client is None or client.ws is None:
		return False
	try:
		from websockets.protocol import State

		return client.ws.state is State.OPEN
	except Exception:
		return False
async def wait_if_captcha_solving(self, timeout: float | None = None) -> 'CaptchaWaitResult | None':
"""Wait if a captcha is currently being solved by the browser proxy.
Returns:
A CaptchaWaitResult if we had to wait, or None if no captcha was in progress.
"""
if self._captcha_watchdog is not None:
return await self._captcha_watchdog.wait_if_captcha_solving(timeout=timeout)
return None
@property
def is_reconnecting(self) -> bool:
	"""True while a background WebSocket reconnection attempt is running."""
	in_progress = self._reconnecting
	return in_progress
@property
def cloud_browser(self) -> bool:
	"""Whether the session targets the hosted cloud browser service (mirrors profile.use_cloud)."""
	profile = self.browser_profile
	return profile.use_cloud
@property
def demo_mode(self) -> 'DemoMode | None':
	"""Lazily-constructed DemoMode helper; None whenever demo mode is disabled."""
	if not self.browser_profile.demo_mode:
		return None
	helper = self._demo_mode
	if helper is None:
		# Deferred import keeps demo-mode machinery out of the common path
		from browser_use.browser.demo_mode import DemoMode

		helper = DemoMode(self)
		self._demo_mode = helper
	return helper
# Main shared event bus for all browser session + all watchdogs
event_bus: EventBus = Field(default_factory=EventBus)
# Mutable public state - which target has agent focus (None until connected and focused)
agent_focus_target_id: TargetID | None = None
# Mutable private state shared between watchdogs
# Root CDP client; created in connect(), torn down in reset()
_cdp_client_root: CDPClient | None = PrivateAttr(default=None)
_connection_lock: Any = PrivateAttr(default=None)  # asyncio.Lock for preventing concurrent connections
# PUBLIC: SessionManager instance (OWNS all targets and sessions)
session_manager: Any = Field(default=None, exclude=True)  # SessionManager
# Per-focus caches, invalidated on focus change and on reset()
_cached_browser_state_summary: Any = PrivateAttr(default=None)
_cached_selector_map: dict[int, EnhancedDOMTreeNode] = PrivateAttr(default_factory=dict)
_downloaded_files: list[str] = PrivateAttr(default_factory=list)  # Track files downloaded during this session
_closed_popup_messages: list[str] = PrivateAttr(default_factory=list)  # Store messages from auto-closed JavaScript dialogs
# Watchdogs (populated by attach_all_watchdogs, cleared back to None by reset())
_crash_watchdog: Any | None = PrivateAttr(default=None)
_downloads_watchdog: Any | None = PrivateAttr(default=None)
_aboutblank_watchdog: Any | None = PrivateAttr(default=None)
_security_watchdog: Any | None = PrivateAttr(default=None)
_storage_state_watchdog: Any | None = PrivateAttr(default=None)
_local_browser_watchdog: Any | None = PrivateAttr(default=None)
_default_action_watchdog: Any | None = PrivateAttr(default=None)
_dom_watchdog: Any | None = PrivateAttr(default=None)
_screenshot_watchdog: Any | None = PrivateAttr(default=None)
_permissions_watchdog: Any | None = PrivateAttr(default=None)
_recording_watchdog: Any | None = PrivateAttr(default=None)
_captcha_watchdog: Any | None = PrivateAttr(default=None)
_watchdogs_attached: bool = PrivateAttr(default=False)
_cloud_browser_client: CloudBrowserClient = PrivateAttr(default_factory=lambda: CloudBrowserClient())
_demo_mode: 'DemoMode | None' = PrivateAttr(default=None)
# WebSocket reconnection state
# Max wait = attempts * timeout_per_attempt + sum(delays) + small buffer
# Default: 3 * 15s + (1+2+4)s + 2s = 54s
RECONNECT_WAIT_TIMEOUT: float = 54.0
_reconnecting: bool = PrivateAttr(default=False)
_reconnect_event: asyncio.Event = PrivateAttr(default_factory=asyncio.Event)
_reconnect_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)
_reconnect_task: asyncio.Task | None = PrivateAttr(default=None)
# Set during kill()/stop()/reset() so the auto-reconnect callback stays quiet
_intentional_stop: bool = PrivateAttr(default=False)
_logger: Any = PrivateAttr(default=None)
@property
def logger(self) -> Any:
"""Get instance-specific logger with session ID in the name"""
# **regenerate it every time** because our id and str(self) can change as browser connection state changes
# if self._logger is None or not self._cdp_client_root:
# self._logger = logging.getLogger(f'browser_use.{self}')
return logging.getLogger(f'browser_use.{self}')
@cached_property
def _id_for_logs(self) -> str:
	"""Short, human-friendly identifier for log lines.

	Prefers the CDP port when it looks unique (random-looking and not already
	claimed by another session); otherwise falls back to the last 4 chars of
	the session uuid, which is always unique but less recognizable.
	"""
	# Fallback: tail of the truly-random session uuid
	short_id = self.id[-4:]
	port = (self.cdp_url or 'no-cdp').rsplit(':', 1)[-1].split('/', 1)[0].strip()
	# NOTE(review): '922x' ports appear to be the default/fixed range, hence not identifying — confirm
	looks_random = not port.startswith('922')
	unseen = port not in _LOGGED_UNIQUE_SESSION_IDS
	if port and port.isdigit() and looks_random and unseen:
		# Claim the port so no other session reuses it as its log id
		_LOGGED_UNIQUE_SESSION_IDS.add(port)
		short_id = port
	return short_id
@property
def _tab_id_for_logs(self) -> str:
	"""Last 2 chars of the focused target id, or a red '--' placeholder when unfocused."""
	focused = self.agent_focus_target_id
	if focused:
		return focused[-2:]
	return f'{red}--{reset}'
def __repr__(self) -> str:
	"""Verbose debug representation including the CDP URL and full profile."""
	return (
		f'BrowserSession🅑 {self._id_for_logs} 🅣 {self._tab_id_for_logs} '
		f'(cdp_url={self.cdp_url}, profile={self.browser_profile})'
	)
def __str__(self) -> str:
	"""Compact log representation: session id + focused tab id."""
	return 'BrowserSession🅑 {} 🅣 {}'.format(self._id_for_logs, self._tab_id_for_logs)
async def reset(self) -> None:
	"""Clear all cached CDP sessions with proper cleanup.

	Teardown order matters here: the reconnect task is cancelled first (so it
	cannot race the teardown), the SessionManager (owner of all targets and
	sessions) is cleared next, then the root CDP WebSocket is closed so stale
	event handlers cannot fire, and finally caches, focus, and watchdog slots
	are wiped so the next start() begins from a clean slate.
	"""
	# Suppress auto-reconnect callback during teardown
	self._intentional_stop = True
	# Cancel any in-flight reconnection task
	if self._reconnect_task and not self._reconnect_task.done():
		self._reconnect_task.cancel()
	self._reconnect_task = None
	self._reconnecting = False
	self._reconnect_event.set()  # unblock any waiters
	cdp_status = 'connected' if self._cdp_client_root else 'not connected'
	session_mgr_status = 'exists' if self.session_manager else 'None'
	self.logger.debug(
		f'🔄 Resetting browser session (CDP: {cdp_status}, SessionManager: {session_mgr_status}, '
		f'focus: {self.agent_focus_target_id[-4:] if self.agent_focus_target_id else "None"})'
	)
	# Clear session manager (which owns _targets, _sessions, _target_sessions)
	if self.session_manager:
		await self.session_manager.clear()
	self.session_manager = None
	# Close CDP WebSocket before clearing to prevent stale event handlers
	if self._cdp_client_root:
		try:
			await self._cdp_client_root.stop()
			self.logger.debug('Closed CDP client WebSocket during reset')
		except Exception as e:
			# Best-effort close: a dead socket at teardown is not an error
			self.logger.debug(f'Error closing CDP client during reset: {e}')
	self._cdp_client_root = None  # type: ignore
	# Drop per-connection caches and focus — meaningless without a connection
	self._cached_browser_state_summary = None
	self._cached_selector_map.clear()
	self._downloaded_files.clear()
	self.agent_focus_target_id = None
	if self.is_local:
		# Local browsers get a fresh CDP endpoint on the next launch
		self.browser_profile.cdp_url = None
	# Detach all watchdogs so the next start() attaches fresh instances
	self._crash_watchdog = None
	self._downloads_watchdog = None
	self._aboutblank_watchdog = None
	self._security_watchdog = None
	self._storage_state_watchdog = None
	self._local_browser_watchdog = None
	self._default_action_watchdog = None
	self._dom_watchdog = None
	self._screenshot_watchdog = None
	self._permissions_watchdog = None
	self._recording_watchdog = None
	self._captcha_watchdog = None
	self._watchdogs_attached = False
	if self._demo_mode:
		self._demo_mode.reset()
	self._demo_mode = None
	# Re-enable the auto-reconnect callback for the next session lifecycle
	self._intentional_stop = False
	self.logger.info('✅ Browser session reset complete')
def model_post_init(self, __context) -> None:
	"""Register event handlers after model initialization.

	Creates the connection primitives (asyncio lock + reconnect event) and
	wires this session's on_* methods onto the shared event bus. Guards
	against double registration, which would make every event fire its
	handler twice.
	"""
	self._connection_lock = asyncio.Lock()
	# Initialize reconnect event as set (no reconnection pending)
	self._reconnect_event = asyncio.Event()
	self._reconnect_event.set()
	# Check if handlers are already registered to prevent duplicates
	from browser_use.browser.watchdog_base import BaseWatchdog
	start_handlers = self.event_bus.handlers.get('BrowserStartEvent', [])
	start_handler_names = [getattr(h, '__name__', str(h)) for h in start_handlers]
	if any('on_BrowserStartEvent' in name for name in start_handler_names):
		raise RuntimeError(
			'[BrowserSession] Duplicate handler registration attempted! '
			'on_BrowserStartEvent is already registered. '
			'This likely means BrowserSession was initialized multiple times with the same EventBus.'
		)
	# One handler per lifecycle event; BaseWatchdog records the attachment
	BaseWatchdog.attach_handler_to_session(self, BrowserStartEvent, self.on_BrowserStartEvent)
	BaseWatchdog.attach_handler_to_session(self, BrowserStopEvent, self.on_BrowserStopEvent)
	BaseWatchdog.attach_handler_to_session(self, NavigateToUrlEvent, self.on_NavigateToUrlEvent)
	BaseWatchdog.attach_handler_to_session(self, SwitchTabEvent, self.on_SwitchTabEvent)
	BaseWatchdog.attach_handler_to_session(self, TabCreatedEvent, self.on_TabCreatedEvent)
	BaseWatchdog.attach_handler_to_session(self, TabClosedEvent, self.on_TabClosedEvent)
	BaseWatchdog.attach_handler_to_session(self, AgentFocusChangedEvent, self.on_AgentFocusChangedEvent)
	BaseWatchdog.attach_handler_to_session(self, FileDownloadedEvent, self.on_FileDownloadedEvent)
	BaseWatchdog.attach_handler_to_session(self, CloseTabEvent, self.on_CloseTabEvent)
@observe_debug(ignore_input=True, ignore_output=True, name='browser_session_start')
async def start(self) -> None:
	"""Start the browser session by dispatching BrowserStartEvent and awaiting it.

	Any exception raised by a start handler is re-raised to the caller rather
	than being silently recorded on the event.
	"""
	event = self.event_bus.dispatch(BrowserStartEvent())
	await event
	# Surface handler exceptions to the caller instead of swallowing them
	await event.event_result(raise_if_any=True, raise_if_none=False)
async def kill(self) -> None:
	"""Kill the browser session and reset all state.

	Forcefully stops the browser (force=True ignores keep_alive), saving
	storage state first while CDP is still usable, then rebuilds a fresh
	EventBus so the instance can be started again from scratch.
	"""
	self._intentional_stop = True
	self.logger.debug('🛑 kill() called - stopping browser with force=True and resetting state')
	# First save storage state while CDP is still connected
	from browser_use.browser.events import SaveStorageStateEvent
	save_event = self.event_bus.dispatch(SaveStorageStateEvent())
	await save_event
	# Dispatch stop event to kill the browser
	await self.event_bus.dispatch(BrowserStopEvent(force=True))
	# Stop the event bus
	await self.event_bus.stop(clear=True, timeout=5)
	# Reset all state
	await self.reset()
	# Create fresh event bus (the old one was stopped and cleared above)
	self.event_bus = EventBus()
async def stop(self) -> None:
	"""Stop the browser session without killing the browser process.

	This clears event buses and cached state but keeps the browser alive.
	Useful when you want to clean up resources but plan to reconnect later.
	"""
	self._intentional_stop = True
	self.logger.debug('⏸️ stop() called - stopping browser gracefully (force=False) and resetting state')
	# First save storage state while CDP is still connected
	from browser_use.browser.events import SaveStorageStateEvent
	save_event = self.event_bus.dispatch(SaveStorageStateEvent())
	await save_event
	# Now dispatch BrowserStopEvent to notify watchdogs
	# (force=False lets keep_alive profiles survive this stop)
	await self.event_bus.dispatch(BrowserStopEvent(force=False))
	# Stop the event bus
	await self.event_bus.stop(clear=True, timeout=5)
	# Reset all state
	await self.reset()
	# Create fresh event bus (the old one was stopped and cleared above)
	self.event_bus = EventBus()
@observe_debug(ignore_input=True, ignore_output=True, name='browser_start_event_handler')
async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]:
	"""Handle browser start request.

	Resolves a CDP endpoint one of three ways: an explicit cdp_url already on
	the profile, a freshly created cloud browser (use_cloud /
	cloud_browser_params), or a locally launched browser (BrowserLaunchEvent
	handled by LocalBrowserWatchdog). Then connects the root CDP client —
	exactly once, under a lock — and announces BrowserConnectedEvent.

	Returns:
		Dict with 'cdp_url' key containing the CDP URL

	Raises:
		CloudBrowserAuthError / CloudBrowserError: cloud provisioning failures.
		ValueError: is_local=False but no cdp_url to connect to.
		RuntimeError: CDP connect timed out after 15s.

	Note: This method is idempotent - calling start() multiple times is safe.
	- If already connected, it skips reconnection
	- If you need to reset state, call stop() or kill() first
	"""
	# Initialize and attach all watchdogs FIRST so LocalBrowserWatchdog can handle BrowserLaunchEvent
	await self.attach_all_watchdogs()
	try:
		# If no CDP URL, launch local browser or cloud browser
		if not self.cdp_url:
			if self.browser_profile.use_cloud or self.browser_profile.cloud_browser_params is not None:
				# Use cloud browser service
				try:
					# Use cloud_browser_params if provided, otherwise create empty request
					cloud_params = self.browser_profile.cloud_browser_params or CreateBrowserRequest()
					cloud_browser_response = await self._cloud_browser_client.create_browser(cloud_params)
					self.browser_profile.cdp_url = cloud_browser_response.cdpUrl
					self.browser_profile.is_local = False
					self.logger.info('🌤️ Successfully connected to cloud browser service')
				except CloudBrowserAuthError:
					# Re-raise with actionable guidance instead of the raw auth error
					raise CloudBrowserAuthError(
						'Authentication failed for cloud browser service. Set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
					)
				except CloudBrowserError as e:
					raise CloudBrowserError(f'Failed to create cloud browser: {e}')
			elif self.is_local:
				# Launch local browser using event-driven approach
				launch_event = self.event_bus.dispatch(BrowserLaunchEvent())
				await launch_event
				# Get the CDP URL from LocalBrowserWatchdog handler result
				launch_result: BrowserLaunchResult = cast(
					BrowserLaunchResult, await launch_event.event_result(raise_if_none=True, raise_if_any=True)
				)
				self.browser_profile.cdp_url = launch_result.cdp_url
			else:
				raise ValueError('Got BrowserSession(is_local=False) but no cdp_url was provided to connect to!')
		assert self.cdp_url and '://' in self.cdp_url
		# Use lock to prevent concurrent connection attempts (race condition protection)
		async with self._connection_lock:
			# Only connect if not already connected
			if self._cdp_client_root is None:
				# Setup browser via CDP (for both local and remote cases)
				# Global timeout prevents connect() from hanging indefinitely on
				# slow/broken WebSocket connections (common on Lambda → remote browser)
				try:
					await asyncio.wait_for(self.connect(cdp_url=self.cdp_url), timeout=15.0)
				except TimeoutError:
					# Timeout cancels connect() via CancelledError, which bypasses
					# connect()'s `except Exception` cleanup (CancelledError is BaseException).
					# Clean up the partially-initialized client so future start attempts
					# don't skip reconnection due to _cdp_client_root being non-None.
					cdp_client = cast(CDPClient | None, self._cdp_client_root)
					if cdp_client is not None:
						try:
							await cdp_client.stop()
						except Exception:
							pass
					self._cdp_client_root = None
					manager = self.session_manager
					if manager is not None:
						try:
							await manager.clear()
						except Exception:
							pass
					self.session_manager = None
					self.agent_focus_target_id = None
					raise RuntimeError(
						f'connect() timed out after 15s — CDP connection to {self.cdp_url} is too slow or unresponsive'
					)
				assert self.cdp_client is not None
				# Notify that browser is connected (single place)
				# Ensure BrowserConnected handlers (storage_state restore) complete before
				# start() returns so cookies/storage are applied before navigation.
				await self.event_bus.dispatch(BrowserConnectedEvent(cdp_url=self.cdp_url))
				if self.browser_profile.demo_mode:
					try:
						demo = self.demo_mode
						if demo:
							await demo.ensure_ready()
					except Exception as exc:
						# Demo overlay is cosmetic — never fail startup over it
						self.logger.warning(f'[DemoMode] Failed to inject demo overlay: {exc}')
			else:
				self.logger.debug('Already connected to CDP, skipping reconnection')
				if self.browser_profile.demo_mode:
					try:
						demo = self.demo_mode
						if demo:
							await demo.ensure_ready()
					except Exception as exc:
						# Demo overlay is cosmetic — never fail startup over it
						self.logger.warning(f'[DemoMode] Failed to inject demo overlay: {exc}')
		# Return the CDP URL for other components
		return {'cdp_url': self.cdp_url}
	except Exception as e:
		# Broadcast the failure for observers, then re-raise to the caller
		self.event_bus.dispatch(
			BrowserErrorEvent(
				error_type='BrowserStartEventError',
				message=f'Failed to start browser: {type(e).__name__} {e}',
				details={'cdp_url': self.cdp_url, 'is_local': self.is_local},
			)
		)
		raise
async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None:
	"""Handle navigation requests - core browser functionality.

	Resolves which tab to navigate (reusing an existing about:blank tab when
	new_tab is requested, creating one only if necessary, falling back to the
	current tab on creation failure), switches agent focus to it, performs the
	navigation with lifecycle waiting, and emits NavigationStarted/Complete
	and AgentFocusChanged events. On failure, a NavigationCompleteEvent with
	error_message is emitted before the exception is re-raised.
	"""
	self.logger.debug(f'[on_NavigateToUrlEvent] Received NavigateToUrlEvent: url={event.url}, new_tab={event.new_tab}')
	if not self.agent_focus_target_id:
		self.logger.warning('Cannot navigate - browser not connected')
		return
	target_id = None
	current_target_id = self.agent_focus_target_id
	# If new_tab=True but we're already in a new tab, set new_tab=False
	current_target = self.session_manager.get_target(current_target_id)
	if event.new_tab and is_new_tab_page(current_target.url):
		self.logger.debug(f'[on_NavigateToUrlEvent] Already on blank tab ({current_target.url}), reusing')
		event.new_tab = False
	try:
		# Find or create target for navigation
		self.logger.debug(f'[on_NavigateToUrlEvent] Processing new_tab={event.new_tab}')
		if event.new_tab:
			page_targets = self.session_manager.get_all_page_targets()
			self.logger.debug(f'[on_NavigateToUrlEvent] Found {len(page_targets)} existing tabs')
			# Look for existing about:blank tab that's not the current one
			for idx, target in enumerate(page_targets):
				self.logger.debug(f'[on_NavigateToUrlEvent] Tab {idx}: url={target.url}, targetId={target.target_id}')
				if target.url == 'about:blank' and target.target_id != current_target_id:
					target_id = target.target_id
					self.logger.debug(f'Reusing existing about:blank tab #{target_id[-4:]}')
					break
			# Create new tab if no reusable one found
			if not target_id:
				self.logger.debug('[on_NavigateToUrlEvent] No reusable about:blank tab found, creating new tab...')
				try:
					target_id = await self._cdp_create_new_page('about:blank')
					self.logger.debug(f'Created new tab #{target_id[-4:]}')
					# Dispatch TabCreatedEvent for new tab
					await self.event_bus.dispatch(TabCreatedEvent(target_id=target_id, url='about:blank'))
				except Exception as e:
					self.logger.error(f'[on_NavigateToUrlEvent] Failed to create new tab: {type(e).__name__}: {e}')
					# Fall back to using current tab
					target_id = current_target_id
					self.logger.warning(f'[on_NavigateToUrlEvent] Falling back to current tab #{target_id[-4:]}')
		else:
			# Use current tab
			target_id = target_id or current_target_id
		# Switch to target tab if needed (for both new_tab=True and new_tab=False)
		if self.agent_focus_target_id is None or self.agent_focus_target_id != target_id:
			self.logger.debug(
				f'[on_NavigateToUrlEvent] Switching to target tab {target_id[-4:]} (current: {self.agent_focus_target_id[-4:] if self.agent_focus_target_id else "none"})'
			)
			# Activate target (bring to foreground)
			await self.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
		else:
			self.logger.debug(f'[on_NavigateToUrlEvent] Already on target tab {target_id[-4:]}, skipping SwitchTabEvent')
		assert self.agent_focus_target_id is not None and self.agent_focus_target_id == target_id, (
			'Agent focus not updated to new target_id after SwitchTabEvent should have switched to it'
		)
		# Dispatch navigation started
		await self.event_bus.dispatch(NavigationStartedEvent(target_id=target_id, url=event.url))
		# Navigate to URL with proper lifecycle waiting
		await self._navigate_and_wait(event.url, target_id, wait_until=event.wait_until)
		# Close any extension options pages that might have opened
		await self._close_extension_options_pages()
		# Dispatch navigation complete
		self.logger.debug(f'Dispatching NavigationCompleteEvent for {event.url} (tab #{target_id[-4:]})')
		await self.event_bus.dispatch(
			NavigationCompleteEvent(
				target_id=target_id,
				url=event.url,
				status=None,  # CDP doesn't provide status directly
			)
		)
		await self.event_bus.dispatch(AgentFocusChangedEvent(target_id=target_id, url=event.url))
		# Note: These should be handled by dedicated watchdogs:
		# - Security checks (security_watchdog)
		# - Page health checks (crash_watchdog)
		# - Dialog handling (dialog_watchdog)
		# - Download handling (downloads_watchdog)
		# - DOM rebuilding (dom_watchdog)
	except Exception as e:
		self.logger.error(f'Navigation failed: {type(e).__name__}: {e}')
		# target_id might be unbound if exception happens early
		if 'target_id' in locals() and target_id:
			await self.event_bus.dispatch(
				NavigationCompleteEvent(
					target_id=target_id,
					url=event.url,
					error_message=f'{type(e).__name__}: {e}',
				)
			)
			await self.event_bus.dispatch(AgentFocusChangedEvent(target_id=target_id, url=event.url))
		raise
async def _navigate_and_wait(
	self,
	url: str,
	target_id: str,
	timeout: float | None = None,
	wait_until: str = 'load',
) -> None:
	"""Navigate to URL and wait for page readiness using CDP lifecycle events.

	Polls stored lifecycle events (registered once per session in SessionManager).
	wait_until controls the minimum acceptable signal: 'commit', 'domcontentloaded', 'load', 'networkidle'.

	Args:
		url: destination URL.
		target_id: CDP target (tab) to navigate.
		timeout: readiness-poll budget in seconds; when None it is derived from
			whether the navigation stays on the same host (3s) or not (8s).
		wait_until: minimum lifecycle signal to accept before returning.

	Raises:
		RuntimeError: Page.navigate itself timed out or reported errorText, or
			lifecycle monitoring was never enabled for this session (a bug).

	A readiness timeout is NOT an error: it logs a warning and returns, since
	the navigation itself already committed.
	"""
	cdp_session = await self.get_or_create_cdp_session(target_id, focus=False)
	if timeout is None:
		target = self.session_manager.get_target(target_id)
		current_url = target.url
		# Compare the host part ('scheme://host/...' → index 2 after '/'-split)
		same_domain = (
			url.split('/')[2] == current_url.split('/')[2]
			if url.startswith('http') and current_url.startswith('http')
			else False
		)
		timeout = 3.0 if same_domain else 8.0
	nav_start_time = asyncio.get_event_loop().time()
	# Wrap Page.navigate() with timeout — heavy sites can block here for 10s+
	nav_timeout = 20.0
	try:
		nav_result = await asyncio.wait_for(
			cdp_session.cdp_client.send.Page.navigate(
				params={'url': url, 'transitionType': 'address_bar'},
				session_id=cdp_session.session_id,
			),
			timeout=nav_timeout,
		)
	except TimeoutError:
		duration_ms = (asyncio.get_event_loop().time() - nav_start_time) * 1000
		raise RuntimeError(f'Page.navigate() timed out after {nav_timeout}s ({duration_ms:.0f}ms) for {url}')
	if nav_result.get('errorText'):
		raise RuntimeError(f'Navigation failed: {nav_result["errorText"]}')
	if wait_until == 'commit':
		# Navigation committed — caller asked for nothing more
		duration_ms = (asyncio.get_event_loop().time() - nav_start_time) * 1000
		self.logger.debug(f'✅ Page ready for {url} (commit, {duration_ms:.0f}ms)')
		return
	# loaderId ties lifecycle events to THIS navigation (ignore older loads)
	navigation_id = nav_result.get('loaderId')
	start_time = asyncio.get_event_loop().time()
	seen_events = []
	if not hasattr(cdp_session, '_lifecycle_events'):
		raise RuntimeError(
			f'❌ Lifecycle monitoring not enabled for {cdp_session.target_id[:8]}! '
			f'This is a bug - SessionManager should have initialized it. '
			f'Session: {cdp_session}'
		)
	# Acceptable events by readiness level (higher is always acceptable)
	acceptable_events: set[str] = {'networkIdle'}
	if wait_until in ('load', 'domcontentloaded'):
		acceptable_events.add('load')
	if wait_until == 'domcontentloaded':
		acceptable_events.add('DOMContentLoaded')
	poll_interval = 0.05
	while (asyncio.get_event_loop().time() - start_time) < timeout:
		try:
			# Snapshot with list() — the deque/list may mutate while we iterate
			for event_data in list(cdp_session._lifecycle_events):
				event_name = event_data.get('name')
				event_loader_id = event_data.get('loaderId')
				event_str = f'{event_name}(loader={event_loader_id[:8] if event_loader_id else "none"})'
				if event_str not in seen_events:
					seen_events.append(event_str)
				# Skip events belonging to a different (older) navigation
				if event_loader_id and navigation_id and event_loader_id != navigation_id:
					continue
				if event_name in acceptable_events:
					duration_ms = (asyncio.get_event_loop().time() - nav_start_time) * 1000
					self.logger.debug(f'✅ Page ready for {url} ({event_name}, {duration_ms:.0f}ms)')
					return
		except Exception as e:
			self.logger.debug(f'Error polling lifecycle events: {e}')
		await asyncio.sleep(poll_interval)
	duration_ms = (asyncio.get_event_loop().time() - nav_start_time) * 1000
	if not seen_events:
		self.logger.error(
			f'❌ No lifecycle events received for {url} after {duration_ms:.0f}ms! '
			f'Monitoring may have failed. Target: {cdp_session.target_id[:8]}'
		)
	else:
		self.logger.warning(f'⚠️ Page readiness timeout ({timeout}s, {duration_ms:.0f}ms) for {url}')
async def on_SwitchTabEvent(self, event: SwitchTabEvent) -> TargetID:
	"""Handle tab switching - core browser functionality.

	With target_id=None, switches to the most recently opened page (creating a
	fresh about:blank tab if none exist). Otherwise focuses the given target,
	activates it in the browser UI, and emits AgentFocusChangedEvent.

	Returns:
		The TargetID that now has agent focus.

	Raises:
		RuntimeError: the browser is not connected (no focused target yet).
	"""
	if not self.agent_focus_target_id:
		raise RuntimeError('Cannot switch tabs - browser not connected')
	# Get all page targets
	page_targets = self.session_manager.get_all_page_targets()
	if event.target_id is None:
		# Most recently opened page
		if page_targets:
			# Update the target id to be the id of the most recently opened page, then proceed to switch to it
			event.target_id = page_targets[-1].target_id
		else:
			# No pages open at all, create a new one (handles switching to it automatically)
			assert self._cdp_client_root is not None, 'CDP client root not initialized - browser may not be connected yet'
			new_target = await self._cdp_client_root.send.Target.createTarget(params={'url': 'about:blank'})
			target_id = new_target['targetId']
			# Don't await, these may circularly trigger SwitchTabEvent and could deadlock, dispatch to enqueue and return
			self.event_bus.dispatch(TabCreatedEvent(url='about:blank', target_id=target_id))
			self.event_bus.dispatch(AgentFocusChangedEvent(target_id=target_id, url='about:blank'))
			return target_id
	# Switch to the target
	assert event.target_id is not None, 'target_id must be set at this point'
	# Ensure session exists and update agent focus (only for page/tab targets)
	cdp_session = await self.get_or_create_cdp_session(target_id=event.target_id, focus=True)
	# Visually switch to the tab in the browser
	# The Force Background Tab extension prevents Chrome from auto-switching when links create new tabs,
	# but we still want the agent to be able to explicitly switch tabs when needed
	await cdp_session.cdp_client.send.Target.activateTarget(params={'targetId': event.target_id})
	# Get target to access url
	target = self.session_manager.get_target(event.target_id)
	# dispatch focus changed event
	await self.event_bus.dispatch(
		AgentFocusChangedEvent(
			target_id=target.target_id,
			url=target.url,
		)
	)
	return target.target_id
async def on_CloseTabEvent(self, event: CloseTabEvent) -> None:
"""Handle tab closure - update focus if needed."""
try:
# Dispatch tab closed event
await self.event_bus.dispatch(TabClosedEvent(target_id=event.target_id))
# Try to close the target, but don't fail if it's already closed
try:
cdp_session = await self.get_or_create_cdp_session(target_id=None, focus=False)
await cdp_session.cdp_client.send.Target.closeTarget(params={'targetId': event.target_id})
except Exception as e:
self.logger.debug(f'Target may already be closed: {e}')
except Exception as e:
self.logger.warning(f'Error during tab close cleanup: {e}')
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
"""Handle tab creation - apply viewport settings to new tab."""
# Note: Tab switching prevention is handled by the Force Background Tab extension
# The extension automatically keeps focus on the current tab when new tabs are created
# Apply viewport settings if configured
if self.browser_profile.viewport and not self.browser_profile.no_viewport:
try:
viewport_width = self.browser_profile.viewport.width
viewport_height = self.browser_profile.viewport.height
device_scale_factor = self.browser_profile.device_scale_factor or 1.0
self.logger.info(
f'Setting viewport to {viewport_width}x{viewport_height} with device scale factor {device_scale_factor} whereas original device scale factor was {self.browser_profile.device_scale_factor}'
)
# Use the helper method with the new tab's target_id
await self._cdp_set_viewport(viewport_width, viewport_height, device_scale_factor, target_id=event.target_id)
self.logger.debug(f'Applied viewport {viewport_width}x{viewport_height} to tab {event.target_id[-8:]}')
except Exception as e:
self.logger.warning(f'Failed to set viewport for new tab {event.target_id[-8:]}: {e}')
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
"""Handle tab closure - update focus if needed."""
if not self.agent_focus_target_id:
return
# Get current tab index
current_target_id = self.agent_focus_target_id
# If the closed tab was the current one, find a new target
if current_target_id == event.target_id:
await self.event_bus.dispatch(SwitchTabEvent(target_id=None))
async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None:
"""Handle agent focus change - update focus and clear cache."""
self.logger.debug(f'🔄 AgentFocusChangedEvent received: target_id=...{event.target_id[-4:]} url={event.url}')
# Clear cached DOM state since focus changed
if self._dom_watchdog:
self._dom_watchdog.clear_cache()
# Clear cached browser state
self._cached_browser_state_summary = None
self._cached_selector_map.clear()
self.logger.debug('🔄 Cached browser state cleared')
# Update agent focus if a specific target_id is provided (only for page/tab targets)
if event.target_id:
# Ensure session exists and update agent focus (validates target_type internally)
await self.get_or_create_cdp_session(target_id=event.target_id, focus=True)
# Apply viewport settings to the newly focused tab
if self.browser_profile.viewport and not self.browser_profile.no_viewport:
try:
viewport_width = self.browser_profile.viewport.width
viewport_height = self.browser_profile.viewport.height
device_scale_factor = self.browser_profile.device_scale_factor or 1.0
# Use the helper method with the current tab's target_id
await self._cdp_set_viewport(viewport_width, viewport_height, device_scale_factor, target_id=event.target_id)
self.logger.debug(f'Applied viewport {viewport_width}x{viewport_height} to tab {event.target_id[-8:]}')
except Exception as e:
self.logger.warning(f'Failed to set viewport for tab {event.target_id[-8:]}: {e}')
else:
raise RuntimeError('AgentFocusChangedEvent received with no target_id for newly focused tab')
async def on_FileDownloadedEvent(self, event: FileDownloadedEvent) -> None:
"""Track downloaded files during this session."""
self.logger.debug(f'FileDownloadedEvent received: {event.file_name} at {event.path}')
if event.path and event.path not in self._downloaded_files:
self._downloaded_files.append(event.path)
self.logger.info(f'📁 Tracked download: {event.file_name} ({len(self._downloaded_files)} total downloads in session)')
else:
if not event.path:
self.logger.warning(f'FileDownloadedEvent has no path: {event}')
else:
self.logger.debug(f'File already tracked: {event.path}')
async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
	"""Handle browser stop request.

	Honors keep_alive (unless event.force), tears down any cloud browser
	session, resets all local connection state via reset(), and announces
	BrowserStoppedEvent. Failures are reported as BrowserErrorEvent rather
	than raised, so stopping is always best-effort.
	"""
	try:
		# Check if we should keep the browser alive
		if self.browser_profile.keep_alive and not event.force:
			self.event_bus.dispatch(BrowserStoppedEvent(reason='Kept alive due to keep_alive=True'))
			return
		# Clean up cloud browser session if using cloud browser
		if self.browser_profile.use_cloud:
			try:
				await self._cloud_browser_client.stop_browser()
				self.logger.info('🌤️ Cloud browser session cleaned up')
			except Exception as e:
				# Best-effort: the cloud session may already be gone
				self.logger.debug(f'Failed to cleanup cloud browser session: {e}')
		# Clear CDP session cache before stopping
		self.logger.info(
			f'📢 on_BrowserStopEvent - Calling reset() (force={event.force}, keep_alive={self.browser_profile.keep_alive})'
		)
		await self.reset()
		# Reset state
		if self.is_local:
			self.browser_profile.cdp_url = None
		# Notify stop and wait for all handlers to complete
		# LocalBrowserWatchdog listens for BrowserStopEvent and dispatches BrowserKillEvent
		stop_event = self.event_bus.dispatch(BrowserStoppedEvent(reason='Stopped by request'))
		await stop_event
	except Exception as e:
		self.event_bus.dispatch(
			BrowserErrorEvent(
				error_type='BrowserStopEventError',
				message=f'Failed to stop browser: {type(e).__name__} {e}',
				details={'cdp_url': self.cdp_url, 'is_local': self.is_local},
			)
		)
# region - ========== CDP-based replacements for browser_context operations ==========
@property
def cdp_client(self) -> CDPClient:
	"""Root CDP client shared by the whole browser connection.

	The client is created and started in :meth:`connect`; accessing this
	property before that raises an assertion error.
	"""
	client = self._cdp_client_root
	assert client is not None, 'CDP client not initialized - browser may not be connected yet'
	return client
async def new_page(self, url: str | None = None) -> 'Page':
	"""Open a new browser tab and return it wrapped as an actor ``Page``.

	Args:
		url: Initial URL for the tab; defaults to ``about:blank``.
	"""
	from cdp_use.cdp.target.commands import CreateTargetParameters

	# Imported lazily to avoid a circular import with the actor package.
	from browser_use.actor.page import Page as PageActor

	params: CreateTargetParameters = {'url': url or 'about:blank'}
	created = await self.cdp_client.send.Target.createTarget(params)
	return PageActor(self, created['targetId'])
async def get_current_page(self) -> 'Page | None':
	"""Return the currently focused tab as an actor ``Page``, or None if absent."""
	info = await self.get_current_target_info()
	if not info:
		return None
	# Imported lazily to avoid a circular import with the actor package.
	from browser_use.actor.page import Page as PageActor

	return PageActor(self, info['targetId'])
async def must_get_current_page(self) -> 'Page':
	"""Like :meth:`get_current_page`, but raises instead of returning None.

	Raises:
		RuntimeError: If no current target exists.
	"""
	current = await self.get_current_page()
	if not current:
		raise RuntimeError('No current target found')
	return current
async def get_pages(self) -> list['Page']:
	"""Return an actor ``Page`` wrapper for every page target the SessionManager knows about."""
	# Imported lazily to avoid a circular import with the actor package.
	from browser_use.actor.page import Page as PageActor

	manager = self.session_manager
	page_targets = manager.get_all_page_targets() if manager else []
	return [PageActor(self, t.target_id) for t in page_targets]
def get_focused_target(self) -> 'Target | None':
	"""Return the target that currently holds agent focus.

	Returns:
		The focused Target, or None when there is no SessionManager (and
		therefore no focus to report).
	"""
	manager = self.session_manager
	return manager.get_focused_target() if manager else None
def get_page_targets(self) -> list['Target']:
	"""Return all page/tab targets, excluding iframes, workers, etc.

	Returns:
		Target objects for every page/tab; empty when no SessionManager exists.
	"""
	manager = self.session_manager
	return manager.get_all_page_targets() if manager else []
async def close_page(self, page: 'Union[Page, str]') -> None:
	"""Close a tab identified by either an actor ``Page`` or a raw target ID string."""
	from cdp_use.cdp.target.commands import CloseTargetParameters

	# Imported lazily to avoid a circular import with the actor package.
	from browser_use.actor.page import Page as PageActor

	target_id = page._target_id if isinstance(page, PageActor) else str(page)
	params: CloseTargetParameters = {'targetId': target_id}
	await self.cdp_client.send.Target.closeTarget(params)
async def cookies(self) -> list['Cookie']:
	"""Return all browser cookies via CDP ``Storage.getCookies``."""
	response = await self.cdp_client.send.Storage.getCookies()
	return response['cookies']
async def clear_cookies(self) -> None:
	"""Delete all browser cookies via CDP ``Network.clearBrowserCookies``."""
	await self.cdp_client.send.Network.clearBrowserCookies()
async def export_storage_state(self, output_path: str | Path | None = None) -> dict[str, Any]:
"""Export all browser cookies and storage to storage_state format.
Extracts decrypted cookies via CDP, bypassing keychain encryption.
Args:
output_path: Optional path to save storage_state.json. If None, returns dict only.
Returns:
Storage state dict with cookies in Playwright format.
"""
from pathlib import Path
# Get all cookies using Storage.getCookies (returns decrypted cookies from all domains)
cookies = await self._cdp_get_cookies()
# Convert CDP cookie format to Playwright storage_state format
storage_state = {
'cookies': [
{
'name': c['name'],
'value': c['value'],
'domain': c['domain'],
'path': c['path'],
'expires': c.get('expires', -1),
'httpOnly': c.get('httpOnly', False),
'secure': c.get('secure', False),
'sameSite': c.get('sameSite', 'Lax'),
}
for c in cookies
],
'origins': [], # Could add localStorage/sessionStorage extraction if needed
}
if output_path:
import json
output_file = Path(output_path).expanduser().resolve()
output_file.parent.mkdir(parents=True, exist_ok=True)
output_file.write_text(json.dumps(storage_state, indent=2, ensure_ascii=False), encoding='utf-8')
self.logger.info(f'💾 Exported {len(cookies)} cookies to {output_file}')
return storage_state
async def get_or_create_cdp_session(self, target_id: TargetID | None = None, focus: bool = True) -> CDPSession:
	"""Get CDP session for a target from the event-driven pool.

	With autoAttach=True, sessions are created automatically by Chrome and added
	to the pool via Target.attachedToTarget events. This method retrieves them.

	Args:
		target_id: Target ID to get session for. If None, uses current agent focus.
		focus: If True, switches agent focus to this target (page targets only).

	Returns:
		CDPSession for the specified target.

	Raises:
		ValueError: If target doesn't exist or session is not available.
	"""
	assert self._cdp_client_root is not None, 'Root CDP client not initialized'
	assert self.session_manager is not None, 'SessionManager not initialized'
	# If no target_id specified, ensure current agent focus is valid and wait for recovery if needed
	if target_id is None:
		# Validate and wait for focus recovery if stale (centralized protection)
		focus_valid = await self.session_manager.ensure_valid_focus(timeout=5.0)
		if not focus_valid:
			raise ValueError(
				'No valid agent focus available - target may have detached and recovery failed. '
				'This indicates browser is in an unstable state.'
			)
		assert self.agent_focus_target_id is not None, 'Focus validation passed but agent_focus_target_id is None'
		target_id = self.agent_focus_target_id
	session = self.session_manager._get_session_for_target(target_id)
	if not session:
		# Session not in pool yet - wait for attach event
		self.logger.debug(f'[SessionManager] Waiting for target {target_id[:8]}... to attach...')
		# Wait up to 2 seconds (20 polls x 100ms) for the attach event
		for attempt in range(20):
			await asyncio.sleep(0.1)
			session = self.session_manager._get_session_for_target(target_id)
			if session:
				self.logger.debug(f'[SessionManager] Target appeared after {attempt * 100}ms')
				break
		if not session:
			# Timeout - target doesn't exist
			raise ValueError(f'Target {target_id} not found - may have detached or never existed')
	# Validate session is still active
	is_valid = await self.session_manager.validate_session(target_id)
	if not is_valid:
		raise ValueError(f'Target {target_id} has detached - no active sessions')
	# Update focus if requested
	# CRITICAL: Only allow focus change to 'page' type targets, not iframes/workers
	if focus and self.agent_focus_target_id != target_id:
		# Get target type from SessionManager (may be None if the target just detached)
		target = self.session_manager.get_target(target_id)
		target_type = target.target_type if target else 'unknown'
		if target_type == 'page':
			# Format current focus safely (could be None after detach)
			current_focus = self.agent_focus_target_id[:8] if self.agent_focus_target_id else 'None'
			self.logger.debug(f'[SessionManager] Switching focus: {current_focus}... → {target_id[:8]}...')
			self.agent_focus_target_id = target_id
		else:
			# Ignore focus request for non-page targets (iframes, workers, etc.)
			# These can detach at any time, causing agent_focus to point to dead target
			current_focus = self.agent_focus_target_id[:8] if self.agent_focus_target_id else 'None'
			self.logger.debug(
				f'[SessionManager] Ignoring focus request for {target_type} target {target_id[:8]}... '
				f'(agent_focus stays on {current_focus}...)'
			)
	# Resume if waiting for debugger (non-essential, don't let it block connect)
	if focus:
		try:
			await asyncio.wait_for(
				session.cdp_client.send.Runtime.runIfWaitingForDebugger(session_id=session.session_id),
				timeout=3.0,
			)
		except Exception:
			pass  # May fail if not waiting, or timeout — either is fine
	return session
async def set_extra_headers(self, headers: dict[str, str], target_id: TargetID | None = None) -> None:
	"""Inject extra HTTP headers into every request made by a target.

	Uses CDP ``Network.setExtraHTTPHeaders``. The Network domain is enabled
	first (idempotent, so calling repeatedly is safe).

	Args:
		headers: Header name -> value pairs to add to every request.
		target_id: Target to apply headers to. Defaults to the current agent
			focus target; silently no-ops when there is no focus.
	"""
	if target_id is None:
		target_id = self.agent_focus_target_id
		if not target_id:
			return
	session = await self.get_or_create_cdp_session(target_id, focus=False)
	# Enabling the Network domain is idempotent - safe to call multiple times.
	await session.cdp_client.send.Network.enable(session_id=session.session_id)
	await session.cdp_client.send.Network.setExtraHTTPHeaders(
		params={'headers': cast(Any, headers)}, session_id=session.session_id
	)
# endregion - ========== CDP-based ... ==========
# region - ========== Helper Methods ==========
@observe_debug(ignore_input=True, ignore_output=True, name='get_browser_state_summary')
async def get_browser_state_summary(
	self,
	include_screenshot: bool = True,
	cached: bool = False,
	include_recent_events: bool = False,
) -> BrowserStateSummary:
	"""Return the current browser state (DOM tree, optional screenshot).

	Args:
		include_screenshot: Capture a screenshot as part of the state.
		cached: Reuse the previously cached summary when it is usable - it must
			have interactive elements, and a screenshot if one was requested.
		include_recent_events: Include recent browser events in the summary.

	Returns:
		A BrowserStateSummary produced by the handler of
		``BrowserStateRequestEvent`` on the event bus.
	"""
	if cached and self._cached_browser_state_summary is not None and self._cached_browser_state_summary.dom_state:
		# Don't use cached state if it has 0 interactive elements
		selector_map = self._cached_browser_state_summary.dom_state.selector_map
		# Don't use cached state if we need a screenshot but the cached state doesn't have one
		if include_screenshot and not self._cached_browser_state_summary.screenshot:
			self.logger.debug('⚠️ Cached browser state has no screenshot, fetching fresh state with screenshot')
			# Fall through to fetch fresh state with screenshot
		elif selector_map and len(selector_map) > 0:
			self.logger.debug('🔄 Using pre-cached browser state summary for open tab')
			return self._cached_browser_state_summary
		else:
			self.logger.debug('⚠️ Cached browser state has 0 interactive elements, fetching fresh state')
			# Fall through to fetch fresh state
	# Dispatch the event and wait for result
	event: BrowserStateRequestEvent = cast(
		BrowserStateRequestEvent,
		self.event_bus.dispatch(
			BrowserStateRequestEvent(
				include_dom=True,
				include_screenshot=include_screenshot,
				include_recent_events=include_recent_events,
			)
		),
	)
	# The handler returns the BrowserStateSummary directly
	result = await event.event_result(raise_if_none=True, raise_if_any=True)
	assert result is not None and result.dom_state is not None
	return result
async def get_state_as_text(self) -> str:
	"""Return the current DOM state serialized as text (LLM representation)."""
	summary = await self.get_browser_state_summary()
	assert summary.dom_state is not None
	return summary.dom_state.llm_representation()
async def attach_all_watchdogs(self) -> None:
	"""Initialize and attach all watchdogs with explicit handler registration.

	Idempotent: a flag guards against duplicate attachment. Each watchdog
	registers its own event handlers inside ``attach_to_session()``; the
	commented-out ``event_bus.on(...)`` lines document which events each
	watchdog responds to. Some watchdogs are conditional on profile settings
	(storage state, HAR recording, captcha solver).
	"""
	# Prevent duplicate watchdog attachment
	if self._watchdogs_attached:
		self.logger.debug('Watchdogs already attached, skipping duplicate attachment')
		return
	from browser_use.browser.watchdogs.aboutblank_watchdog import AboutBlankWatchdog
	from browser_use.browser.watchdogs.captcha_watchdog import CaptchaWatchdog

	# from browser_use.browser.crash_watchdog import CrashWatchdog
	from browser_use.browser.watchdogs.default_action_watchdog import DefaultActionWatchdog
	from browser_use.browser.watchdogs.dom_watchdog import DOMWatchdog
	from browser_use.browser.watchdogs.downloads_watchdog import DownloadsWatchdog
	from browser_use.browser.watchdogs.har_recording_watchdog import HarRecordingWatchdog
	from browser_use.browser.watchdogs.local_browser_watchdog import LocalBrowserWatchdog
	from browser_use.browser.watchdogs.permissions_watchdog import PermissionsWatchdog
	from browser_use.browser.watchdogs.popups_watchdog import PopupsWatchdog
	from browser_use.browser.watchdogs.recording_watchdog import RecordingWatchdog
	from browser_use.browser.watchdogs.screenshot_watchdog import ScreenshotWatchdog
	from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog
	from browser_use.browser.watchdogs.storage_state_watchdog import StorageStateWatchdog

	# Initialize CrashWatchdog
	# CrashWatchdog.model_rebuild()
	# self._crash_watchdog = CrashWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserConnectedEvent, self._crash_watchdog.on_BrowserConnectedEvent)
	# self.event_bus.on(BrowserStoppedEvent, self._crash_watchdog.on_BrowserStoppedEvent)
	# self._crash_watchdog.attach_to_session()
	# Initialize DownloadsWatchdog
	DownloadsWatchdog.model_rebuild()
	self._downloads_watchdog = DownloadsWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserLaunchEvent, self._downloads_watchdog.on_BrowserLaunchEvent)
	# self.event_bus.on(TabCreatedEvent, self._downloads_watchdog.on_TabCreatedEvent)
	# self.event_bus.on(TabClosedEvent, self._downloads_watchdog.on_TabClosedEvent)
	# self.event_bus.on(BrowserStoppedEvent, self._downloads_watchdog.on_BrowserStoppedEvent)
	# self.event_bus.on(NavigationCompleteEvent, self._downloads_watchdog.on_NavigationCompleteEvent)
	self._downloads_watchdog.attach_to_session()
	if self.browser_profile.auto_download_pdfs:
		self.logger.debug('📄 PDF auto-download enabled for this session')
	# Initialize StorageStateWatchdog conditionally
	# Enable when user provides either storage_state or user_data_dir (indicating they want persistence)
	should_enable_storage_state = (
		self.browser_profile.storage_state is not None or self.browser_profile.user_data_dir is not None
	)
	if should_enable_storage_state:
		StorageStateWatchdog.model_rebuild()
		self._storage_state_watchdog = StorageStateWatchdog(
			event_bus=self.event_bus,
			browser_session=self,
			# More conservative defaults when auto-enabled
			auto_save_interval=60.0,  # 1 minute instead of 30 seconds
			save_on_change=False,  # Only save on shutdown by default
		)
		self._storage_state_watchdog.attach_to_session()
		self.logger.debug(
			f'🍪 StorageStateWatchdog enabled (storage_state: {bool(self.browser_profile.storage_state)}, user_data_dir: {bool(self.browser_profile.user_data_dir)})'
		)
	else:
		self.logger.debug('🍪 StorageStateWatchdog disabled (no storage_state or user_data_dir configured)')
	# Initialize LocalBrowserWatchdog
	LocalBrowserWatchdog.model_rebuild()
	self._local_browser_watchdog = LocalBrowserWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserLaunchEvent, self._local_browser_watchdog.on_BrowserLaunchEvent)
	# self.event_bus.on(BrowserKillEvent, self._local_browser_watchdog.on_BrowserKillEvent)
	# self.event_bus.on(BrowserStopEvent, self._local_browser_watchdog.on_BrowserStopEvent)
	self._local_browser_watchdog.attach_to_session()
	# Initialize SecurityWatchdog (hooks NavigationWatchdog and implements allowed_domains restriction)
	SecurityWatchdog.model_rebuild()
	self._security_watchdog = SecurityWatchdog(event_bus=self.event_bus, browser_session=self)
	# Core navigation is now handled in BrowserSession directly
	# SecurityWatchdog only handles security policy enforcement
	self._security_watchdog.attach_to_session()
	# Initialize AboutBlankWatchdog (handles about:blank pages and DVD loading animation on first load)
	AboutBlankWatchdog.model_rebuild()
	self._aboutblank_watchdog = AboutBlankWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserStopEvent, self._aboutblank_watchdog.on_BrowserStopEvent)
	# self.event_bus.on(BrowserStoppedEvent, self._aboutblank_watchdog.on_BrowserStoppedEvent)
	# self.event_bus.on(TabCreatedEvent, self._aboutblank_watchdog.on_TabCreatedEvent)
	# self.event_bus.on(TabClosedEvent, self._aboutblank_watchdog.on_TabClosedEvent)
	self._aboutblank_watchdog.attach_to_session()
	# Initialize PopupsWatchdog (handles accepting and dismissing JS dialogs, alerts, confirm, onbeforeunload, etc.)
	PopupsWatchdog.model_rebuild()
	self._popups_watchdog = PopupsWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(TabCreatedEvent, self._popups_watchdog.on_TabCreatedEvent)
	# self.event_bus.on(DialogCloseEvent, self._popups_watchdog.on_DialogCloseEvent)
	self._popups_watchdog.attach_to_session()
	# Initialize PermissionsWatchdog (handles granting and revoking browser permissions like clipboard, microphone, camera, etc.)
	PermissionsWatchdog.model_rebuild()
	self._permissions_watchdog = PermissionsWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserConnectedEvent, self._permissions_watchdog.on_BrowserConnectedEvent)
	self._permissions_watchdog.attach_to_session()
	# Initialize DefaultActionWatchdog (handles all default actions like click, type, scroll, go back, go forward, refresh, wait, send keys, upload file, scroll to text, etc.)
	DefaultActionWatchdog.model_rebuild()
	self._default_action_watchdog = DefaultActionWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(ClickElementEvent, self._default_action_watchdog.on_ClickElementEvent)
	# self.event_bus.on(TypeTextEvent, self._default_action_watchdog.on_TypeTextEvent)
	# self.event_bus.on(ScrollEvent, self._default_action_watchdog.on_ScrollEvent)
	# self.event_bus.on(GoBackEvent, self._default_action_watchdog.on_GoBackEvent)
	# self.event_bus.on(GoForwardEvent, self._default_action_watchdog.on_GoForwardEvent)
	# self.event_bus.on(RefreshEvent, self._default_action_watchdog.on_RefreshEvent)
	# self.event_bus.on(WaitEvent, self._default_action_watchdog.on_WaitEvent)
	# self.event_bus.on(SendKeysEvent, self._default_action_watchdog.on_SendKeysEvent)
	# self.event_bus.on(UploadFileEvent, self._default_action_watchdog.on_UploadFileEvent)
	# self.event_bus.on(ScrollToTextEvent, self._default_action_watchdog.on_ScrollToTextEvent)
	self._default_action_watchdog.attach_to_session()
	# Initialize ScreenshotWatchdog (handles taking screenshots of the browser)
	ScreenshotWatchdog.model_rebuild()
	self._screenshot_watchdog = ScreenshotWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(BrowserStartEvent, self._screenshot_watchdog.on_BrowserStartEvent)
	# self.event_bus.on(BrowserStoppedEvent, self._screenshot_watchdog.on_BrowserStoppedEvent)
	# self.event_bus.on(ScreenshotEvent, self._screenshot_watchdog.on_ScreenshotEvent)
	self._screenshot_watchdog.attach_to_session()
	# Initialize DOMWatchdog (handles building the DOM tree and detecting interactive elements, depends on ScreenshotWatchdog)
	DOMWatchdog.model_rebuild()
	self._dom_watchdog = DOMWatchdog(event_bus=self.event_bus, browser_session=self)
	# self.event_bus.on(TabCreatedEvent, self._dom_watchdog.on_TabCreatedEvent)
	# self.event_bus.on(BrowserStateRequestEvent, self._dom_watchdog.on_BrowserStateRequestEvent)
	self._dom_watchdog.attach_to_session()
	# Initialize RecordingWatchdog (handles video recording)
	RecordingWatchdog.model_rebuild()
	self._recording_watchdog = RecordingWatchdog(event_bus=self.event_bus, browser_session=self)
	self._recording_watchdog.attach_to_session()
	# Initialize HarRecordingWatchdog if record_har_path is configured (handles HTTPS HAR capture)
	if self.browser_profile.record_har_path:
		HarRecordingWatchdog.model_rebuild()
		self._har_recording_watchdog = HarRecordingWatchdog(event_bus=self.event_bus, browser_session=self)
		self._har_recording_watchdog.attach_to_session()
	# Initialize CaptchaWatchdog (listens for captcha solver events from the browser proxy)
	if self.browser_profile.captcha_solver:
		CaptchaWatchdog.model_rebuild()
		self._captcha_watchdog = CaptchaWatchdog(event_bus=self.event_bus, browser_session=self)
		self._captcha_watchdog.attach_to_session()
	# Mark watchdogs as attached to prevent duplicate attachment
	self._watchdogs_attached = True
async def connect(self, cdp_url: str | None = None) -> Self:
	"""Connect to a remote chromium-based browser via CDP using cdp-use.

	Resolves an HTTP endpoint to its WebSocket debugger URL when needed,
	starts the root CDP client, initializes the event-driven SessionManager,
	enables auto-attach, ensures at least one page exists, sets the initial
	agent focus, and dispatches TabCreated/AgentFocusChanged events.

	This MUST succeed or the browser is unusable. Fails hard on any error:
	on failure all partial state is torn down and a RuntimeError is raised.

	Args:
		cdp_url: CDP endpoint (ws:// or http://). Defaults to the profile's URL.

	Returns:
		Self, for chaining.

	Raises:
		RuntimeError: If no CDP URL is configured or the connection/setup fails.
	"""
	self.browser_profile.cdp_url = cdp_url or self.cdp_url
	if not self.cdp_url:
		raise RuntimeError('Cannot setup CDP connection without CDP URL')
	# Prevent duplicate connections - clean up existing connection first
	if self._cdp_client_root is not None:
		self.logger.warning(
			'⚠️ connect() called but CDP client already exists! Cleaning up old connection before creating new one.'
		)
		try:
			await self._cdp_client_root.stop()
		except Exception as e:
			self.logger.debug(f'Error stopping old CDP client: {e}')
		self._cdp_client_root = None
	if not self.cdp_url.startswith('ws'):
		# If it's an HTTP URL, fetch the WebSocket URL from /json/version endpoint
		parsed_url = urlparse(self.cdp_url)
		path = parsed_url.path.rstrip('/')
		if not path.endswith('/json/version'):
			path = path + '/json/version'
		url = urlunparse(
			(parsed_url.scheme, parsed_url.netloc, path, parsed_url.params, parsed_url.query, parsed_url.fragment)
		)
		# Run a tiny HTTP client to query for the WebSocket URL from the /json/version endpoint
		# Default httpx timeout is 5s which can race the global wait_for(connect(), 15s).
		# Use 30s as a safety net for direct connect() callers; the wait_for is the real deadline.
		# For localhost/127.0.0.1, disable trust_env to prevent proxy env vars (HTTP_PROXY, HTTPS_PROXY)
		# from routing local requests through a proxy, which causes 502 errors on Windows.
		# Remote CDP URLs should still respect proxy settings.
		is_localhost = parsed_url.hostname in ('localhost', '127.0.0.1', '::1')
		async with httpx.AsyncClient(timeout=httpx.Timeout(30.0), trust_env=not is_localhost) as client:
			headers = self.browser_profile.headers or {}
			version_info = await client.get(url, headers=headers)
			self.logger.debug(f'Raw version info: {str(version_info)}')
			self.browser_profile.cdp_url = version_info.json()['webSocketDebuggerUrl']
	assert self.cdp_url is not None, 'CDP URL is None.'
	browser_location = 'local browser' if self.is_local else 'remote browser'
	self.logger.debug(f'🌎 Connecting to existing chromium-based browser via CDP: {self.cdp_url} -> ({browser_location})')
	try:
		# Create and store the CDP client for direct CDP communication
		headers = getattr(self.browser_profile, 'headers', None)
		self._cdp_client_root = CDPClient(
			self.cdp_url,
			additional_headers=headers,
			max_ws_frame_size=200 * 1024 * 1024,  # Use 200MB limit to handle pages with very large DOMs
		)
		assert self._cdp_client_root is not None
		await self._cdp_client_root.start()
		# Initialize event-driven session manager FIRST (before enabling autoAttach)
		# SessionManager will:
		# 1. Register attach/detach event handlers
		# 2. Discover and attach to all existing targets
		# 3. Initialize sessions and enable lifecycle monitoring
		# 4. Enable autoAttach for future targets
		from browser_use.browser.session_manager import SessionManager

		self.session_manager = SessionManager(self)
		await self.session_manager.start_monitoring()
		self.logger.debug('Event-driven session manager started')
		# Enable auto-attach so Chrome automatically notifies us when NEW targets attach/detach
		# This is the foundation of event-driven session management
		await self._cdp_client_root.send.Target.setAutoAttach(
			params={'autoAttach': True, 'waitForDebuggerOnStart': False, 'flatten': True}
		)
		self.logger.debug('CDP client connected with auto-attach enabled')
		# Get browser targets from SessionManager (source of truth)
		# SessionManager has already discovered all targets via start_monitoring()
		page_targets_from_manager = self.session_manager.get_all_page_targets()
		# Check for chrome://newtab pages and redirect them to about:blank (in parallel)
		from browser_use.utils import is_new_tab_page

		async def _redirect_newtab(target):
			# Replace Chrome's new-tab page with about:blank so automation sees a neutral page.
			target_url = target.url
			target_id = target.target_id
			self.logger.debug(f'🔄 Redirecting {target_url} to about:blank for target {target_id}')
			try:
				session = await self.get_or_create_cdp_session(target_id, focus=False)
				await session.cdp_client.send.Page.navigate(params={'url': 'about:blank'}, session_id=session.session_id)
				target.url = 'about:blank'
			except Exception as e:
				self.logger.warning(f'Failed to redirect {target_url}: {e}')

		redirect_tasks = [
			_redirect_newtab(target)
			for target in page_targets_from_manager
			if is_new_tab_page(target.url) and target.url != 'about:blank'
		]
		if redirect_tasks:
			await asyncio.gather(*redirect_tasks, return_exceptions=True)
		# Ensure we have at least one page
		if not page_targets_from_manager:
			new_target = await self._cdp_client_root.send.Target.createTarget(params={'url': 'about:blank'})
			target_id = new_target['targetId']
			self.logger.debug(f'📄 Created new blank page: {target_id}')
		else:
			target_id = page_targets_from_manager[0].target_id
			self.logger.debug(f'📄 Using existing page: {target_id}')
		# Set up initial focus using the public API
		# Note: get_or_create_cdp_session() will wait for attach event and set focus
		try:
			await self.get_or_create_cdp_session(target_id, focus=True)
			# agent_focus_target_id is now set by get_or_create_cdp_session
			self.logger.debug(f'📄 Agent focus set to {target_id[:8]}...')
		except ValueError as e:
			raise RuntimeError(f'Failed to get session for initial target {target_id}: {e}') from e
		# Note: Lifecycle monitoring is enabled automatically in SessionManager._handle_target_attached()
		# when targets attach, so no manual enablement needed!
		# Enable proxy authentication handling if configured
		await self._setup_proxy_auth()
		# Attach WS drop detection callback for auto-reconnection
		self._intentional_stop = False
		self._attach_ws_drop_callback()
		# Verify the target is working
		if self.agent_focus_target_id:
			target = self.session_manager.get_target(self.agent_focus_target_id)
			# BUGFIX: get_target() can return None if the target detached in the
			# meantime (it is None-checked everywhere else) - guard before
			# dereferencing .title to avoid an AttributeError here.
			if target and target.title == 'Unknown title':
				self.logger.warning('Target created but title is unknown (may be normal for about:blank)')
		# Dispatch TabCreatedEvent for all initial tabs (so watchdogs can initialize)
		for idx, target in enumerate(page_targets_from_manager):
			target_url = target.url
			self.logger.debug(f'Dispatching TabCreatedEvent for initial tab {idx}: {target_url}')
			self.event_bus.dispatch(TabCreatedEvent(url=target_url, target_id=target.target_id))
		# Dispatch initial focus event
		if page_targets_from_manager:
			initial_url = page_targets_from_manager[0].url
			self.event_bus.dispatch(AgentFocusChangedEvent(target_id=page_targets_from_manager[0].target_id, url=initial_url))
			self.logger.debug(f'Initial agent focus set to tab 0: {initial_url}')
	except Exception as e:
		# Fatal error - browser is not usable without CDP connection
		self.logger.error(f'❌ FATAL: Failed to setup CDP connection: {e}')
		self.logger.error('❌ Browser cannot continue without CDP connection')
		# Clear SessionManager state
		if self.session_manager:
			try:
				await self.session_manager.clear()
				self.logger.debug('Cleared SessionManager state after initialization failure')
			except Exception as cleanup_error:
				self.logger.debug(f'Error clearing SessionManager: {cleanup_error}')
		# Close CDP client WebSocket and unregister handlers
		if self._cdp_client_root:
			try:
				await self._cdp_client_root.stop()  # Close WebSocket and unregister handlers
				self.logger.debug('Closed CDP client WebSocket after initialization failure')
			except Exception as cleanup_error:
				self.logger.debug(f'Error closing CDP client: {cleanup_error}')
		self.session_manager = None
		self._cdp_client_root = None
		self.agent_focus_target_id = None
		# Re-raise as a fatal error
		raise RuntimeError(f'Failed to establish CDP connection to browser: {e}') from e
	return self
async def _setup_proxy_auth(self) -> None:
	"""Enable CDP Fetch auth handling for authenticated proxy, if credentials provided.

	Handles HTTP proxy authentication challenges (Basic/Proxy) by providing
	configured credentials from BrowserProfile. All failures are logged at
	debug level and swallowed - proxy auth setup is strictly best-effort.
	"""
	assert self._cdp_client_root
	try:
		proxy_cfg = self.browser_profile.proxy
		username = proxy_cfg.username if proxy_cfg else None
		password = proxy_cfg.password if proxy_cfg else None
		if not username or not password:
			self.logger.debug('Proxy credentials not provided; skipping proxy auth setup')
			return
		# Enable Fetch domain with auth handling (do not pause all requests)
		try:
			await self._cdp_client_root.send.Fetch.enable(params={'handleAuthRequests': True})
			self.logger.debug('Fetch.enable(handleAuthRequests=True) enabled on root client')
		except Exception as e:
			self.logger.debug(f'Fetch.enable on root failed: {type(e).__name__}: {e}')
		# Also enable on the focused target's session if available to ensure events are delivered
		try:
			if self.agent_focus_target_id:
				cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
				await cdp_session.cdp_client.send.Fetch.enable(
					params={'handleAuthRequests': True},
					session_id=cdp_session.session_id,
				)
				self.logger.debug('Fetch.enable(handleAuthRequests=True) enabled on focused session')
		except Exception as e:
			self.logger.debug(f'Fetch.enable on focused session failed: {type(e).__name__}: {e}')

		def _on_auth_required(event: AuthRequiredEvent, session_id: SessionID | None = None):
			# Synchronous CDP callback: schedules an async response task and returns immediately.
			# event keys may be snake_case or camelCase depending on generator; handle both
			request_id = event.get('requestId') or event.get('request_id')
			if not request_id:
				return
			challenge = event.get('authChallenge') or event.get('auth_challenge') or {}
			source = (challenge.get('source') or '').lower()
			# Only respond to proxy challenges
			if source == 'proxy' and request_id:

				async def _respond():
					# Answer the proxy challenge with the configured credentials (closure over username/password).
					assert self._cdp_client_root
					try:
						await self._cdp_client_root.send.Fetch.continueWithAuth(
							params={
								'requestId': request_id,
								'authChallengeResponse': {
									'response': 'ProvideCredentials',
									'username': username,
									'password': password,
								},
							},
							session_id=session_id,
						)
					except Exception as e:
						self.logger.debug(f'Proxy auth respond failed: {type(e).__name__}: {e}')

				# schedule
				create_task_with_error_handling(
					_respond(), name='auth_respond', logger_instance=self.logger, suppress_exceptions=True
				)
			else:
				# Default behaviour for non-proxy challenges: let browser handle
				async def _default():
					assert self._cdp_client_root
					try:
						await self._cdp_client_root.send.Fetch.continueWithAuth(
							params={'requestId': request_id, 'authChallengeResponse': {'response': 'Default'}},
							session_id=session_id,
						)
					except Exception as e:
						self.logger.debug(f'Default auth respond failed: {type(e).__name__}: {e}')

				if request_id:
					create_task_with_error_handling(
						_default(), name='auth_default', logger_instance=self.logger, suppress_exceptions=True
					)

		def _on_request_paused(event: RequestPausedEvent, session_id: SessionID | None = None):
			# Continue all paused requests to avoid stalling the network
			request_id = event.get('requestId') or event.get('request_id')
			if not request_id:
				return

			async def _continue():
				assert self._cdp_client_root
				try:
					await self._cdp_client_root.send.Fetch.continueRequest(
						params={'requestId': request_id},
						session_id=session_id,
					)
				except Exception:
					pass

			create_task_with_error_handling(
				_continue(), name='request_continue', logger_instance=self.logger, suppress_exceptions=True
			)

		# Register event handler on root client
		try:
			self._cdp_client_root.register.Fetch.authRequired(_on_auth_required)
			self._cdp_client_root.register.Fetch.requestPaused(_on_request_paused)
			if self.agent_focus_target_id:
				cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
				cdp_session.cdp_client.register.Fetch.authRequired(_on_auth_required)
				cdp_session.cdp_client.register.Fetch.requestPaused(_on_request_paused)
			self.logger.debug('Registered Fetch.authRequired handlers')
		except Exception as e:
			self.logger.debug(f'Failed to register authRequired handlers: {type(e).__name__}: {e}')
		# Ensure Fetch is enabled for the current focused target's session, too
		try:
			if self.agent_focus_target_id:
				# Use safe API with focus=False to avoid changing focus
				cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
				await cdp_session.cdp_client.send.Fetch.enable(
					params={'handleAuthRequests': True, 'patterns': [{'urlPattern': '*'}]},
					session_id=cdp_session.session_id,
				)
		except Exception as e:
			self.logger.debug(f'Fetch.enable on focused session failed: {type(e).__name__}: {e}')
	except Exception as e:
		self.logger.debug(f'Skipping proxy auth setup: {type(e).__name__}: {e}')
    async def reconnect(self) -> None:
        """Re-establish the CDP WebSocket connection to an already-running browser.
        This is a lightweight reconnection that:
        1. Stops the old CDPClient (WS already dead, just clean state)
        2. Clears SessionManager (all CDP sessions are invalid post-disconnect)
        3. Creates a new CDPClient with the same cdp_url
        4. Re-initializes SessionManager and re-enables autoAttach
        5. Re-discovers page targets and restores agent focus
        6. Re-enables proxy auth if configured
        Raises:
            AssertionError: if no cdp_url is configured on this session.
        """
        assert self.cdp_url, 'Cannot reconnect without a CDP URL'
        # Remember the previously focused target so focus can be restored after reconnect
        old_focus_target_id = self.agent_focus_target_id
        # 1. Stop old CDPClient (WS is already dead, this just cleans internal state)
        if self._cdp_client_root:
            try:
                await self._cdp_client_root.stop()
            except Exception as e:
                # Best-effort: the socket is gone anyway, so a failed stop is non-fatal
                self.logger.debug(f'Error stopping old CDP client during reconnect: {e}')
            self._cdp_client_root = None
        # 2. Clear SessionManager (all sessions are stale)
        if self.session_manager:
            try:
                await self.session_manager.clear()
            except Exception as e:
                self.logger.debug(f'Error clearing SessionManager during reconnect: {e}')
            self.session_manager = None
        self.agent_focus_target_id = None
        # 3. Create new CDPClient with the same cdp_url
        headers = getattr(self.browser_profile, 'headers', None)
        self._cdp_client_root = CDPClient(
            self.cdp_url,
            additional_headers=headers,
            # Large WS frame cap for big CDP payloads — presumably matches the initial connect settings
            max_ws_frame_size=200 * 1024 * 1024,
        )
        await self._cdp_client_root.start()
        # 4. Re-initialize SessionManager
        from browser_use.browser.session_manager import SessionManager
        self.session_manager = SessionManager(self)
        await self.session_manager.start_monitoring()
        # 5. Re-enable autoAttach
        await self._cdp_client_root.send.Target.setAutoAttach(
            params={'autoAttach': True, 'waitForDebuggerOnStart': False, 'flatten': True}
        )
        # 6. Re-discover page targets and restore focus
        page_targets = self.session_manager.get_all_page_targets()
        # Prefer the old focus target if it still exists
        restored = False
        if old_focus_target_id:
            for target in page_targets:
                if target.target_id == old_focus_target_id:
                    await self.get_or_create_cdp_session(old_focus_target_id, focus=True)
                    restored = True
                    self.logger.debug(f'🔄 Restored agent focus to previous target {old_focus_target_id[:8]}...')
                    break
        if not restored:
            if page_targets:
                # Previous target is gone — fall back to the first available page
                fallback_id = page_targets[0].target_id
                await self.get_or_create_cdp_session(fallback_id, focus=True)
                self.logger.debug(f'🔄 Agent focus set to fallback target {fallback_id[:8]}...')
            else:
                # No pages exist — create one
                new_target = await self._cdp_client_root.send.Target.createTarget(params={'url': 'about:blank'})
                target_id = new_target['targetId']
                await self.get_or_create_cdp_session(target_id, focus=True)
                self.logger.debug(f'🔄 Created new blank page during reconnect: {target_id[:8]}...')
        # 7. Re-enable proxy auth if configured
        await self._setup_proxy_auth()
        # 8. Attach the WS drop detection callback to the new client
        self._attach_ws_drop_callback()
    async def _auto_reconnect(self, max_attempts: int = 3) -> None:
        """Attempt to reconnect with exponential backoff.
        Dispatches BrowserReconnectingEvent before each attempt and
        BrowserReconnectedEvent on success.
        Args:
            max_attempts: Maximum number of reconnection attempts before giving up.
        Notes:
            - Single-flight: concurrent callers return immediately while
              ``self._reconnecting`` is set (guarded by ``self._reconnect_lock``).
            - ``self._reconnect_event`` is set on completion — success or failure —
              so waiters are always released.
            - On total failure a BrowserErrorEvent ('ReconnectionFailed') is
              dispatched instead of raising.
        """
        async with self._reconnect_lock:
            if self._reconnecting:
                return  # already in progress from another caller
            self._reconnecting = True
            self._reconnect_event.clear()
        start_time = time.time()
        # Backoff schedule between attempts; the last delay repeats if attempts exceed it
        delays = [1.0, 2.0, 4.0]
        try:
            for attempt in range(1, max_attempts + 1):
                # Announce the attempt so observers can surface reconnect progress
                self.event_bus.dispatch(
                    BrowserReconnectingEvent(
                        cdp_url=self.cdp_url or '',
                        attempt=attempt,
                        max_attempts=max_attempts,
                    )
                )
                self.logger.warning(f'🔄 WebSocket reconnection attempt {attempt}/{max_attempts}...')
                try:
                    # Hard 15s cap per attempt so a hung reconnect can't stall forever
                    await asyncio.wait_for(self.reconnect(), timeout=15.0)
                    # Success
                    downtime = time.time() - start_time
                    self.event_bus.dispatch(
                        BrowserReconnectedEvent(
                            cdp_url=self.cdp_url or '',
                            attempt=attempt,
                            downtime_seconds=downtime,
                        )
                    )
                    self.logger.info(f'🔄 WebSocket reconnected after {downtime:.1f}s (attempt {attempt})')
                    return
                except Exception as e:
                    self.logger.warning(f'🔄 Reconnection attempt {attempt} failed: {type(e).__name__}: {e}')
                    # No sleep after the final attempt — fall through to the failure path
                    if attempt < max_attempts:
                        delay = delays[attempt - 1] if attempt - 1 < len(delays) else delays[-1]
                        await asyncio.sleep(delay)
            # All attempts exhausted
            self.logger.error(f'🔄 All {max_attempts} reconnection attempts failed')
            self.event_bus.dispatch(
                BrowserErrorEvent(
                    error_type='ReconnectionFailed',
                    message=f'Failed to reconnect after {max_attempts} attempts ({time.time() - start_time:.1f}s)',
                    details={'cdp_url': self.cdp_url or '', 'max_attempts': max_attempts},
                )
            )
        finally:
            self._reconnecting = False
            self._reconnect_event.set()  # wake up all waiters regardless of outcome
    def _attach_ws_drop_callback(self) -> None:
        """Attach a done callback to the CDPClient's message handler task to detect WS drops.
        When the client's internal message-handler task finishes (i.e. the WebSocket
        closed or errored), the callback schedules ``_auto_reconnect`` — unless the
        stop was intentional, a reconnect is already running, or no cdp_url is set.
        No-op when there is no client, no message-handler task, or it already finished.
        """
        # NOTE(review): reaches into CDPClient private internals (_message_handler_task);
        # the hasattr guard keeps this tolerant of client versions without that attribute.
        if not self._cdp_client_root or not hasattr(self._cdp_client_root, '_message_handler_task'):
            return
        task = self._cdp_client_root._message_handler_task
        if task is None or task.done():
            return
        def _on_message_handler_done(fut: asyncio.Future) -> None:
            # Guard: skip if intentionally stopped, already reconnecting, or no cdp_url
            if self._intentional_stop or self._reconnecting or not self.cdp_url:
                return
            # The message handler task exiting means the WS connection dropped.
            # fut.exception() would raise on a cancelled future, hence the guard.
            exc = fut.exception() if not fut.cancelled() else None
            self.logger.warning(
                f'🔌 CDP WebSocket message handler exited unexpectedly'
                f'{f": {type(exc).__name__}: {exc}" if exc else " (connection closed)"}'
            )
            # Fire auto-reconnect as an asyncio task
            try:
                loop = asyncio.get_running_loop()
                self._reconnect_task = loop.create_task(self._auto_reconnect())
            except RuntimeError:
                # No running event loop — can't reconnect
                self.logger.error('🔌 No event loop available for auto-reconnect')
        task.add_done_callback(_on_message_handler_done)
async def get_tabs(self) -> list[TabInfo]:
"""Get information about all open tabs using cached target data."""
tabs = []
# Safety check - return empty list if browser not connected yet
if not self.session_manager:
return tabs
# Get all page targets from SessionManager
page_targets = self.session_manager.get_all_page_targets()
for i, target in enumerate(page_targets):
target_id = target.target_id
url = target.url
title = target.title
try:
# Skip JS execution for chrome:// pages and new tab pages
if is_new_tab_page(url) or url.startswith('chrome://'):
# Use URL as title for chrome pages, or mark new tabs as unusable
if is_new_tab_page(url):
title = ''
elif not title:
# For chrome:// pages without a title, use the URL itself
title = url
# Special handling for PDF pages without titles
if (not title or title == '') and (url.endswith('.pdf') or 'pdf' in url):
# PDF pages might not have a title, use URL filename
try:
from urllib.parse import urlparse
filename = urlparse(url).path.split('/')[-1]
if filename:
title = filename
except Exception:
pass
except Exception as e:
# Fallback to basic title handling
self.logger.debug(f'⚠️ Failed to get target info for tab #{i}: {_log_pretty_url(url)} - {type(e).__name__}')
if is_new_tab_page(url):
title = ''
elif url.startswith('chrome://'):
title = url
else:
title = ''
tab_info = TabInfo(
target_id=target_id,
url=url,
title=title,
parent_target_id=None,
)
tabs.append(tab_info)
return tabs
# endregion - ========== Helper Methods ==========
# region - ========== ID Lookup Methods ==========
async def get_current_target_info(self) -> TargetInfo | None:
"""Get info about the current active target using cached session data."""
if not self.agent_focus_target_id:
return None
target = self.session_manager.get_target(self.agent_focus_target_id)
return {
'targetId': target.target_id,
'url': target.url,
'title': target.title,
'type': target.target_type,
'attached': True,
'canAccessOpener': False,
}
async def get_current_page_url(self) -> str:
"""Get the URL of the current page."""
if self.agent_focus_target_id:
target = self.session_manager.get_target(self.agent_focus_target_id)
return target.url
return 'about:blank'
async def get_current_page_title(self) -> str:
"""Get the title of the current page."""
if self.agent_focus_target_id:
target = self.session_manager.get_target(self.agent_focus_target_id)
return target.title
return 'Unknown page title'
async def navigate_to(self, url: str, new_tab: bool = False) -> None:
"""Navigate to a URL using the standard event system.
Args:
url: URL to navigate to
new_tab: Whether to open in a new tab
"""
from browser_use.browser.events import NavigateToUrlEvent
event = self.event_bus.dispatch(NavigateToUrlEvent(url=url, new_tab=new_tab))
await event
await event.event_result(raise_if_any=True, raise_if_none=False)
# endregion - ========== ID Lookup Methods ==========
# region - ========== DOM Helper Methods ==========
async def get_dom_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None:
"""Get DOM element by index.
Get element from cached selector map.
Args:
index: The element index from the serialized DOM
Returns:
EnhancedDOMTreeNode or None if index not found
"""
# Check cached selector map
if self._cached_selector_map and index in self._cached_selector_map:
return self._cached_selector_map[index]
return None
def update_cached_selector_map(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> None:
"""Update the cached selector map with new DOM state.
This should be called by the DOM watchdog after rebuilding the DOM.
Args:
selector_map: The new selector map from DOM serialization
"""
self._cached_selector_map = selector_map
# Alias for backwards compatibility
async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None:
"""Alias for get_dom_element_by_index for backwards compatibility."""
return await self.get_dom_element_by_index(index)
    async def get_dom_element_at_coordinates(self, x: int, y: int) -> EnhancedDOMTreeNode | None:
        """Get DOM element at coordinates as EnhancedDOMTreeNode.
        First checks the cached selector_map for a matching element, then falls back
        to CDP DOM.describeNode if not found. This ensures safety checks (e.g., for
        <select> elements and file inputs) work correctly.
        Args:
            x: X coordinate relative to viewport
            y: Y coordinate relative to viewport
        Returns:
            EnhancedDOMTreeNode at the coordinates, or None if no element found
        Raises:
            RuntimeError: if there is no active page.
        """
        from browser_use.dom.views import NodeType
        # Get current page to access CDP session
        page = await self.get_current_page()
        if page is None:
            raise RuntimeError('No active page found')
        # Get session ID for CDP call
        session_id = await page._ensure_session()
        try:
            # Call CDP DOM.getNodeForLocation to get backend_node_id
            result = await self.cdp_client.send.DOM.getNodeForLocation(
                params={
                    'x': x,
                    'y': y,
                    'includeUserAgentShadowDOM': False,
                    'ignorePointerEventsNone': False,
                },
                session_id=session_id,
            )
            backend_node_id = result.get('backendNodeId')
            if backend_node_id is None:
                self.logger.debug(f'No element found at coordinates ({x}, {y})')
                return None
            # Try to find element in cached selector_map (avoids extra CDP call)
            if self._cached_selector_map:
                for node in self._cached_selector_map.values():
                    if node.backend_node_id == backend_node_id:
                        self.logger.debug(f'Found element at ({x}, {y}) in cached selector_map')
                        return node
            # Not in cache - fall back to CDP DOM.describeNode to get actual node info
            try:
                describe_result = await self.cdp_client.send.DOM.describeNode(
                    params={'backendNodeId': backend_node_id},
                    session_id=session_id,
                )
                node_info = describe_result.get('node', {})
                node_name = node_info.get('nodeName', '')
                # Parse attributes from flat list [key1, val1, key2, val2, ...] to dict
                attrs_list = node_info.get('attributes', [])
                attributes = {attrs_list[i]: attrs_list[i + 1] for i in range(0, len(attrs_list), 2)}
                # Build a partially-populated node: tree links, AX and snapshot data are
                # intentionally None here since only identity + attributes are known
                return EnhancedDOMTreeNode(
                    node_id=result.get('nodeId', 0),
                    backend_node_id=backend_node_id,
                    node_type=NodeType(node_info.get('nodeType', NodeType.ELEMENT_NODE.value)),
                    node_name=node_name,
                    node_value=node_info.get('nodeValue', '') or '',
                    attributes=attributes,
                    is_scrollable=None,
                    frame_id=result.get('frameId'),
                    session_id=session_id,
                    target_id=self.agent_focus_target_id or '',
                    content_document=None,
                    shadow_root_type=None,
                    shadow_roots=None,
                    parent_node=None,
                    children_nodes=None,
                    ax_node=None,
                    snapshot_node=None,
                    is_visible=None,
                    absolute_position=None,
                )
            except Exception as e:
                self.logger.debug(f'DOM.describeNode failed for backend_node_id={backend_node_id}: {e}')
                # Fall back to minimal node if describeNode fails — only the backend
                # node id and frame/session identity are reliable at this point
                return EnhancedDOMTreeNode(
                    node_id=result.get('nodeId', 0),
                    backend_node_id=backend_node_id,
                    node_type=NodeType.ELEMENT_NODE,
                    node_name='',
                    node_value='',
                    attributes={},
                    is_scrollable=None,
                    frame_id=result.get('frameId'),
                    session_id=session_id,
                    target_id=self.agent_focus_target_id or '',
                    content_document=None,
                    shadow_root_type=None,
                    shadow_roots=None,
                    parent_node=None,
                    children_nodes=None,
                    ax_node=None,
                    snapshot_node=None,
                    is_visible=None,
                    absolute_position=None,
                )
        except Exception as e:
            self.logger.warning(f'Failed to get DOM element at coordinates ({x}, {y}): {e}')
            return None
async def get_target_id_from_tab_id(self, tab_id: str) -> TargetID:
"""Get the full-length TargetID from the truncated 4-char tab_id using SessionManager."""
if not self.session_manager:
raise RuntimeError('SessionManager not initialized')
for full_target_id in self.session_manager.get_all_target_ids():
if full_target_id.endswith(tab_id):
if await self.session_manager.is_target_valid(full_target_id):
return full_target_id
# Stale target - Chrome should have sent detach event
# If we're here, event listener will clean it up
self.logger.debug(f'Found stale target {full_target_id}, skipping')
raise ValueError(f'No TargetID found ending in tab_id=...{tab_id}')
async def get_target_id_from_url(self, url: str) -> TargetID:
"""Get the TargetID from a URL using SessionManager (source of truth)."""
if not self.session_manager:
raise RuntimeError('SessionManager not initialized')
# Search in SessionManager targets (exact match first)
for target_id, target in self.session_manager.get_all_targets().items():
if target.target_type in ('page', 'tab') and target.url == url:
return target_id
# Still not found, try substring match as fallback
for target_id, target in self.session_manager.get_all_targets().items():
if target.target_type in ('page', 'tab') and url in target.url:
return target_id
raise ValueError(f'No TargetID found for url={url}')
async def get_most_recently_opened_target_id(self) -> TargetID:
"""Get the most recently opened target ID using SessionManager."""
# Get all page targets from SessionManager
page_targets = self.session_manager.get_all_page_targets()
if not page_targets:
raise RuntimeError('No page targets available')
return page_targets[-1].target_id
def is_file_input(self, element: Any) -> bool:
"""Check if element is a file input.
Args:
element: The DOM element to check
Returns:
True if element is a file input, False otherwise
"""
if self._dom_watchdog:
return self._dom_watchdog.is_file_input(element)
# Fallback if watchdog not available
return (
hasattr(element, 'node_name')
and element.node_name.upper() == 'INPUT'
and hasattr(element, 'attributes')
and element.attributes.get('type', '').lower() == 'file'
)
async def get_selector_map(self) -> dict[int, EnhancedDOMTreeNode]:
"""Get the current selector map from cached state or DOM watchdog.
Returns:
Dictionary mapping element indices to EnhancedDOMTreeNode objects
"""
# First try cached selector map
if self._cached_selector_map:
return self._cached_selector_map
# Try to get from DOM watchdog
if self._dom_watchdog and hasattr(self._dom_watchdog, 'selector_map'):
return self._dom_watchdog.selector_map or {}
# Return empty dict if nothing available
return {}
async def get_index_by_id(self, element_id: str) -> int | None:
"""Find element index by its id attribute.
Args:
element_id: The id attribute value to search for
Returns:
Index of the element, or None if not found
"""
selector_map = await self.get_selector_map()
for idx, element in selector_map.items():
if element.attributes and element.attributes.get('id') == element_id:
return idx
return None
async def get_index_by_class(self, class_name: str) -> int | None:
"""Find element index by its class attribute (matches if class contains the given name).
Args:
class_name: The class name to search for
Returns:
Index of the first matching element, or None if not found
"""
selector_map = await self.get_selector_map()
for idx, element in selector_map.items():
if element.attributes:
element_class = element.attributes.get('class', '')
if class_name in element_class.split():
return idx
return None
    async def remove_highlights(self) -> None:
        """Remove highlights from the page using CDP.
        No-op when element highlighting is disabled in the browser profile.
        Failures are logged as warnings and never propagated to the caller.
        """
        if not self.browser_profile.highlight_elements:
            return
        try:
            # Get cached session
            cdp_session = await self.get_or_create_cdp_session()
            # Remove highlights via JavaScript - be thorough: selector sweep,
            # container-by-id removal, then orphaned tooltip cleanup
            script = """
            (function() {
                // Remove all browser-use highlight elements
                const highlights = document.querySelectorAll('[data-browser-use-highlight]');
                console.log('Removing', highlights.length, 'browser-use highlight elements');
                highlights.forEach(el => el.remove());
                // Also remove by ID in case selector missed anything
                const highlightContainer = document.getElementById('browser-use-debug-highlights');
                if (highlightContainer) {
                    console.log('Removing highlight container by ID');
                    highlightContainer.remove();
                }
                // Final cleanup - remove any orphaned tooltips
                const orphanedTooltips = document.querySelectorAll('[data-browser-use-highlight="tooltip"]');
                orphanedTooltips.forEach(el => el.remove());
                return { removed: highlights.length };
            })();
            """
            result = await cdp_session.cdp_client.send.Runtime.evaluate(
                params={'expression': script, 'returnByValue': True}, session_id=cdp_session.session_id
            )
            # Log the result for debugging
            if result and 'result' in result and 'value' in result['result']:
                removed_count = result['result']['value'].get('removed', 0)
                self.logger.debug(f'Successfully removed {removed_count} highlight elements')
            else:
                self.logger.debug('Highlight removal completed')
        except Exception as e:
            self.logger.warning(f'Failed to remove highlights: {e}')
    @observe_debug(ignore_input=True, ignore_output=True, name='get_element_coordinates')
    async def get_element_coordinates(self, backend_node_id: int, cdp_session: CDPSession) -> DOMRect | None:
        """Get element coordinates for a backend node ID using multiple methods.
        This method tries DOM.getContentQuads first, then falls back to DOM.getBoxModel,
        and finally uses JavaScript getBoundingClientRect as a last resort.
        Args:
            backend_node_id: The backend node ID to get coordinates for
            cdp_session: The CDP session to use
        Returns:
            DOMRect with coordinates or None if element not found/no bounds
            (zero-area results are treated as "no bounds")
        """
        session_id = cdp_session.session_id
        quads = []
        # Method 1: Try DOM.getContentQuads first (best for inline elements and complex layouts)
        try:
            content_quads_result = await cdp_session.cdp_client.send.DOM.getContentQuads(
                params={'backendNodeId': backend_node_id}, session_id=session_id
            )
            if 'quads' in content_quads_result and content_quads_result['quads']:
                quads = content_quads_result['quads']
                self.logger.debug(f'Got {len(quads)} quads from DOM.getContentQuads')
            else:
                self.logger.debug(f'No quads found from DOM.getContentQuads {content_quads_result}')
        except Exception as e:
            self.logger.debug(f'DOM.getContentQuads failed: {e}')
        # Method 2: Fall back to DOM.getBoxModel
        if not quads:
            try:
                box_model = await cdp_session.cdp_client.send.DOM.getBoxModel(
                    params={'backendNodeId': backend_node_id}, session_id=session_id
                )
                if 'model' in box_model and 'content' in box_model['model']:
                    content_quad = box_model['model']['content']
                    if len(content_quad) >= 8:
                        # Convert box model format (flat 8-number list) to quad format
                        quads = [
                            [
                                content_quad[0],
                                content_quad[1],  # x1, y1
                                content_quad[2],
                                content_quad[3],  # x2, y2
                                content_quad[4],
                                content_quad[5],  # x3, y3
                                content_quad[6],
                                content_quad[7],  # x4, y4
                            ]
                        ]
                        self.logger.debug('Got quad from DOM.getBoxModel')
            except Exception as e:
                self.logger.debug(f'DOM.getBoxModel failed: {e}')
        # Method 3: Fall back to JavaScript getBoundingClientRect
        if not quads:
            try:
                # Resolve the backend node to a JS object so we can call a method on it
                result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': backend_node_id},
                    session_id=session_id,
                )
                if 'object' in result and 'objectId' in result['object']:
                    object_id = result['object']['objectId']
                    js_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                        params={
                            'objectId': object_id,
                            'functionDeclaration': """
                            function() {
                                const rect = this.getBoundingClientRect();
                                return {
                                    x: rect.x,
                                    y: rect.y,
                                    width: rect.width,
                                    height: rect.height
                                };
                            }
                            """,
                            'returnByValue': True,
                        },
                        session_id=session_id,
                    )
                    if 'result' in js_result and 'value' in js_result['result']:
                        rect_data = js_result['result']['value']
                        if rect_data['width'] > 0 and rect_data['height'] > 0:
                            return DOMRect(
                                x=rect_data['x'], y=rect_data['y'], width=rect_data['width'], height=rect_data['height']
                            )
            except Exception as e:
                self.logger.debug(f'JavaScript getBoundingClientRect failed: {e}')
        # Convert quads to bounding rectangle if we have them
        if quads:
            # Use the first quad (most relevant for the element)
            quad = quads[0]
            if len(quad) >= 8:
                # Calculate bounding rect from quad points (x at even, y at odd offsets)
                x_coords = [quad[i] for i in range(0, 8, 2)]
                y_coords = [quad[i] for i in range(1, 8, 2)]
                min_x = min(x_coords)
                min_y = min(y_coords)
                max_x = max(x_coords)
                max_y = max(y_coords)
                width = max_x - min_x
                height = max_y - min_y
                if width > 0 and height > 0:
                    return DOMRect(x=min_x, y=min_y, width=width, height=height)
        # No method produced a positive-area rect
        return None
    async def highlight_interaction_element(self, node: 'EnhancedDOMTreeNode') -> None:
        """Temporarily highlight an element during interaction for user visibility.
        This creates a visual highlight on the browser that shows the user which element
        is being interacted with. The highlight automatically fades after the configured duration.
        No-op when highlighting is disabled; failures are logged at debug level and
        swallowed so the triggering action is never broken by a cosmetic problem.
        Args:
            node: The DOM node to highlight with backend_node_id for coordinate lookup
        """
        if not self.browser_profile.highlight_elements:
            return
        try:
            import json
            cdp_session = await self.get_or_create_cdp_session()
            # Get current coordinates
            rect = await self.get_element_coordinates(node.backend_node_id, cdp_session)
            color = self.browser_profile.interaction_highlight_color
            duration_ms = int(self.browser_profile.interaction_highlight_duration * 1000)
            if not rect:
                self.logger.debug(f'No coordinates found for backend node {node.backend_node_id}')
                return
            # Create animated corner brackets that start offset and animate inward.
            # Values are injected via json.dumps / int formatting to keep the JS valid.
            script = f"""
            (function() {{
                const rect = {json.dumps({'x': rect.x, 'y': rect.y, 'width': rect.width, 'height': rect.height})};
                const color = {json.dumps(color)};
                const duration = {duration_ms};
                // Scale corner size based on element dimensions to ensure gaps between corners
                const maxCornerSize = 20;
                const minCornerSize = 8;
                const cornerSize = Math.max(
                    minCornerSize,
                    Math.min(maxCornerSize, Math.min(rect.width, rect.height) * 0.35)
                );
                const borderWidth = 3;
                const startOffset = 10; // Starting offset in pixels
                const finalOffset = -3; // Final position slightly outside the element
                // Get current scroll position
                const scrollX = window.pageXOffset || document.documentElement.scrollLeft || 0;
                const scrollY = window.pageYOffset || document.documentElement.scrollTop || 0;
                // Create container for all corners
                const container = document.createElement('div');
                container.setAttribute('data-browser-use-interaction-highlight', 'true');
                container.style.cssText = `
                    position: absolute;
                    left: ${{rect.x + scrollX}}px;
                    top: ${{rect.y + scrollY}}px;
                    width: ${{rect.width}}px;
                    height: ${{rect.height}}px;
                    pointer-events: none;
                    z-index: 2147483647;
                `;
                // Create 4 corner brackets
                const corners = [
                    {{ pos: 'top-left', startX: -startOffset, startY: -startOffset, finalX: finalOffset, finalY: finalOffset }},
                    {{ pos: 'top-right', startX: startOffset, startY: -startOffset, finalX: -finalOffset, finalY: finalOffset }},
                    {{ pos: 'bottom-left', startX: -startOffset, startY: startOffset, finalX: finalOffset, finalY: -finalOffset }},
                    {{ pos: 'bottom-right', startX: startOffset, startY: startOffset, finalX: -finalOffset, finalY: -finalOffset }}
                ];
                corners.forEach(corner => {{
                    const bracket = document.createElement('div');
                    bracket.style.cssText = `
                        position: absolute;
                        width: ${{cornerSize}}px;
                        height: ${{cornerSize}}px;
                        pointer-events: none;
                        transition: all 0.15s ease-out;
                    `;
                    // Position corners
                    if (corner.pos === 'top-left') {{
                        bracket.style.top = '0';
                        bracket.style.left = '0';
                        bracket.style.borderTop = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.borderLeft = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.transform = `translate(${{corner.startX}}px, ${{corner.startY}}px)`;
                    }} else if (corner.pos === 'top-right') {{
                        bracket.style.top = '0';
                        bracket.style.right = '0';
                        bracket.style.borderTop = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.borderRight = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.transform = `translate(${{corner.startX}}px, ${{corner.startY}}px)`;
                    }} else if (corner.pos === 'bottom-left') {{
                        bracket.style.bottom = '0';
                        bracket.style.left = '0';
                        bracket.style.borderBottom = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.borderLeft = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.transform = `translate(${{corner.startX}}px, ${{corner.startY}}px)`;
                    }} else if (corner.pos === 'bottom-right') {{
                        bracket.style.bottom = '0';
                        bracket.style.right = '0';
                        bracket.style.borderBottom = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.borderRight = `${{borderWidth}}px solid ${{color}}`;
                        bracket.style.transform = `translate(${{corner.startX}}px, ${{corner.startY}}px)`;
                    }}
                    container.appendChild(bracket);
                    // Animate to final position slightly outside the element
                    setTimeout(() => {{
                        bracket.style.transform = `translate(${{corner.finalX}}px, ${{corner.finalY}}px)`;
                    }}, 10);
                }});
                document.body.appendChild(container);
                // Auto-remove after duration
                setTimeout(() => {{
                    container.style.opacity = '0';
                    container.style.transition = 'opacity 0.3s ease-out';
                    setTimeout(() => container.remove(), 300);
                }}, duration);
                return {{ created: true }};
            }})();
            """
            # Fire and forget - don't wait for completion
            await cdp_session.cdp_client.send.Runtime.evaluate(
                params={'expression': script, 'returnByValue': True}, session_id=cdp_session.session_id
            )
        except Exception as e:
            # Don't fail the action if highlighting fails
            self.logger.debug(f'Failed to highlight interaction element: {e}')
    async def highlight_coordinate_click(self, x: int, y: int) -> None:
        """Temporarily highlight a coordinate click position for user visibility.
        This creates a visual highlight at the specified coordinates showing where
        the click action occurred. The highlight automatically fades after the configured duration.
        No-op when highlighting is disabled; failures are logged at debug level and swallowed.
        Args:
            x: Horizontal coordinate relative to viewport left edge
            y: Vertical coordinate relative to viewport top edge
        """
        if not self.browser_profile.highlight_elements:
            return
        try:
            import json
            cdp_session = await self.get_or_create_cdp_session()
            color = self.browser_profile.interaction_highlight_color
            duration_ms = int(self.browser_profile.interaction_highlight_duration * 1000)
            # Create animated crosshair and circle at the click coordinates
            script = f"""
            (function() {{
                const x = {x};
                const y = {y};
                const color = {json.dumps(color)};
                const duration = {duration_ms};
                // Get current scroll position
                const scrollX = window.pageXOffset || document.documentElement.scrollLeft || 0;
                const scrollY = window.pageYOffset || document.documentElement.scrollTop || 0;
                // Create container
                const container = document.createElement('div');
                container.setAttribute('data-browser-use-coordinate-highlight', 'true');
                container.style.cssText = `
                    position: absolute;
                    left: ${{x + scrollX}}px;
                    top: ${{y + scrollY}}px;
                    width: 0;
                    height: 0;
                    pointer-events: none;
                    z-index: 2147483647;
                `;
                // Create outer circle
                const outerCircle = document.createElement('div');
                outerCircle.style.cssText = `
                    position: absolute;
                    left: -15px;
                    top: -15px;
                    width: 30px;
                    height: 30px;
                    border: 3px solid ${{color}};
                    border-radius: 50%;
                    opacity: 0;
                    transform: scale(0.3);
                    transition: all 0.2s ease-out;
                `;
                container.appendChild(outerCircle);
                // Create center dot
                const centerDot = document.createElement('div');
                centerDot.style.cssText = `
                    position: absolute;
                    left: -4px;
                    top: -4px;
                    width: 8px;
                    height: 8px;
                    background: ${{color}};
                    border-radius: 50%;
                    opacity: 0;
                    transform: scale(0);
                    transition: all 0.15s ease-out;
                `;
                container.appendChild(centerDot);
                document.body.appendChild(container);
                // Animate in
                setTimeout(() => {{
                    outerCircle.style.opacity = '0.8';
                    outerCircle.style.transform = 'scale(1)';
                    centerDot.style.opacity = '1';
                    centerDot.style.transform = 'scale(1)';
                }}, 10);
                // Animate out and remove
                setTimeout(() => {{
                    outerCircle.style.opacity = '0';
                    outerCircle.style.transform = 'scale(1.5)';
                    centerDot.style.opacity = '0';
                    setTimeout(() => container.remove(), 300);
                }}, duration);
                return {{ created: true }};
            }})();
            """
            # Fire and forget - don't wait for completion
            await cdp_session.cdp_client.send.Runtime.evaluate(
                params={'expression': script, 'returnByValue': True}, session_id=cdp_session.session_id
            )
        except Exception as e:
            # Don't fail the action if highlighting fails
            self.logger.debug(f'Failed to highlight coordinate click: {e}')
async def add_highlights(self, selector_map: dict[int, 'EnhancedDOMTreeNode']) -> None:
"""Add visual highlights to the browser DOM for user visibility."""
if not self.browser_profile.dom_highlight_elements or not selector_map:
return
try:
import json
# Convert selector_map to the format expected by the highlighting script
elements_data = []
for _, node in selector_map.items():
# Get bounding box using absolute position (includes iframe translations) if available
if node.absolute_position:
# Use absolute position which includes iframe coordinate translations
rect = node.absolute_position
bbox = {'x': rect.x, 'y': rect.y, 'width': rect.width, 'height': rect.height}
# Only include elements with valid bounding boxes
if bbox and bbox.get('width', 0) > 0 and bbox.get('height', 0) > 0:
element = {
'x': bbox['x'],
'y': bbox['y'],
'width': bbox['width'],
'height': bbox['height'],
'element_name': node.node_name,
'is_clickable': node.snapshot_node.is_clickable if node.snapshot_node else True,
'is_scrollable': getattr(node, 'is_scrollable', False),
'attributes': node.attributes or {},
'frame_id': getattr(node, 'frame_id', None),
'node_id': node.node_id,
'backend_node_id': node.backend_node_id,
'xpath': node.xpath,
'text_content': node.get_all_children_text()[:50]
if hasattr(node, 'get_all_children_text')
else node.node_value[:50],
}
elements_data.append(element)
if not elements_data:
self.logger.debug('⚠️ No valid elements to highlight')
return
self.logger.debug(f'📍 Creating highlights for {len(elements_data)} elements')
# Always remove existing highlights first
await self.remove_highlights()
# Add a small delay to ensure removal completes
import asyncio
await asyncio.sleep(0.05)
# Get CDP session
cdp_session = await self.get_or_create_cdp_session()
# Create the proven highlighting script from v0.6.0 with fixed positioning
script = f"""
(function() {{
// Interactive elements data
const interactiveElements = {json.dumps(elements_data)};
console.log('=== BROWSER-USE HIGHLIGHTING ===');
console.log('Highlighting', interactiveElements.length, 'interactive elements');
// Double-check: Remove any existing highlight container first
const existingContainer = document.getElementById('browser-use-debug-highlights');
if (existingContainer) {{
console.log('⚠️ Found existing highlight container, removing it first');
existingContainer.remove();
}}
// Also remove any stray highlight elements
const strayHighlights = document.querySelectorAll('[data-browser-use-highlight]');
if (strayHighlights.length > 0) {{
console.log('⚠️ Found', strayHighlights.length, 'stray highlight elements, removing them');
strayHighlights.forEach(el => el.remove());
}}
// Use maximum z-index for visibility
const HIGHLIGHT_Z_INDEX = 2147483647;
// Create container for all highlights - use FIXED positioning (key insight from v0.6.0)
const container = document.createElement('div');
container.id = 'browser-use-debug-highlights';
container.setAttribute('data-browser-use-highlight', 'container');
container.style.cssText = `
position: absolute;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
pointer-events: none;
z-index: ${{HIGHLIGHT_Z_INDEX}};
overflow: visible;
margin: 0;
padding: 0;
border: none;
outline: none;
box-shadow: none;
background: none;
font-family: inherit;
`;
// Helper function to create text elements safely
function createTextElement(tag, text, styles) {{
const element = document.createElement(tag);
element.textContent = text;
if (styles) element.style.cssText = styles;
return element;
}}
// Add highlights for each element
interactiveElements.forEach((element, index) => {{
const highlight = document.createElement('div');
highlight.setAttribute('data-browser-use-highlight', 'element');
highlight.setAttribute('data-element-id', element.backend_node_id);
highlight.style.cssText = `
position: absolute;
left: ${{element.x}}px;
top: ${{element.y}}px;
width: ${{element.width}}px;
height: ${{element.height}}px;
outline: 2px dashed #4a90e2;
outline-offset: -2px;
background: transparent;
pointer-events: none;
box-sizing: content-box;
transition: outline 0.2s ease;
margin: 0;
padding: 0;
border: none;
`;
// Enhanced label with backend node ID
const label = createTextElement('div', element.backend_node_id, `
position: absolute;
top: -20px;
left: 0;
background-color: #4a90e2;
color: white;
padding: 2px 6px;
font-size: 11px;
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace;
font-weight: bold;
border-radius: 3px;
white-space: nowrap;
z-index: ${{HIGHLIGHT_Z_INDEX + 1}};
box-shadow: 0 2px 4px rgba(0,0,0,0.3);
border: none;
outline: none;
margin: 0;
line-height: 1.2;
`);
highlight.appendChild(label);
container.appendChild(highlight);
}});
// Add container to document
document.body.appendChild(container);
console.log('Highlighting complete - added', interactiveElements.length, 'highlights');
return {{ added: interactiveElements.length }};
}})();
"""
# Execute the script
result = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': script, 'returnByValue': True}, session_id=cdp_session.session_id
)
# Log the result
if result and 'result' in result and 'value' in result['result']:
added_count = result['result']['value'].get('added', 0)
self.logger.debug(f'Successfully added {added_count} highlight elements to browser DOM')
else:
self.logger.debug('Browser highlight injection completed')
except Exception as e:
self.logger.warning(f'Failed to add browser highlights: {e}')
import traceback
self.logger.debug(f'Browser highlight traceback: {traceback.format_exc()}')
async def _close_extension_options_pages(self) -> None:
"""Close any extension options/welcome pages that have opened."""
try:
# Get all page targets from SessionManager
page_targets = self.session_manager.get_all_page_targets()
for target in page_targets:
target_url = target.url
target_id = target.target_id
# Check if this is an extension options/welcome page
if 'chrome-extension://' in target_url and (
'options.html' in target_url or 'welcome.html' in target_url or 'onboarding.html' in target_url
):
self.logger.info(f'[BrowserSession] 🚫 Closing extension options page: {target_url}')
try:
await self._cdp_close_page(target_id)
except Exception as e:
self.logger.debug(f'[BrowserSession] Could not close extension page {target_id}: {e}')
except Exception as e:
self.logger.debug(f'[BrowserSession] Error closing extension options pages: {e}')
async def send_demo_mode_log(self, message: str, level: str = 'info', metadata: dict[str, Any] | None = None) -> None:
"""Send a message to the in-browser demo panel if enabled."""
if not self.browser_profile.demo_mode:
return
demo = self.demo_mode
if not demo:
return
try:
await demo.send_log(message=message, level=level, metadata=metadata or {})
except Exception as exc:
self.logger.debug(f'[DemoMode] Failed to send log: {exc}')
@property
def downloaded_files(self) -> list[str]:
"""Get list of files downloaded during this browser session.
Returns:
list[str]: List of absolute file paths to downloaded files in this session
"""
return self._downloaded_files.copy()
# endregion - ========== Helper Methods ==========
# region - ========== CDP-based replacements for browser_context operations ==========
async def _cdp_get_all_pages(
self,
include_http: bool = True,
include_about: bool = True,
include_pages: bool = True,
include_iframes: bool = False,
include_workers: bool = False,
include_chrome: bool = False,
include_chrome_extensions: bool = False,
include_chrome_error: bool = False,
) -> list[TargetInfo]:
"""Get all browser pages/tabs using SessionManager (source of truth)."""
# Safety check - return empty list if browser not connected yet
if not self.session_manager:
return []
# Build TargetInfo dicts from SessionManager owned data (crystal clear ownership)
result = []
for target_id, target in self.session_manager.get_all_targets().items():
# Create TargetInfo dict
target_info: TargetInfo = {
'targetId': target.target_id,
'type': target.target_type,
'title': target.title,
'url': target.url,
'attached': True,
'canAccessOpener': False,
}
# Apply filters
if self._is_valid_target(
target_info,
include_http=include_http,
include_about=include_about,
include_pages=include_pages,
include_iframes=include_iframes,
include_workers=include_workers,
include_chrome=include_chrome,
include_chrome_extensions=include_chrome_extensions,
include_chrome_error=include_chrome_error,
):
result.append(target_info)
return result
async def _cdp_create_new_page(self, url: str = 'about:blank', background: bool = False, new_window: bool = False) -> str:
"""Create a new page/tab using CDP Target.createTarget. Returns target ID."""
# Use the root CDP client to create tabs at the browser level
if self._cdp_client_root:
result = await self._cdp_client_root.send.Target.createTarget(
params={'url': url, 'newWindow': new_window, 'background': background}
)
else:
# Fallback to using cdp_client if root is not available
result = await self.cdp_client.send.Target.createTarget(
params={'url': url, 'newWindow': new_window, 'background': background}
)
return result['targetId']
async def _cdp_close_page(self, target_id: TargetID) -> None:
"""Close a page/tab using CDP Target.closeTarget."""
await self.cdp_client.send.Target.closeTarget(params={'targetId': target_id})
	async def _cdp_get_cookies(self) -> list[Cookie]:
		"""Fetch all browser cookies using CDP Storage.getCookies.

		Returns:
			list[Cookie]: All cookies reported by the browser, or [] when none.
		"""
		cdp_session = await self.get_or_create_cdp_session(target_id=None)
		# Guard against a hung CDP connection with an explicit timeout.
		result = await asyncio.wait_for(
			cdp_session.cdp_client.send.Storage.getCookies(session_id=cdp_session.session_id), timeout=8.0
		)
		return result.get('cookies', [])
async def _cdp_set_cookies(self, cookies: list[Cookie]) -> None:
"""Set cookies using CDP Storage.setCookies."""
if not self.agent_focus_target_id or not cookies:
return
cdp_session = await self.get_or_create_cdp_session(target_id=None)
# Storage.setCookies expects params dict with 'cookies' key
await cdp_session.cdp_client.send.Storage.setCookies(
params={'cookies': cookies}, # type: ignore[arg-type]
session_id=cdp_session.session_id,
)
	async def _cdp_clear_cookies(self) -> None:
		"""Delete all browser cookies using CDP Storage.clearCookies."""
		cdp_session = await self.get_or_create_cdp_session()
		await cdp_session.cdp_client.send.Storage.clearCookies(session_id=cdp_session.session_id)
async def _cdp_grant_permissions(self, permissions: list[str], origin: str | None = None) -> None:
"""Grant permissions using CDP Browser.grantPermissions."""
params = {'permissions': permissions}
# if origin:
# params['origin'] = origin
cdp_session = await self.get_or_create_cdp_session()
# await cdp_session.cdp_client.send.Browser.grantPermissions(params=params, session_id=cdp_session.session_id)
raise NotImplementedError('Not implemented yet')
async def _cdp_set_geolocation(self, latitude: float, longitude: float, accuracy: float = 100) -> None:
"""Set geolocation using CDP Emulation.setGeolocationOverride."""
await self.cdp_client.send.Emulation.setGeolocationOverride(
params={'latitude': latitude, 'longitude': longitude, 'accuracy': accuracy}
)
	async def _cdp_clear_geolocation(self) -> None:
		"""Remove any geolocation override via CDP Emulation.clearGeolocationOverride."""
		await self.cdp_client.send.Emulation.clearGeolocationOverride()
async def _cdp_add_init_script(self, script: str) -> str:
"""Add script to evaluate on new document using CDP Page.addScriptToEvaluateOnNewDocument."""
assert self._cdp_client_root is not None
cdp_session = await self.get_or_create_cdp_session()
result = await cdp_session.cdp_client.send.Page.addScriptToEvaluateOnNewDocument(
params={'source': script, 'runImmediately': True}, session_id=cdp_session.session_id
)
return result['identifier']
async def _cdp_remove_init_script(self, identifier: str) -> None:
"""Remove script added with addScriptToEvaluateOnNewDocument."""
cdp_session = await self.get_or_create_cdp_session(target_id=None)
await cdp_session.cdp_client.send.Page.removeScriptToEvaluateOnNewDocument(
params={'identifier': identifier}, session_id=cdp_session.session_id
)
async def _cdp_set_viewport(
self, width: int, height: int, device_scale_factor: float = 1.0, mobile: bool = False, target_id: str | None = None
) -> None:
"""Set viewport using CDP Emulation.setDeviceMetricsOverride.
Args:
width: Viewport width
height: Viewport height
device_scale_factor: Device scale factor (default 1.0)
mobile: Whether to emulate mobile device (default False)
target_id: Optional target ID to set viewport for. If not provided, uses agent_focus.
"""
if target_id:
# Set viewport for specific target
cdp_session = await self.get_or_create_cdp_session(target_id, focus=False)
elif self.agent_focus_target_id:
# Use current focus - use safe API with focus=False to avoid changing focus
try:
cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
except ValueError:
self.logger.warning('Cannot set viewport: focused target has no sessions')
return
else:
self.logger.warning('Cannot set viewport: no target_id provided and agent_focus not initialized')
return
await cdp_session.cdp_client.send.Emulation.setDeviceMetricsOverride(
params={'width': width, 'height': height, 'deviceScaleFactor': device_scale_factor, 'mobile': mobile},
session_id=cdp_session.session_id,
)
	async def _cdp_get_origins(self) -> list[dict[str, Any]]:
		"""Collect per-origin localStorage/sessionStorage snapshots using CDP.

		Returns:
			list[dict[str, Any]]: One entry per origin that actually has storage,
			shaped like {'origin': ..., 'localStorage': [...], 'sessionStorage': [...]}
			where each storage item is {'name': key, 'value': value}.
			Returns [] when collection fails entirely.
		"""
		origins = []
		cdp_session = await self.get_or_create_cdp_session(target_id=None)
		try:
			# Enable DOMStorage domain to track storage
			await cdp_session.cdp_client.send.DOMStorage.enable(session_id=cdp_session.session_id)
			try:
				# Get all frames to find unique origins
				frames_result = await cdp_session.cdp_client.send.Page.getFrameTree(session_id=cdp_session.session_id)
				# Extract unique origins from frames
				unique_origins = set()
				def _extract_origins(frame_tree):
					"""Recursively extract origins from frame tree."""
					frame = frame_tree.get('frame', {})
					origin = frame.get('securityOrigin')
					# 'null' is the literal string CDP reports for opaque origins — skip it
					if origin and origin != 'null':
						unique_origins.add(origin)
					# Process child frames
					for child in frame_tree.get('childFrames', []):
						_extract_origins(child)
				async def _get_storage_items(origin: str, is_local_storage: bool) -> list[dict[str, str]] | None:
					"""Helper to get storage items for an origin; None when empty or on error."""
					storage_type = 'localStorage' if is_local_storage else 'sessionStorage'
					try:
						result = await cdp_session.cdp_client.send.DOMStorage.getDOMStorageItems(
							params={'storageId': {'securityOrigin': origin, 'isLocalStorage': is_local_storage}},
							session_id=cdp_session.session_id,
						)
						items = []
						for item in result.get('entries', []):
							if len(item) == 2:  # Each item is [key, value]
								items.append({'name': item[0], 'value': item[1]})
						return items if items else None
					except Exception as e:
						self.logger.debug(f'Failed to get {storage_type} for {origin}: {e}')
						return None
				_extract_origins(frames_result.get('frameTree', {}))
				# For each unique origin, get localStorage and sessionStorage
				for origin in unique_origins:
					origin_data = {'origin': origin}
					# Get localStorage
					local_storage = await _get_storage_items(origin, is_local_storage=True)
					if local_storage:
						origin_data['localStorage'] = local_storage
					# Get sessionStorage
					session_storage = await _get_storage_items(origin, is_local_storage=False)
					if session_storage:
						origin_data['sessionStorage'] = session_storage
					# Only add origin if it has storage data
					if 'localStorage' in origin_data or 'sessionStorage' in origin_data:
						origins.append(origin_data)
			finally:
				# Always disable DOMStorage tracking when done
				await cdp_session.cdp_client.send.DOMStorage.disable(session_id=cdp_session.session_id)
		except Exception as e:
			self.logger.warning(f'Failed to get origins: {e}')
		return origins
async def _cdp_get_storage_state(self) -> dict:
"""Get storage state (cookies, localStorage, sessionStorage) using CDP."""
# Use the _cdp_get_cookies helper which handles session attachment
cookies = await self._cdp_get_cookies()
# Get origins with localStorage/sessionStorage
origins = await self._cdp_get_origins()
return {
'cookies': cookies,
'origins': origins,
}
async def _cdp_navigate(self, url: str, target_id: TargetID | None = None) -> None:
"""Navigate to URL using CDP Page.navigate."""
# Use provided target_id or fall back to agent_focus_target_id
assert self._cdp_client_root is not None, 'CDP client not initialized - browser may not be connected yet'
assert self.agent_focus_target_id is not None, 'Agent focus not initialized - browser may not be connected yet'
target_id_to_use = target_id or self.agent_focus_target_id
cdp_session = await self.get_or_create_cdp_session(target_id_to_use, focus=True)
# Use helper to navigate on the target
await cdp_session.cdp_client.send.Page.navigate(params={'url': url}, session_id=cdp_session.session_id)
@staticmethod
def _is_valid_target(
target_info: TargetInfo,
include_http: bool = True,
include_chrome: bool = False,
include_chrome_extensions: bool = False,
include_chrome_error: bool = False,
include_about: bool = True,
include_iframes: bool = True,
include_pages: bool = True,
include_workers: bool = False,
) -> bool:
"""Check if a target should be processed.
Args:
target_info: Target info dict from CDP
Returns:
True if target should be processed, False if it should be skipped
"""
target_type = target_info.get('type', '')
url = target_info.get('url', '')
url_allowed, type_allowed = False, False
# Always allow new tab pages (chrome://new-tab-page/, chrome://newtab/, about:blank)
# so they can be redirected to about:blank in connect()
from browser_use.utils import is_new_tab_page
if is_new_tab_page(url):
url_allowed = True
if url.startswith('chrome-error://') and include_chrome_error:
url_allowed = True
if url.startswith('chrome://') and include_chrome:
url_allowed = True
if url.startswith('chrome-extension://') and include_chrome_extensions:
url_allowed = True
# dont allow about:srcdoc! there are also other rare about: pages that we want to avoid
if url == 'about:blank' and include_about:
url_allowed = True
if (url.startswith('http://') or url.startswith('https://')) and include_http:
url_allowed = True
if target_type in ('service_worker', 'shared_worker', 'worker') and include_workers:
type_allowed = True
if target_type in ('page', 'tab') and include_pages:
type_allowed = True
if target_type in ('iframe', 'webview') and include_iframes:
type_allowed = True
return url_allowed and type_allowed
	async def get_all_frames(self) -> tuple[dict[str, dict], dict[str, str]]:
		"""Get a complete frame hierarchy from all browser targets.

		Walks every relevant target, merges each target's Page.getFrameTree output
		into a single dict, and — when cross-origin iframe support is enabled —
		runs a second pass to attach parent-target and backend-node metadata.

		Returns:
			Tuple of (all_frames, target_sessions) where:
			- all_frames: dict mapping frame_id -> frame info dict with all metadata
			- target_sessions: dict mapping target_id -> session_id for active sessions
		"""
		all_frames = {}  # frame_id -> FrameInfo dict
		target_sessions = {}  # target_id -> session_id (keep sessions alive during collection)
		# Check if cross-origin iframe support is enabled
		include_cross_origin = self.browser_profile.cross_origin_iframes
		# Get all targets - only include iframes if cross-origin support is enabled
		targets = await self._cdp_get_all_pages(
			include_http=True,
			include_about=True,
			include_pages=True,
			include_iframes=include_cross_origin,  # Only include iframe targets if flag is set
			include_workers=False,
			include_chrome=False,
			include_chrome_extensions=False,
			include_chrome_error=include_cross_origin,  # Only include error pages if cross-origin is enabled
		)
		all_targets = targets
		# First pass: collect frame trees from ALL targets
		for target in all_targets:
			target_id = target['targetId']
			# Skip iframe targets if cross-origin support is disabled
			if not include_cross_origin and target.get('type') == 'iframe':
				continue
			# When cross-origin support is disabled, only process the current target
			if not include_cross_origin:
				# Only process the current focus target
				if self.agent_focus_target_id and target_id != self.agent_focus_target_id:
					continue
				# Use the existing agent_focus target's session - use safe API with focus=False
				try:
					cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
				except ValueError:
					continue  # Skip if no session available
			else:
				# Get cached session for this target (don't change focus - iterating frames)
				cdp_session = await self.get_or_create_cdp_session(target_id, focus=False)
			if cdp_session:
				target_sessions[target_id] = cdp_session.session_id
			try:
				# Try to get frame tree (not all target types support this)
				frame_tree_result = await cdp_session.cdp_client.send.Page.getFrameTree(session_id=cdp_session.session_id)
				# Process the frame tree recursively.
				# NOTE: this closure reads target/target_id from the enclosing loop iteration.
				def process_frame_tree(node, parent_frame_id=None):
					"""Recursively process frame tree and add to all_frames."""
					frame = node.get('frame', {})
					current_frame_id = frame.get('id')
					if current_frame_id:
						# For iframe targets, check if the frame has a parentId field
						# This indicates it's an OOPIF with a parent in another target
						actual_parent_id = frame.get('parentId') or parent_frame_id
						# Create frame info with all CDP response data plus our additions
						frame_info = {
							**frame,  # Include all original frame data: id, url, parentId, etc.
							'frameTargetId': target_id,  # Target that can access this frame
							'parentFrameId': actual_parent_id,  # Use parentId from frame if available
							'childFrameIds': [],  # Will be populated below
							'isCrossOrigin': False,  # Will be determined based on context
							'isValidTarget': self._is_valid_target(
								target,
								include_http=True,
								include_about=True,
								include_pages=True,
								include_iframes=True,
								include_workers=False,
								include_chrome=False,  # chrome://newtab, chrome://settings, etc. are not valid frames we can control (for sanity reasons)
								include_chrome_extensions=False,  # chrome-extension://
								include_chrome_error=False,  # chrome-error:// (e.g. when iframes fail to load or are blocked by uBlock Origin)
							),
						}
						# Check if frame is cross-origin based on crossOriginIsolatedContextType
						cross_origin_type = frame.get('crossOriginIsolatedContextType')
						if cross_origin_type and cross_origin_type != 'NotIsolated':
							frame_info['isCrossOrigin'] = True
						# For iframe targets, the frame itself is likely cross-origin
						if target.get('type') == 'iframe':
							frame_info['isCrossOrigin'] = True
						# Skip cross-origin frames if support is disabled
						if not include_cross_origin and frame_info.get('isCrossOrigin'):
							return  # Skip this frame and its children
						# Add child frame IDs (note: OOPIFs won't appear here)
						child_frames = node.get('childFrames', [])
						for child in child_frames:
							child_frame = child.get('frame', {})
							child_frame_id = child_frame.get('id')
							if child_frame_id:
								frame_info['childFrameIds'].append(child_frame_id)
						# Store or merge frame info
						if current_frame_id in all_frames:
							# Frame already seen from another target, merge info
							existing = all_frames[current_frame_id]
							# If this is an iframe target, it has direct access to the frame
							if target.get('type') == 'iframe':
								existing['frameTargetId'] = target_id
								existing['isCrossOrigin'] = True
						else:
							all_frames[current_frame_id] = frame_info
						# Process child frames recursively (only if we're not skipping this frame)
						if include_cross_origin or not frame_info.get('isCrossOrigin'):
							for child in child_frames:
								process_frame_tree(child, current_frame_id)
				# Process the entire frame tree
				process_frame_tree(frame_tree_result.get('frameTree', {}))
			except Exception as e:
				# Target doesn't support Page domain or has no frames
				self.logger.debug(f'Failed to get frame tree for target {target_id}: {e}')
		# Second pass: populate backend node IDs and parent target IDs
		# Only do this if cross-origin support is enabled
		if include_cross_origin:
			await self._populate_frame_metadata(all_frames, target_sessions)
		return all_frames, target_sessions
async def _populate_frame_metadata(self, all_frames: dict[str, dict], target_sessions: dict[str, str]) -> None:
"""Populate additional frame metadata like backend node IDs and parent target IDs.
Args:
all_frames: Frame hierarchy dict to populate
target_sessions: Active target sessions
"""
for frame_id_iter, frame_info in all_frames.items():
parent_frame_id = frame_info.get('parentFrameId')
if parent_frame_id and parent_frame_id in all_frames:
parent_frame_info = all_frames[parent_frame_id]
parent_target_id = parent_frame_info.get('frameTargetId')
# Store parent target ID
frame_info['parentTargetId'] = parent_target_id
# Try to get backend node ID from parent context
if parent_target_id in target_sessions:
assert parent_target_id is not None
parent_session_id = target_sessions[parent_target_id]
try:
# Enable DOM domain
await self.cdp_client.send.DOM.enable(session_id=parent_session_id)
# Get frame owner info to find backend node ID
frame_owner = await self.cdp_client.send.DOM.getFrameOwner(
params={'frameId': frame_id_iter}, session_id=parent_session_id
)
if frame_owner:
frame_info['backendNodeId'] = frame_owner.get('backendNodeId')
frame_info['nodeId'] = frame_owner.get('nodeId')
except Exception:
# Frame owner not available (likely cross-origin)
pass
async def find_frame_target(self, frame_id: str, all_frames: dict[str, dict] | None = None) -> dict | None:
"""Find the frame info for a specific frame ID.
Args:
frame_id: The frame ID to search for
all_frames: Optional pre-built frame hierarchy. If None, will call get_all_frames()
Returns:
Frame info dict if found, None otherwise
"""
if all_frames is None:
all_frames, _ = await self.get_all_frames()
return all_frames.get(frame_id)
	async def cdp_client_for_target(self, target_id: TargetID) -> CDPSession:
		"""Return a CDP session attached to *target_id* without changing agent focus."""
		return await self.get_or_create_cdp_session(target_id, focus=False)
async def cdp_client_for_frame(self, frame_id: str) -> CDPSession:
"""Get a CDP client attached to the target containing the specified frame.
Builds a unified frame hierarchy from all targets to find the correct target
for any frame, including OOPIFs (Out-of-Process iframes).
Args:
frame_id: The frame ID to search for
Returns:
Tuple of (cdp_cdp_session, target_id) for the target containing the frame
Raises:
ValueError: If the frame is not found in any target
"""
# If cross-origin iframes are disabled, just use the main session
if not self.browser_profile.cross_origin_iframes:
return await self.get_or_create_cdp_session()
# Get complete frame hierarchy
all_frames, target_sessions = await self.get_all_frames()
# Find the requested frame
frame_info = await self.find_frame_target(frame_id, all_frames)
if frame_info:
target_id = frame_info.get('frameTargetId')
if target_id in target_sessions:
assert target_id is not None
# Use existing session
session_id = target_sessions[target_id]
# Return the client with session attached (don't change focus)
return await self.get_or_create_cdp_session(target_id, focus=False)
# Frame not found
raise ValueError(f"Frame with ID '{frame_id}' not found in any target")
	async def cdp_client_for_node(self, node: EnhancedDOMTreeNode) -> CDPSession:
		"""Get CDP client for a specific DOM node based on its frame.

		IMPORTANT: backend_node_id is only valid in the session where the DOM was captured.
		We trust the node's session_id/frame_id/target_id instead of searching all sessions.

		Args:
			node: DOM node carrying optional session_id/frame_id/target_id hints.

		Returns:
			CDPSession for the most specific context available; falls back to the
			agent's focused target and finally to the main session.
		"""
		# Strategy 1: If node has session_id, try to use that exact session (most specific)
		if node.session_id and self.session_manager:
			try:
				# Find the CDP session by session_id from SessionManager
				cdp_session = self.session_manager.get_session(node.session_id)
				if cdp_session:
					# Get target to log URL
					target = self.session_manager.get_target(cdp_session.target_id)
					self.logger.debug(f'✅ Using session from node.session_id for node {node.backend_node_id}: {target.url}')
					return cdp_session
			except Exception as e:
				self.logger.debug(f'Failed to get session by session_id {node.session_id}: {e}')
		# Strategy 2: If node has frame_id, use that frame's session
		if node.frame_id:
			try:
				cdp_session = await self.cdp_client_for_frame(node.frame_id)
				target = self.session_manager.get_target(cdp_session.target_id)
				self.logger.debug(f'✅ Using session from node.frame_id for node {node.backend_node_id}: {target.url}')
				return cdp_session
			except Exception as e:
				self.logger.debug(f'Failed to get session for frame {node.frame_id}: {e}')
		# Strategy 3: If node has target_id, use that target's session
		if node.target_id:
			try:
				cdp_session = await self.get_or_create_cdp_session(target_id=node.target_id, focus=False)
				target = self.session_manager.get_target(cdp_session.target_id)
				self.logger.debug(f'✅ Using session from node.target_id for node {node.backend_node_id}: {target.url}')
				return cdp_session
			except Exception as e:
				self.logger.debug(f'Failed to get session for target {node.target_id}: {e}')
		# Strategy 4: Fallback to agent_focus_target_id (the page where agent is currently working)
		if self.agent_focus_target_id:
			target = self.session_manager.get_target(self.agent_focus_target_id)
			try:
				# Use safe API with focus=False to avoid changing focus
				cdp_session = await self.get_or_create_cdp_session(self.agent_focus_target_id, focus=False)
				if target:
					self.logger.warning(
						f'⚠️ Node {node.backend_node_id} has no session/frame/target info. Using agent_focus session: {target.url}'
					)
				return cdp_session
			except ValueError:
				pass  # Fall through to last resort
		# Last resort: use main session
		self.logger.error(f'❌ No session info for node {node.backend_node_id} and no agent_focus available. Using main session.')
		return await self.get_or_create_cdp_session()
@observe_debug(ignore_input=True, ignore_output=True, name='take_screenshot')
async def take_screenshot(
self,
path: str | None = None,
full_page: bool = False,
format: str = 'png',
quality: int | None = None,
clip: dict | None = None,
) -> bytes:
"""Take a screenshot using CDP.
Args:
path: Optional file path to save screenshot
full_page: Capture entire scrollable page beyond viewport
format: Image format ('png', 'jpeg', 'webp')
quality: Quality 0-100 for JPEG format
clip: Region to capture {'x': int, 'y': int, 'width': int, 'height': int}
Returns:
Screenshot data as bytes
"""
import base64
from cdp_use.cdp.page import CaptureScreenshotParameters
cdp_session = await self.get_or_create_cdp_session()
# Build parameters dict explicitly to satisfy TypedDict expectations
params: CaptureScreenshotParameters = {
'format': format,
'captureBeyondViewport': full_page,
}
if quality is not None and format == 'jpeg':
params['quality'] = quality
if clip:
params['clip'] = {
'x': clip['x'],
'y': clip['y'],
'width': clip['width'],
'height': clip['height'],
'scale': 1,
}
params = CaptureScreenshotParameters(**params)
result = await cdp_session.cdp_client.send.Page.captureScreenshot(params=params, session_id=cdp_session.session_id)
if not result or 'data' not in result:
raise Exception('Screenshot failed - no data returned')
screenshot_data = base64.b64decode(result['data'])
if path:
Path(path).write_bytes(screenshot_data)
return screenshot_data
async def screenshot_element(
self,
selector: str,
path: str | None = None,
format: str = 'png',
quality: int | None = None,
) -> bytes:
"""Take a screenshot of a specific element.
Args:
selector: CSS selector for the element
path: Optional file path to save screenshot
format: Image format ('png', 'jpeg', 'webp')
quality: Quality 0-100 for JPEG format
Returns:
Screenshot data as bytes
"""
bounds = await self._get_element_bounds(selector)
if not bounds:
raise ValueError(f"Element '{selector}' not found or has no bounds")
return await self.take_screenshot(
path=path,
format=format,
quality=quality,
clip=bounds,
)
async def _get_element_bounds(self, selector: str) -> dict | None:
"""Get element bounding box using CDP."""
cdp_session = await self.get_or_create_cdp_session()
# Get document
doc = await cdp_session.cdp_client.send.DOM.getDocument(params={'depth': 1}, session_id=cdp_session.session_id)
# Query selector
node_result = await cdp_session.cdp_client.send.DOM.querySelector(
params={'nodeId': doc['root']['nodeId'], 'selector': selector}, session_id=cdp_session.session_id
)
node_id = node_result.get('nodeId')
if not node_id:
return None
# Get bounding box
box_result = await cdp_session.cdp_client.send.DOM.getBoxModel(
params={'nodeId': node_id}, session_id=cdp_session.session_id
)
box_model = box_result.get('model')
if not box_model:
return None
content = box_model['content']
return {
'x': min(content[0], content[2], content[4], content[6]),
'y': min(content[1], content[3], content[5], content[7]),
'width': max(content[0], content[2], content[4], content[6]) - min(content[0], content[2], content[4], content[6]),
'height': max(content[1], content[3], content[5], content[7]) - min(content[1], content[3], content[5], content[7]),
}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/session.py",
"license": "MIT License",
"lines": 3262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/video_recorder.py | """Video Recording Service for Browser Use Sessions."""
import base64
import io
import logging
import math
from pathlib import Path
from typing import Optional
from browser_use.browser.profile import ViewportSize
try:
import imageio.v2 as iio # type: ignore[import-not-found]
import numpy as np # type: ignore[import-not-found]
from imageio.core.format import Format # type: ignore[import-not-found]
from PIL import Image
IMAGEIO_AVAILABLE = True
except ImportError:
IMAGEIO_AVAILABLE = False
logger = logging.getLogger(__name__)
def _get_padded_size(size: ViewportSize, macro_block_size: int = 16) -> ViewportSize:
"""Calculates the dimensions padded to the nearest multiple of macro_block_size."""
width = int(math.ceil(size['width'] / macro_block_size)) * macro_block_size
height = int(math.ceil(size['height'] / macro_block_size)) * macro_block_size
return ViewportSize(width=width, height=height)
class VideoRecorderService:
"""
Handles the video encoding process for a browser session using imageio.
This service captures individual frames from the CDP screencast, decodes them,
and appends them to a video file using a pip-installable ffmpeg backend.
It automatically resizes frames to match the target video dimensions.
"""
def __init__(self, output_path: Path, size: ViewportSize, framerate: int):
"""
Initializes the video recorder.
Args:
output_path: The full path where the video will be saved.
size: A ViewportSize object specifying the width and height of the video.
framerate: The desired framerate for the output video.
"""
self.output_path = output_path
self.size = size
self.framerate = framerate
self._writer: Optional['Format.Writer'] = None
self._is_active = False
self.padded_size = _get_padded_size(self.size)
def start(self) -> None:
"""
Prepares and starts the video writer.
If the required optional dependencies are not installed, this method will
log an error and do nothing.
"""
if not IMAGEIO_AVAILABLE:
logger.error(
'MP4 recording requires optional dependencies. Please install them with: pip install "browser-use[video]"'
)
return
try:
self.output_path.parent.mkdir(parents=True, exist_ok=True)
# The macro_block_size is set to None because we handle padding ourselves
self._writer = iio.get_writer(
str(self.output_path),
fps=self.framerate,
codec='libx264',
quality=8, # A good balance of quality and file size (1-10 scale)
pixelformat='yuv420p', # Ensures compatibility with most players
macro_block_size=None,
)
self._is_active = True
logger.debug(f'Video recorder started. Output will be saved to {self.output_path}')
except Exception as e:
logger.error(f'Failed to initialize video writer: {e}')
self._is_active = False
def add_frame(self, frame_data_b64: str) -> None:
"""
Decodes a base64-encoded PNG frame, resizes it, pads it to be codec-compatible,
and appends it to the video.
Args:
frame_data_b64: A base64-encoded string of the PNG frame data.
"""
if not self._is_active or not self._writer:
return
try:
frame_bytes = base64.b64decode(frame_data_b64)
# Use PIL to handle image processing in memory - much faster than spawning ffmpeg subprocess per frame
with Image.open(io.BytesIO(frame_bytes)) as img:
# 1. Resize if needed to target viewport size
if img.size != (self.size['width'], self.size['height']):
# Use BICUBIC as it's faster than LANCZOS and good enough for screen recordings
img = img.resize((self.size['width'], self.size['height']), Image.Resampling.BICUBIC)
# 2. Handle Padding (Macro block alignment for codecs)
# Check if padding is actually needed
if self.padded_size['width'] != self.size['width'] or self.padded_size['height'] != self.size['height']:
new_img = Image.new('RGB', (self.padded_size['width'], self.padded_size['height']), (0, 0, 0))
# Center the image
x_offset = (self.padded_size['width'] - self.size['width']) // 2
y_offset = (self.padded_size['height'] - self.size['height']) // 2
new_img.paste(img, (x_offset, y_offset))
img = new_img
# 3. Convert to numpy array for imageio
img_array = np.array(img)
self._writer.append_data(img_array)
except Exception as e:
logger.warning(f'Could not process and add video frame: {e}')
def stop_and_save(self) -> None:
"""
Finalizes the video file by closing the writer.
This method should be called when the recording session is complete.
"""
if not self._is_active or not self._writer:
return
try:
self._writer.close()
logger.info(f'📹 Video recording saved successfully to: {self.output_path}')
except Exception as e:
logger.error(f'Failed to finalize and save video: {e}')
finally:
self._is_active = False
self._writer = None
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/video_recorder.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdog_base.py | """Base watchdog class for browser monitoring components."""
import asyncio
import inspect
import time
from collections.abc import Iterable
from typing import Any, ClassVar
from bubus import BaseEvent, EventBus
from pydantic import BaseModel, ConfigDict, Field
from browser_use.browser.session import BrowserSession
class BaseWatchdog(BaseModel):
"""Base class for all browser watchdogs.
Watchdogs monitor browser state and emit events based on changes.
They automatically register event handlers based on method names.
Handler methods should be named: on_EventTypeName(self, event: EventTypeName)
"""
model_config = ConfigDict(
arbitrary_types_allowed=True, # allow non-serializable objects like EventBus/BrowserSession in fields
extra='forbid', # dont allow implicit class/instance state, everything must be a properly typed Field or PrivateAttr
validate_assignment=False, # avoid re-triggering __init__ / validators on values on every assignment
revalidate_instances='never', # avoid re-triggering __init__ / validators and erasing private attrs
)
# Class variables to statically define the list of events relevant to each watchdog
# (not enforced, just to make it easier to understand the code and debug watchdogs at runtime)
LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = [] # Events this watchdog listens to
EMITS: ClassVar[list[type[BaseEvent[Any]]]] = [] # Events this watchdog emits
# Core dependencies
event_bus: EventBus = Field()
browser_session: BrowserSession = Field()
# Shared state that other watchdogs might need to access should not be defined on BrowserSession, not here!
# Shared helper methods needed by other watchdogs should be defined on BrowserSession, not here!
# Alternatively, expose some events on the watchdog to allow access to state/helpers via event_bus system.
# Private state internal to the watchdog can be defined like this on BaseWatchdog subclasses:
# _screenshot_cache: dict[str, bytes] = PrivateAttr(default_factory=dict)
# _browser_crash_watcher_task: asyncio.Task | None = PrivateAttr(default=None)
# _cdp_download_tasks: WeakSet[asyncio.Task] = PrivateAttr(default_factory=WeakSet)
# ...
@property
def logger(self):
"""Get the logger from the browser session."""
return self.browser_session.logger
@staticmethod
def attach_handler_to_session(browser_session: 'BrowserSession', event_class: type[BaseEvent[Any]], handler) -> None:
"""Attach a single event handler to a browser session.
Args:
browser_session: The browser session to attach to
event_class: The event class to listen for
handler: The handler method (must start with 'on_' and end with event type)
"""
event_bus = browser_session.event_bus
# Validate handler naming convention
assert hasattr(handler, '__name__'), 'Handler must have a __name__ attribute'
assert handler.__name__.startswith('on_'), f'Handler {handler.__name__} must start with "on_"'
assert handler.__name__.endswith(event_class.__name__), (
f'Handler {handler.__name__} must end with event type {event_class.__name__}'
)
# Get the watchdog instance if this is a bound method
watchdog_instance = getattr(handler, '__self__', None)
watchdog_class_name = watchdog_instance.__class__.__name__ if watchdog_instance else 'Unknown'
# Events that should always run even when CDP is disconnected (lifecycle management)
LIFECYCLE_EVENT_NAMES = frozenset(
{
'BrowserStartEvent',
'BrowserStopEvent',
'BrowserStoppedEvent',
'BrowserLaunchEvent',
'BrowserErrorEvent',
'BrowserKillEvent',
'BrowserReconnectingEvent',
'BrowserReconnectedEvent',
}
)
# Create a wrapper function with unique name to avoid duplicate handler warnings
# Capture handler by value to avoid closure issues
def make_unique_handler(actual_handler):
async def unique_handler(event):
# Circuit breaker: skip handler if CDP WebSocket is dead
# (prevents handlers from hanging on broken connections until timeout)
# Lifecycle events are exempt — they manage browser start/stop
if event.event_type not in LIFECYCLE_EVENT_NAMES and not browser_session.is_cdp_connected:
# If reconnection is in progress, wait for it instead of silently skipping
if browser_session.is_reconnecting:
wait_timeout = browser_session.RECONNECT_WAIT_TIMEOUT
browser_session.logger.debug(
f'🚌 [{watchdog_class_name}.{actual_handler.__name__}] ⏳ Waiting for reconnection ({wait_timeout}s)...'
)
try:
await asyncio.wait_for(browser_session._reconnect_event.wait(), timeout=wait_timeout)
except TimeoutError:
raise ConnectionError(
f'[{watchdog_class_name}.{actual_handler.__name__}] '
f'Reconnection wait timed out after {wait_timeout}s'
)
# After wait: check if reconnection actually succeeded
if not browser_session.is_cdp_connected:
raise ConnectionError(
f'[{watchdog_class_name}.{actual_handler.__name__}] Reconnection failed — CDP still not connected'
)
# Reconnection succeeded — fall through to execute handler normally
else:
# Not reconnecting — intentional stop, backward compat silent skip
browser_session.logger.debug(
f'🚌 [{watchdog_class_name}.{actual_handler.__name__}] ⚡ Skipped — CDP not connected'
)
return None
# just for debug logging, not used for anything else
parent_event = event_bus.event_history.get(event.event_parent_id) if event.event_parent_id else None
grandparent_event = (
event_bus.event_history.get(parent_event.event_parent_id)
if parent_event and parent_event.event_parent_id
else None
)
parent = (
f'↲ triggered by on_{parent_event.event_type}#{parent_event.event_id[-4:]}'
if parent_event
else '👈 by Agent'
)
grandparent = (
(
f'↲ under {grandparent_event.event_type}#{grandparent_event.event_id[-4:]}'
if grandparent_event
else '👈 by Agent'
)
if parent_event
else ''
)
event_str = f'#{event.event_id[-4:]}'
time_start = time.time()
watchdog_and_handler_str = f'[{watchdog_class_name}.{actual_handler.__name__}({event_str})]'.ljust(54)
browser_session.logger.debug(f'🚌 {watchdog_and_handler_str} ⏳ Starting... {parent} {grandparent}')
try:
# **EXECUTE THE EVENT HANDLER FUNCTION**
result = await actual_handler(event)
if isinstance(result, Exception):
raise result
# just for debug logging, not used for anything else
time_end = time.time()
time_elapsed = time_end - time_start
result_summary = '' if result is None else f' ➡️ <{type(result).__name__}>'
parents_summary = f' {parent}'.replace('↲ triggered by ', '⤴ returned to ').replace(
'👈 by Agent', '👉 returned to Agent'
)
browser_session.logger.debug(
f'🚌 {watchdog_and_handler_str} Succeeded ({time_elapsed:.2f}s){result_summary}{parents_summary}'
)
return result
except Exception as e:
time_end = time.time()
time_elapsed = time_end - time_start
original_error = e
browser_session.logger.error(
f'🚌 {watchdog_and_handler_str} ❌ Failed ({time_elapsed:.2f}s): {type(e).__name__}: {e}'
)
# attempt to repair potentially crashed CDP session
try:
if browser_session.agent_focus_target_id:
# With event-driven sessions, Chrome will send detach/attach events
# SessionManager handles pool cleanup automatically
target_id_to_restore = browser_session.agent_focus_target_id
browser_session.logger.debug(
f'🚌 {watchdog_and_handler_str} ⚠️ Session error detected, waiting for CDP events to sync (target: {target_id_to_restore})'
)
# Wait for new attach event to restore the session
# This will raise ValueError if target doesn't re-attach
await browser_session.get_or_create_cdp_session(target_id=target_id_to_restore, focus=True)
else:
# Try to get any available session
await browser_session.get_or_create_cdp_session(target_id=None, focus=True)
except Exception as sub_error:
if 'ConnectionClosedError' in str(type(sub_error)) or 'ConnectionError' in str(type(sub_error)):
browser_session.logger.error(
f'🚌 {watchdog_and_handler_str} ❌ Browser closed or CDP Connection disconnected by remote. {type(sub_error).__name__}: {sub_error}\n'
)
raise
else:
browser_session.logger.error(
f'🚌 {watchdog_and_handler_str} ❌ CDP connected but failed to re-create CDP session after error "{type(original_error).__name__}: {original_error}" in {actual_handler.__name__}({event.event_type}#{event.event_id[-4:]}): due to {type(sub_error).__name__}: {sub_error}\n'
)
# Always re-raise the original error with its traceback preserved
raise
return unique_handler
unique_handler = make_unique_handler(handler)
unique_handler.__name__ = f'{watchdog_class_name}.{handler.__name__}'
# Check if this handler is already registered - throw error if duplicate
existing_handlers = event_bus.handlers.get(event_class.__name__, [])
handler_names = [getattr(h, '__name__', str(h)) for h in existing_handlers]
if unique_handler.__name__ in handler_names:
raise RuntimeError(
f'[{watchdog_class_name}] Duplicate handler registration attempted! '
f'Handler {unique_handler.__name__} is already registered for {event_class.__name__}. '
f'This likely means attach_to_session() was called multiple times.'
)
event_bus.on(event_class, unique_handler)
@staticmethod
def detach_handler_from_session(browser_session: 'BrowserSession', event_class: type[BaseEvent[Any]], handler) -> None:
"""Detach a single event handler from a browser session."""
event_bus = browser_session.event_bus
# Get the watchdog instance if this is a bound method
watchdog_instance = getattr(handler, '__self__', None)
watchdog_class_name = watchdog_instance.__class__.__name__ if watchdog_instance else 'Unknown'
# Find and remove the handler by its unique name pattern
unique_handler_name = f'{watchdog_class_name}.{handler.__name__}'
existing_handlers = event_bus.handlers.get(event_class.__name__, [])
for existing_handler in existing_handlers[:]: # copy list to allow modification during iteration
if getattr(existing_handler, '__name__', '') == unique_handler_name:
existing_handlers.remove(existing_handler)
break
def attach_to_session(self) -> None:
"""Attach watchdog to its browser session and start monitoring.
This method handles event listener registration. The watchdog is already
bound to a browser session via self.browser_session from initialization.
"""
# Register event handlers automatically based on method names
assert self.browser_session is not None, 'Root CDP client not initialized - browser may not be connected yet'
from browser_use.browser import events
event_classes = {}
for name in dir(events):
obj = getattr(events, name)
if inspect.isclass(obj) and issubclass(obj, BaseEvent) and obj is not BaseEvent:
event_classes[name] = obj
# Find all handler methods (on_EventName)
registered_events = set()
for method_name in dir(self):
if method_name.startswith('on_') and callable(getattr(self, method_name)):
# Extract event name from method name (on_EventName -> EventName)
event_name = method_name[3:] # Remove 'on_' prefix
if event_name in event_classes:
event_class = event_classes[event_name]
# ASSERTION: If LISTENS_TO is defined, enforce it
if self.LISTENS_TO:
assert event_class in self.LISTENS_TO, (
f'[{self.__class__.__name__}] Handler {method_name} listens to {event_name} '
f'but {event_name} is not declared in LISTENS_TO: {[e.__name__ for e in self.LISTENS_TO]}'
)
handler = getattr(self, method_name)
# Use the static helper to attach the handler
self.attach_handler_to_session(self.browser_session, event_class, handler)
registered_events.add(event_class)
# ASSERTION: If LISTENS_TO is defined, ensure all declared events have handlers
if self.LISTENS_TO:
missing_handlers = set(self.LISTENS_TO) - registered_events
if missing_handlers:
missing_names = [e.__name__ for e in missing_handlers]
self.logger.warning(
f'[{self.__class__.__name__}] LISTENS_TO declares {missing_names} '
f'but no handlers found (missing on_{"_, on_".join(missing_names)} methods)'
)
def __del__(self) -> None:
"""Clean up any running tasks during garbage collection."""
# A BIT OF MAGIC: Cancel any private attributes that look like asyncio tasks
try:
for attr_name in dir(self):
# e.g. _browser_crash_watcher_task = asyncio.Task
if attr_name.startswith('_') and attr_name.endswith('_task'):
try:
task = getattr(self, attr_name)
if hasattr(task, 'cancel') and callable(task.cancel) and not task.done():
task.cancel()
# self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
except Exception:
pass # Ignore errors during cleanup
# e.g. _cdp_download_tasks = WeakSet[asyncio.Task] or list[asyncio.Task]
if attr_name.startswith('_') and attr_name.endswith('_tasks') and isinstance(getattr(self, attr_name), Iterable):
for task in getattr(self, attr_name):
try:
if hasattr(task, 'cancel') and callable(task.cancel) and not task.done():
task.cancel()
# self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
except Exception:
pass # Ignore errors during cleanup
except Exception as e:
from browser_use.utils import logger
logger.error(f'⚠️ Error during BrowserSession {self.__class__.__name__} garbage collection __del__(): {type(e)}: {e}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdog_base.py",
"license": "MIT License",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/aboutblank_watchdog.py | """About:blank watchdog for managing about:blank tabs with DVD screensaver."""
from typing import TYPE_CHECKING, ClassVar
from bubus import BaseEvent
from cdp_use.cdp.target import TargetID
from pydantic import PrivateAttr
from browser_use.browser.events import (
AboutBlankDVDScreensaverShownEvent,
BrowserStopEvent,
BrowserStoppedEvent,
CloseTabEvent,
NavigateToUrlEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
if TYPE_CHECKING:
pass
class AboutBlankWatchdog(BaseWatchdog):
"""Ensures there's always exactly one about:blank tab with DVD screensaver."""
# Event contracts
LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
BrowserStopEvent,
BrowserStoppedEvent,
TabCreatedEvent,
TabClosedEvent,
]
EMITS: ClassVar[list[type[BaseEvent]]] = [
NavigateToUrlEvent,
CloseTabEvent,
AboutBlankDVDScreensaverShownEvent,
]
_stopping: bool = PrivateAttr(default=False)
async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
"""Handle browser stop request - stop creating new tabs."""
# logger.info('[AboutBlankWatchdog] Browser stop requested, stopping tab creation')
self._stopping = True
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
"""Handle browser stopped event."""
# logger.info('[AboutBlankWatchdog] Browser stopped')
self._stopping = True
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
"""Check tabs when a new tab is created."""
# logger.debug(f'[AboutBlankWatchdog] ➕ New tab created: {event.url}')
# If an about:blank tab was created, show DVD screensaver on all about:blank tabs
if event.url == 'about:blank':
await self._show_dvd_screensaver_on_about_blank_tabs()
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
"""Check tabs when a tab is closed and proactively create about:blank if needed."""
# Don't create new tabs if browser is shutting down
if self._stopping:
return
# Don't attempt CDP operations if the WebSocket is dead — dispatching
# NavigateToUrlEvent on a broken connection will hang until timeout
if not self.browser_session.is_cdp_connected:
self.logger.debug('[AboutBlankWatchdog] CDP not connected, skipping tab recovery')
return
# Check if we're about to close the last tab (event happens BEFORE tab closes)
# Use _cdp_get_all_pages for quick check without fetching titles
page_targets = await self.browser_session._cdp_get_all_pages()
if len(page_targets) < 1:
self.logger.debug(
'[AboutBlankWatchdog] Last tab closing, creating new about:blank tab to avoid closing entire browser'
)
# Create the animation tab since no tabs should remain
navigate_event = self.event_bus.dispatch(NavigateToUrlEvent(url='about:blank', new_tab=True))
await navigate_event
# Show DVD screensaver on the new tab
await self._show_dvd_screensaver_on_about_blank_tabs()
else:
# Multiple tabs exist, check after close
await self._check_and_ensure_about_blank_tab()
async def attach_to_target(self, target_id: TargetID) -> None:
"""AboutBlankWatchdog doesn't monitor individual targets."""
pass
async def _check_and_ensure_about_blank_tab(self) -> None:
"""Check current tabs and ensure exactly one about:blank tab with animation exists."""
try:
if not self.browser_session.is_cdp_connected:
return
# For quick checks, just get page targets without titles to reduce noise
page_targets = await self.browser_session._cdp_get_all_pages()
# If no tabs exist at all, create one to keep browser alive
if len(page_targets) == 0:
# Only create a new tab if there are no tabs at all
self.logger.debug('[AboutBlankWatchdog] No tabs exist, creating new about:blank DVD screensaver tab')
navigate_event = self.event_bus.dispatch(NavigateToUrlEvent(url='about:blank', new_tab=True))
await navigate_event
# Show DVD screensaver on the new tab
await self._show_dvd_screensaver_on_about_blank_tabs()
# Otherwise there are tabs, don't create new ones to avoid interfering
except Exception as e:
self.logger.error(f'[AboutBlankWatchdog] Error ensuring about:blank tab: {e}')
async def _show_dvd_screensaver_on_about_blank_tabs(self) -> None:
"""Show DVD screensaver on all about:blank pages only."""
try:
# Get just the page targets without expensive title fetching
page_targets = await self.browser_session._cdp_get_all_pages()
browser_session_label = str(self.browser_session.id)[-4:]
for page_target in page_targets:
target_id = page_target['targetId']
url = page_target['url']
# Only target about:blank pages specifically
if url == 'about:blank':
await self._show_dvd_screensaver_loading_animation_cdp(target_id, browser_session_label)
except Exception as e:
self.logger.error(f'[AboutBlankWatchdog] Error showing DVD screensaver: {e}')
async def _show_dvd_screensaver_loading_animation_cdp(self, target_id: TargetID, browser_session_label: str) -> None:
"""
Injects a DVD screensaver-style bouncing logo loading animation overlay into the target using CDP.
This is used to visually indicate that the browser is setting up or waiting.
"""
try:
# Create temporary session for this target without switching focus
temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
# Inject the DVD screensaver script (from main branch with idempotency added)
script = f"""
(function(browser_session_label) {{
// Idempotency check
if (window.__dvdAnimationRunning) {{
return; // Already running, don't add another
}}
window.__dvdAnimationRunning = true;
// Ensure document.body exists before proceeding
if (!document.body) {{
// Try again after DOM is ready
window.__dvdAnimationRunning = false; // Reset flag to retry
if (document.readyState === 'loading') {{
document.addEventListener('DOMContentLoaded', () => arguments.callee(browser_session_label));
}}
return;
}}
const animated_title = `Starting agent ${{browser_session_label}}...`;
if (document.title === animated_title) {{
return; // already run on this tab, dont run again
}}
document.title = animated_title;
// Create the main overlay
const loadingOverlay = document.createElement('div');
loadingOverlay.id = 'pretty-loading-animation';
loadingOverlay.style.position = 'fixed';
loadingOverlay.style.top = '0';
loadingOverlay.style.left = '0';
loadingOverlay.style.width = '100vw';
loadingOverlay.style.height = '100vh';
loadingOverlay.style.background = '#000';
loadingOverlay.style.zIndex = '99999';
loadingOverlay.style.overflow = 'hidden';
// Create the image element
const img = document.createElement('img');
img.src = 'https://cf.browser-use.com/logo.svg';
img.alt = 'Browser-Use';
img.style.width = '200px';
img.style.height = 'auto';
img.style.position = 'absolute';
img.style.left = '0px';
img.style.top = '0px';
img.style.zIndex = '2';
img.style.opacity = '0.8';
loadingOverlay.appendChild(img);
document.body.appendChild(loadingOverlay);
// DVD screensaver bounce logic
let x = Math.random() * (window.innerWidth - 300);
let y = Math.random() * (window.innerHeight - 300);
let dx = 1.2 + Math.random() * 0.4; // px per frame
let dy = 1.2 + Math.random() * 0.4;
// Randomize direction
if (Math.random() > 0.5) dx = -dx;
if (Math.random() > 0.5) dy = -dy;
function animate() {{
const imgWidth = img.offsetWidth || 300;
const imgHeight = img.offsetHeight || 300;
x += dx;
y += dy;
if (x <= 0) {{
x = 0;
dx = Math.abs(dx);
}} else if (x + imgWidth >= window.innerWidth) {{
x = window.innerWidth - imgWidth;
dx = -Math.abs(dx);
}}
if (y <= 0) {{
y = 0;
dy = Math.abs(dy);
}} else if (y + imgHeight >= window.innerHeight) {{
y = window.innerHeight - imgHeight;
dy = -Math.abs(dy);
}}
img.style.left = `${{x}}px`;
img.style.top = `${{y}}px`;
requestAnimationFrame(animate);
}}
animate();
// Responsive: update bounds on resize
window.addEventListener('resize', () => {{
x = Math.min(x, window.innerWidth - img.offsetWidth);
y = Math.min(y, window.innerHeight - img.offsetHeight);
}});
// Add a little CSS for smoothness
const style = document.createElement('style');
style.textContent = `
#pretty-loading-animation {{
/*backdrop-filter: blur(2px) brightness(0.9);*/
}}
#pretty-loading-animation img {{
user-select: none;
pointer-events: none;
}}
`;
document.head.appendChild(style);
}})('{browser_session_label}');
"""
await temp_session.cdp_client.send.Runtime.evaluate(params={'expression': script}, session_id=temp_session.session_id)
# No need to detach - session is cached
# Dispatch event
self.event_bus.dispatch(AboutBlankDVDScreensaverShownEvent(target_id=target_id))
except Exception as e:
self.logger.error(f'[AboutBlankWatchdog] Error injecting DVD screensaver: {e}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/aboutblank_watchdog.py",
"license": "MIT License",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
browser-use/browser-use:browser_use/browser/watchdogs/crash_watchdog.py | """Browser watchdog for monitoring crashes and network timeouts using CDP."""
import asyncio
import time
from typing import TYPE_CHECKING, ClassVar
import psutil
from bubus import BaseEvent
from cdp_use.cdp.target import SessionID, TargetID
from cdp_use.cdp.target.events import TargetCrashedEvent
from pydantic import Field, PrivateAttr
from browser_use.browser.events import (
BrowserConnectedEvent,
BrowserErrorEvent,
BrowserStoppedEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.watchdog_base import BaseWatchdog
from browser_use.utils import create_task_with_error_handling
if TYPE_CHECKING:
pass
class NetworkRequestTracker:
"""Tracks ongoing network requests."""
def __init__(self, request_id: str, start_time: float, url: str, method: str, resource_type: str | None = None):
self.request_id = request_id
self.start_time = start_time
self.url = url
self.method = method
self.resource_type = resource_type
class CrashWatchdog(BaseWatchdog):
"""Monitors browser health for crashes and network timeouts using CDP."""
# Event contracts
LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
BrowserConnectedEvent,
BrowserStoppedEvent,
TabCreatedEvent,
TabClosedEvent,
]
EMITS: ClassVar[list[type[BaseEvent]]] = [BrowserErrorEvent]
# Configuration
network_timeout_seconds: float = Field(default=10.0)
check_interval_seconds: float = Field(default=5.0) # Reduced frequency to reduce noise
# Private state
_active_requests: dict[str, NetworkRequestTracker] = PrivateAttr(default_factory=dict)
_monitoring_task: asyncio.Task | None = PrivateAttr(default=None)
_last_responsive_checks: dict[str, float] = PrivateAttr(default_factory=dict) # target_url -> timestamp
_cdp_event_tasks: set[asyncio.Task] = PrivateAttr(default_factory=set) # Track CDP event handler tasks
_targets_with_listeners: set[str] = PrivateAttr(default_factory=set) # Track targets that already have event listeners
async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
"""Start monitoring when browser is connected."""
# logger.debug('[CrashWatchdog] Browser connected event received, beginning monitoring')
create_task_with_error_handling(
self._start_monitoring(), name='start_crash_monitoring', logger_instance=self.logger, suppress_exceptions=True
)
# logger.debug(f'[CrashWatchdog] Monitoring task started: {self._monitoring_task and not self._monitoring_task.done()}')
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
"""Stop monitoring when browser stops."""
# logger.debug('[CrashWatchdog] Browser stopped, ending monitoring')
await self._stop_monitoring()
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
"""Attach to new tab."""
assert self.browser_session.agent_focus_target_id is not None, 'No current target ID'
await self.attach_to_target(self.browser_session.agent_focus_target_id)
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None:
"""Clean up tracking when tab closes."""
# Remove target from listener tracking to prevent memory leak
if event.target_id in self._targets_with_listeners:
self._targets_with_listeners.discard(event.target_id)
self.logger.debug(f'[CrashWatchdog] Removed target {event.target_id[:8]}... from monitoring')
async def attach_to_target(self, target_id: TargetID) -> None:
"""Set up crash monitoring for a specific target using CDP."""
try:
# Check if we already have listeners for this target
if target_id in self._targets_with_listeners:
self.logger.debug(f'[CrashWatchdog] Event listeners already exist for target: {target_id[:8]}...')
return
# Create temporary session for monitoring without switching focus
cdp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
# Register crash event handler
def on_target_crashed(event: TargetCrashedEvent, session_id: SessionID | None = None):
# Create and track the task
task = create_task_with_error_handling(
self._on_target_crash_cdp(target_id),
name='handle_target_crash',
logger_instance=self.logger,
suppress_exceptions=True,
)
self._cdp_event_tasks.add(task)
# Remove from set when done
task.add_done_callback(lambda t: self._cdp_event_tasks.discard(t))
cdp_session.cdp_client.register.Target.targetCrashed(on_target_crashed)
# Track that we've added listeners to this target
self._targets_with_listeners.add(target_id)
target = self.browser_session.session_manager.get_target(target_id)
if target:
self.logger.debug(f'[CrashWatchdog] Added target to monitoring: {target.url}')
except Exception as e:
self.logger.warning(f'[CrashWatchdog] Failed to attach to target {target_id}: {e}')
async def _on_request_cdp(self, event: dict) -> None:
"""Track new network request from CDP event."""
request_id = event.get('requestId', '')
request = event.get('request', {})
self._active_requests[request_id] = NetworkRequestTracker(
request_id=request_id,
start_time=time.time(),
url=request.get('url', ''),
method=request.get('method', ''),
resource_type=event.get('type'),
)
# logger.debug(f'[CrashWatchdog] Tracking request: {request.get("method", "")} {request.get("url", "")[:50]}...')
def _on_response_cdp(self, event: dict) -> None:
"""Remove request from tracking on response."""
request_id = event.get('requestId', '')
if request_id in self._active_requests:
elapsed = time.time() - self._active_requests[request_id].start_time
response = event.get('response', {})
self.logger.debug(f'[CrashWatchdog] Request completed in {elapsed:.2f}s: {response.get("url", "")[:50]}...')
# Don't remove yet - wait for loadingFinished
def _on_request_failed_cdp(self, event: dict) -> None:
"""Remove request from tracking on failure."""
request_id = event.get('requestId', '')
if request_id in self._active_requests:
elapsed = time.time() - self._active_requests[request_id].start_time
self.logger.debug(
f'[CrashWatchdog] Request failed after {elapsed:.2f}s: {self._active_requests[request_id].url[:50]}...'
)
del self._active_requests[request_id]
def _on_request_finished_cdp(self, event: dict) -> None:
"""Remove request from tracking when loading is finished."""
request_id = event.get('requestId', '')
self._active_requests.pop(request_id, None)
async def _on_target_crash_cdp(self, target_id: TargetID) -> None:
"""Handle target crash detected via CDP."""
self.logger.debug(f'[CrashWatchdog] Target crashed: {target_id[:8]}..., waiting for detach event')
target = self.browser_session.session_manager.get_target(target_id)
is_agent_focus = (
target
and self.browser_session.agent_focus_target_id
and target.target_id == self.browser_session.agent_focus_target_id
)
if is_agent_focus:
self.logger.error(f'[CrashWatchdog] 💥 Agent focus tab crashed: {target.url} (SessionManager will auto-recover)')
# Emit browser error event
self.event_bus.dispatch(
BrowserErrorEvent(
error_type='TargetCrash',
message=f'Target crashed: {target_id}',
details={
'url': target.url if target else None,
'target_id': target_id,
'was_agent_focus': is_agent_focus,
},
)
)
async def _start_monitoring(self) -> None:
"""Start the monitoring loop."""
assert self.browser_session.cdp_client is not None, 'Root CDP client not initialized - browser may not be connected yet'
if self._monitoring_task and not self._monitoring_task.done():
# logger.info('[CrashWatchdog] Monitoring already running')
return
self._monitoring_task = create_task_with_error_handling(
self._monitoring_loop(), name='crash_monitoring_loop', logger_instance=self.logger, suppress_exceptions=True
)
# logger.debug('[CrashWatchdog] Monitoring loop created and started')
async def _stop_monitoring(self) -> None:
"""Stop the monitoring loop and clean up all tracking."""
if self._monitoring_task and not self._monitoring_task.done():
self._monitoring_task.cancel()
try:
await self._monitoring_task
except asyncio.CancelledError:
pass
self.logger.debug('[CrashWatchdog] Monitoring loop stopped')
# Cancel all CDP event handler tasks
for task in list(self._cdp_event_tasks):
if not task.done():
task.cancel()
# Wait for all tasks to complete cancellation
if self._cdp_event_tasks:
await asyncio.gather(*self._cdp_event_tasks, return_exceptions=True)
self._cdp_event_tasks.clear()
# Clear all tracking
self._active_requests.clear()
self._targets_with_listeners.clear()
self._last_responsive_checks.clear()
async def _monitoring_loop(self) -> None:
"""Main monitoring loop."""
await asyncio.sleep(10) # give browser time to start up and load the first page after first LLM call
while True:
try:
await self._check_network_timeouts()
await self._check_browser_health()
await asyncio.sleep(self.check_interval_seconds)
except asyncio.CancelledError:
break
except Exception as e:
self.logger.error(f'[CrashWatchdog] Error in monitoring loop: {e}')
async def _check_network_timeouts(self) -> None:
"""Check for network requests exceeding timeout."""
current_time = time.time()
timed_out_requests = []
# Debug logging
if self._active_requests:
self.logger.debug(
f'[CrashWatchdog] Checking {len(self._active_requests)} active requests for timeouts (threshold: {self.network_timeout_seconds}s)'
)
for request_id, tracker in self._active_requests.items():
elapsed = current_time - tracker.start_time
self.logger.debug(
f'[CrashWatchdog] Request {tracker.url[:30]}... elapsed: {elapsed:.1f}s, timeout: {self.network_timeout_seconds}s'
)
if elapsed >= self.network_timeout_seconds:
timed_out_requests.append((request_id, tracker))
# Emit events for timed out requests
for request_id, tracker in timed_out_requests:
self.logger.warning(
f'[CrashWatchdog] Network request timeout after {self.network_timeout_seconds}s: '
f'{tracker.method} {tracker.url[:100]}...'
)
self.event_bus.dispatch(
BrowserErrorEvent(
error_type='NetworkTimeout',
message=f'Network request timed out after {self.network_timeout_seconds}s',
details={
'url': tracker.url,
'method': tracker.method,
'resource_type': tracker.resource_type,
'elapsed_seconds': current_time - tracker.start_time,
},
)
)
# Remove from tracking
del self._active_requests[request_id]
	async def _check_browser_health(self) -> None:
		"""Check if browser and targets are still responsive.

		Redirects any non-about:blank new-tab pages to about:blank, then pings the
		current CDP session with a trivial JS expression (1-second timeout). On any
		failure, only logs — Chrome's detach event plus SessionManager handle
		recovery. Finally checks the local browser process (if one is tracked) and,
		if it is dead, dispatches BrowserProcessCrashed and stops monitoring.
		"""
		try:
			self.logger.debug(f'[CrashWatchdog] Checking browser health for target {self.browser_session.agent_focus_target_id}')
			cdp_session = await self.browser_session.get_or_create_cdp_session()
			for target in self.browser_session.session_manager.get_all_page_targets():
				if self._is_new_tab_page(target.url) and target.url != 'about:blank':
					self.logger.debug(f'[CrashWatchdog] Redirecting chrome://new-tab-page/ to about:blank {target.url}')
					# NOTE: this rebinds cdp_session, so the ping below runs against
					# the last redirected target rather than the agent-focus session.
					cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=target.target_id)
					await cdp_session.cdp_client.send.Page.navigate(
						params={'url': 'about:blank'}, session_id=cdp_session.session_id
					)
			# Quick ping to check if session is alive
			self.logger.debug(f'[CrashWatchdog] Attempting to run simple JS test expression in session {cdp_session} 1+1')
			await asyncio.wait_for(
				cdp_session.cdp_client.send.Runtime.evaluate(params={'expression': '1+1'}, session_id=cdp_session.session_id),
				timeout=1.0,
			)
			self.logger.debug(
				f'[CrashWatchdog] Browser health check passed for target {self.browser_session.agent_focus_target_id}'
			)
		except Exception as e:
			# Deliberately swallow: Chrome follows an unresponsive/crashed session
			# with a detach event, and SessionManager auto-recovers from that.
			self.logger.error(
				f'[CrashWatchdog] ❌ Crashed/unresponsive session detected for target {self.browser_session.agent_focus_target_id} '
				f'error: {type(e).__name__}: {e} (Chrome will send detach event, SessionManager will auto-recover)'
			)
		# Check browser process if we have PID
		if self.browser_session._local_browser_watchdog and (proc := self.browser_session._local_browser_watchdog._subprocess):
			try:
				if proc.status() in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD):
					self.logger.error(f'[CrashWatchdog] Browser process {proc.pid} has crashed')
					# Browser process crashed - SessionManager will clean up via detach events
					# Just dispatch error event and stop monitoring
					self.event_bus.dispatch(
						BrowserErrorEvent(
							error_type='BrowserProcessCrashed',
							message=f'Browser process {proc.pid} has crashed',
							details={'pid': proc.pid, 'status': proc.status()},
						)
					)
					self.logger.warning('[CrashWatchdog] Browser process dead - stopping health monitoring')
					await self._stop_monitoring()
					return
			except Exception:
				pass  # psutil not available or process doesn't exist
@staticmethod
def _is_new_tab_page(url: str) -> bool:
"""Check if URL is a new tab page."""
return url in ['about:blank', 'chrome://new-tab-page/', 'chrome://newtab/']
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/crash_watchdog.py",
"license": "MIT License",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.