repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/__init__.py | examples/__init__.py | python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false | |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/demo_mode_example.py | examples/demo_mode_example.py | import asyncio
from browser_use import Agent, ChatBrowserUse
async def main() -> None:
agent = Agent(
task='Please find the latest commit on browser-use/browser-use repo and tell me the commit message. Please summarize what it is about.',
llm=ChatBrowserUse(),
demo_mode=True,
)
await agent.run(max_steps=5)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/apps/news-use/news_monitor.py | examples/apps/news-use/news_monitor.py | #!/usr/bin/env python3
"""
News monitoring agent with browser-use + Gemini Flash.
Automatically extracts and analyzes the latest articles from any news website.
"""
import argparse
import asyncio
import hashlib
import json
import logging
import os
import time
from datetime import datetime
from typing import Literal
from dateutil import parser as dtparser
from pydantic import BaseModel
def setup_environment(debug: bool):
if not debug:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
logging.getLogger().setLevel(logging.CRITICAL)
else:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'true'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info'
parser = argparse.ArgumentParser(description='News extractor using Browser-Use + Gemini')
parser.add_argument('--url', default='https://www.techcrunch.com', help='News site root URL')
parser.add_argument('--interval', type=int, default=300, help='Seconds between checks in monitor mode')
parser.add_argument('--once', action='store_true', help='Run a single extraction and exit')
parser.add_argument('--output', default='news_data.json', help='Path to JSON file where articles are stored')
parser.add_argument('--debug', action='store_true', help='Verbose console output and non-headless browser')
args = parser.parse_args()
setup_environment(args.debug)
from browser_use import Agent, BrowserSession, ChatGoogle
GEMINI_API_KEY = os.getenv('GOOGLE_API_KEY') or 'xxxx'
if GEMINI_API_KEY == 'xxxx':
print('⚠️ WARNING: Please set GOOGLE_API_KEY environment variable')
print(' You can get an API key at: https://makersuite.google.com/app/apikey')
print(" Then run: export GEMINI_API_KEY='your-api-key-here'")
print()
class NewsArticle(BaseModel):
title: str
url: str
posting_time: str
short_summary: str
long_summary: str
sentiment: Literal['positive', 'neutral', 'negative']
# ---------------------------------------------------------
# Core extractor
# ---------------------------------------------------------
async def extract_latest_article(site_url: str, debug: bool = False) -> dict:
"""Open site_url, navigate to the newest article and return structured JSON."""
prompt = (
f'Navigate to {site_url} and find the most recent headline article (usually at the top). '
f'Click on it to open the full article page. Once loaded, scroll & extract ALL required information: '
f'1. title: The article headline '
f'2. url: The full URL of the article page '
f'3. posting_time: The publication date/time as shown on the page '
f"4. short_summary: A 10-word overview of the article's content "
f'5. long_summary: A 100-word detailed summary of the article '
f"6. sentiment: Classify as 'positive', 'neutral', or 'negative' based on the article tone. "
f'When done, call the done action with success=True and put ALL extracted data in the text field '
f'as valid JSON in this exact format: '
f'{{"title": "...", "url": "...", "posting_time": "...", "short_summary": "...", "long_summary": "...", "sentiment": "positive|neutral|negative"}}'
)
llm = ChatGoogle(model='gemini-2.0-flash', temperature=0.1, api_key=GEMINI_API_KEY)
browser_session = BrowserSession(headless=not debug)
agent = Agent(task=prompt, llm=llm, browser_session=browser_session, use_vision=False)
if debug:
print(f'[DEBUG] Starting extraction from {site_url}')
start = time.time()
result = await agent.run(max_steps=25)
raw = result.final_result() if result else None
if debug:
print(f'[DEBUG] Raw result type: {type(raw)}')
print(f'[DEBUG] Raw result: {raw[:500] if isinstance(raw, str) else raw}')
print(f'[DEBUG] Extraction time: {time.time() - start:.2f}s')
if isinstance(raw, dict):
return {'status': 'success', 'data': raw}
text = str(raw).strip() if raw else ''
if '<json>' in text and '</json>' in text:
text = text.split('<json>', 1)[1].split('</json>', 1)[0].strip()
if text.lower().startswith('here is'):
brace = text.find('{')
if brace != -1:
text = text[brace:]
if text.startswith('```'):
text = text.lstrip('`\n ')
if text.lower().startswith('json'):
text = text[4:].lstrip()
def _escape_newlines(src: str) -> str:
out, in_str, esc = [], False, False
for ch in src:
if in_str:
if esc:
esc = False
elif ch == '\\':
esc = True
elif ch == '"':
in_str = False
elif ch == '\n':
out.append('\\n')
continue
elif ch == '\r':
continue
else:
if ch == '"':
in_str = True
out.append(ch)
return ''.join(out)
cleaned = _escape_newlines(text)
def _try_parse(txt: str):
try:
return json.loads(txt)
except Exception:
return None
data = _try_parse(cleaned)
# Fallback: grab first balanced JSON object
if data is None:
brace = 0
start = None
for i, ch in enumerate(text):
if ch == '{':
if brace == 0:
start = i
brace += 1
elif ch == '}':
brace -= 1
if brace == 0 and start is not None:
candidate = _escape_newlines(text[start : i + 1])
data = _try_parse(candidate)
if data is not None:
break
if isinstance(data, dict):
return {'status': 'success', 'data': data}
return {'status': 'error', 'error': f'JSON parse failed. Raw head: {text[:200]}'}
# ---------------------------------------------------------
# Persistence helpers
# ---------------------------------------------------------
def load_seen_hashes(file_path: str = 'news_data.json') -> set:
"""Load already-saved article URL hashes from disk for dedup across restarts."""
if not os.path.exists(file_path):
return set()
try:
with open(file_path) as f:
items = json.load(f)
return {entry['hash'] for entry in items if 'hash' in entry}
except Exception:
return set()
def save_article(article: dict, file_path: str = 'news_data.json'):
"""Append article to disk with a hash for future dedup."""
payload = {
'hash': hashlib.md5(article['url'].encode()).hexdigest(),
'pulled_at': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'data': article,
}
existing = []
if os.path.exists(file_path):
try:
with open(file_path) as f:
existing = json.load(f)
except Exception:
existing = []
existing.append(payload)
# Keep last 100
existing = existing[-100:]
with open(file_path, 'w') as f:
json.dump(existing, f, ensure_ascii=False, indent=2)
# ---------------------------------------------------------
# CLI functions
# ---------------------------------------------------------
def _fmt(ts_raw: str) -> str:
"""Format timestamp string"""
try:
return dtparser.parse(ts_raw).strftime('%Y-%m-%d %H:%M:%S')
except Exception:
return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
async def run_once(url: str, output_path: str, debug: bool):
"""Run a single extraction and exit"""
res = await extract_latest_article(url, debug)
if res['status'] == 'success':
art = res['data']
url_val = art.get('url', '')
hash_ = hashlib.md5(url_val.encode()).hexdigest() if url_val else None
if url_val:
save_article(art, output_path)
ts = _fmt(art.get('posting_time', ''))
sentiment = art.get('sentiment', 'neutral')
emoji = {'positive': '🟢', 'negative': '🔴', 'neutral': '🟡'}.get(sentiment, '🟡')
summary = art.get('short_summary', art.get('summary', art.get('title', '')))
if debug:
print(json.dumps(art, ensure_ascii=False, indent=2))
print()
print(f'[{ts}] - {emoji} - {summary}')
if not debug:
print() # Only add spacing in non-debug mode
return hash_
else:
print(f'Error: {res["error"]}')
return None
async def monitor(url: str, interval: int, output_path: str, debug: bool):
"""Continuous monitoring mode"""
seen = load_seen_hashes(output_path)
print(f'Monitoring {url} every {interval}s')
print()
while True:
try:
res = await extract_latest_article(url, debug)
if res['status'] == 'success':
art = res['data']
url_val = art.get('url', '')
hash_ = hashlib.md5(url_val.encode()).hexdigest() if url_val else None
if hash_ and hash_ not in seen:
seen.add(hash_)
ts = _fmt(art.get('posting_time', ''))
sentiment = art.get('sentiment', 'neutral')
emoji = {'positive': '🟢', 'negative': '🔴', 'neutral': '🟡'}.get(sentiment, '🟡')
summary = art.get('short_summary', art.get('title', ''))
save_article(art, output_path)
if debug:
print(json.dumps(art, ensure_ascii=False, indent=2))
print(f'[{ts}] - {emoji} - {summary}')
if not debug:
print() # Add spacing between articles in non-debug mode
elif debug:
print(f'Error: {res["error"]}')
except Exception as e:
if debug:
import traceback
traceback.print_exc()
else:
print(f'Unhandled error: {e}')
await asyncio.sleep(interval)
def main():
"""Main entry point"""
if args.once:
asyncio.run(run_once(args.url, args.output, args.debug))
else:
try:
asyncio.run(monitor(args.url, args.interval, args.output, args.debug))
except KeyboardInterrupt:
print('\nStopped by user')
if __name__ == '__main__':
main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/apps/ad-use/ad_generator.py | examples/apps/ad-use/ad_generator.py | import argparse
import asyncio
import logging
import os
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from browser_use.utils import create_task_with_error_handling
def setup_environment(debug: bool):
if not debug:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
logging.getLogger().setLevel(logging.CRITICAL)
else:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'true'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info'
parser = argparse.ArgumentParser(description='Generate ads from landing pages using browser-use + 🍌')
parser.add_argument('--url', nargs='?', help='Landing page URL to analyze')
parser.add_argument('--debug', action='store_true', default=False, help='Enable debug mode (show browser, verbose logs)')
parser.add_argument('--count', type=int, default=1, help='Number of ads to generate in parallel (default: 1)')
group = parser.add_mutually_exclusive_group()
group.add_argument('--instagram', action='store_true', default=False, help='Generate Instagram image ad (default)')
group.add_argument('--tiktok', action='store_true', default=False, help='Generate TikTok video ad using Veo3')
args = parser.parse_args()
if not args.instagram and not args.tiktok:
args.instagram = True
setup_environment(args.debug)
from typing import Any, cast
import aiofiles
from google import genai
from PIL import Image
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
class LandingPageAnalyzer:
def __init__(self, debug: bool = False):
self.debug = debug
self.llm = ChatGoogle(model='gemini-2.0-flash-exp', api_key=GOOGLE_API_KEY)
self.output_dir = Path('output')
self.output_dir.mkdir(exist_ok=True)
async def analyze_landing_page(self, url: str, mode: str = 'instagram') -> dict:
browser_session = BrowserSession(
headless=not self.debug,
)
agent = Agent(
task=f"""Go to {url} and quickly extract key brand information for Instagram ad creation.
Steps:
1. Navigate to the website
2. From the initial view, extract ONLY these essentials:
- Brand/Product name
- Main tagline or value proposition (one sentence)
- Primary call-to-action text
- Any visible pricing or special offer
3. Scroll down half a page, twice (0.5 pages each) to check for any key info
4. Done - keep it simple and focused on the brand
Return ONLY the key brand info, not page structure details.""",
llm=self.llm,
browser_session=browser_session,
max_actions_per_step=2,
step_timeout=30,
use_thinking=False,
vision_detail_level='high',
)
screenshot_path = None
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
async def screenshot_callback(agent_instance):
nonlocal screenshot_path
await asyncio.sleep(4)
screenshot_path = self.output_dir / f'landing_page_{timestamp}.png'
await agent_instance.browser_session.take_screenshot(path=str(screenshot_path), full_page=False)
screenshot_task = create_task_with_error_handling(
screenshot_callback(agent), name='screenshot_callback', suppress_exceptions=True
)
history = await agent.run()
try:
await screenshot_task
except Exception as e:
print(f'Screenshot task failed: {e}')
analysis = history.final_result() or 'No analysis content extracted'
return {'url': url, 'analysis': analysis, 'screenshot_path': screenshot_path, 'timestamp': timestamp}
class AdGenerator:
def __init__(self, api_key: str | None = GOOGLE_API_KEY, mode: str = 'instagram'):
if not api_key:
raise ValueError('GOOGLE_API_KEY is missing or empty – set the environment variable or pass api_key explicitly')
self.client = genai.Client(api_key=api_key)
self.output_dir = Path('output')
self.output_dir.mkdir(exist_ok=True)
self.mode = mode
async def create_video_concept(self, browser_analysis: str, ad_id: int) -> str:
"""Generate a unique creative concept for each video ad"""
if self.mode != 'tiktok':
return ''
concept_prompt = f"""Based on this brand analysis:
{browser_analysis}
Create a UNIQUE and SPECIFIC TikTok video concept #{ad_id}.
Be creative and different! Consider various approaches like:
- Different visual metaphors and storytelling angles
- Various trending TikTok formats (transitions, reveals, transformations)
- Different emotional appeals (funny, inspiring, surprising, relatable)
- Unique visual styles (neon, retro, minimalist, maximalist, surreal)
- Different perspectives (first-person, aerial, macro, time-lapse)
Return a 2-3 sentence description of a specific, unique video concept that would work for this brand.
Make it visually interesting and different from typical ads. Be specific about visual elements, transitions, and mood."""
response = self.client.models.generate_content(model='gemini-2.0-flash-exp', contents=concept_prompt)
return response.text if response and response.text else ''
def create_ad_prompt(self, browser_analysis: str, video_concept: str = '') -> str:
if self.mode == 'instagram':
prompt = f"""Create an Instagram ad for this brand:
{browser_analysis}
Create a vibrant, eye-catching Instagram ad image with:
- Try to use the colors and style of the logo or brand, else:
- Bold, modern gradient background with bright colors
- Large, playful sans-serif text with the product/service name from the analysis
- Trendy design elements: geometric shapes, sparkles, emojis
- Fun bubbles or badges for any pricing or special offers mentioned
- Call-to-action button with text from the analysis
- Emphasizes the key value proposition from the analysis
- Uses visual elements that match the brand personality
- Square format (1:1 ratio)
- Use color psychology to drive action
Style: Modern Instagram advertisement, (1:1), scroll-stopping, professional but playful, conversion-focused"""
else: # tiktok
if video_concept:
prompt = f"""Create a TikTok video ad based on this specific concept:
{video_concept}
Brand context: {browser_analysis}
Requirements:
- Vertical 9:16 format
- High quality, professional execution
- Bring the concept to life exactly as described
- No text overlays, pure visual storytelling"""
else:
prompt = f"""Create a viral TikTok video ad for this brand:
{browser_analysis}
Create a dynamic, engaging vertical video with:
- Quick hook opening that grabs attention immediately
- Minimal text overlays (focus on visual storytelling)
- Fast-paced but not overwhelming editing
- Authentic, relatable energy that appeals to Gen Z
- Vertical 9:16 format optimized for mobile
- High energy but professional execution
Style: Modern TikTok advertisement, viral potential, authentic energy, minimal text, maximum visual impact"""
return prompt
async def generate_ad_image(self, prompt: str, screenshot_path: Path | None = None) -> bytes | None:
"""Generate ad image bytes using Gemini. Returns None on failure."""
try:
from typing import Any
contents: list[Any] = [prompt]
if screenshot_path and screenshot_path.exists():
img = Image.open(screenshot_path)
w, h = img.size
side = min(w, h)
img = img.crop(((w - side) // 2, (h - side) // 2, (w + side) // 2, (h + side) // 2))
contents = [prompt + '\n\nHere is the actual landing page screenshot to reference for design inspiration:', img]
response = await self.client.aio.models.generate_content(
model='gemini-2.5-flash-image-preview',
contents=contents,
)
cand = getattr(response, 'candidates', None)
if cand:
for part in getattr(cand[0].content, 'parts', []):
inline = getattr(part, 'inline_data', None)
if inline:
return inline.data
except Exception as e:
print(f'❌ Image generation failed: {e}')
return None
async def generate_ad_video(self, prompt: str, screenshot_path: Path | None = None, ad_id: int = 1) -> bytes:
"""Generate ad video using Veo3."""
sync_client = genai.Client(api_key=GOOGLE_API_KEY)
# Commented out image input for now - it was using the screenshot as first frame
# if screenshot_path and screenshot_path.exists():
# import base64
# import io
# img = Image.open(screenshot_path)
# img_buffer = io.BytesIO()
# img.save(img_buffer, format='PNG')
# img_bytes = img_buffer.getvalue()
# operation = sync_client.models.generate_videos(
# model='veo-3.0-generate-001',
# prompt=prompt,
# image=cast(Any, {
# 'imageBytes': base64.b64encode(img_bytes).decode('utf-8'),
# 'mimeType': 'image/png'
# }),
# config=cast(Any, {'aspectRatio': '9:16', 'resolution': '720p'}),
# )
# else:
operation = sync_client.models.generate_videos(
model='veo-3.0-generate-001',
prompt=prompt,
config=cast(Any, {'aspectRatio': '9:16', 'resolution': '720p'}),
)
while not operation.done:
await asyncio.sleep(10)
operation = sync_client.operations.get(operation)
if not operation.response or not operation.response.generated_videos:
raise RuntimeError('No videos generated')
videos = operation.response.generated_videos
video = videos[0]
video_file = getattr(video, 'video', None)
if not video_file:
raise RuntimeError('No video file in response')
sync_client.files.download(file=video_file)
video_bytes = getattr(video_file, 'video_bytes', None)
if not video_bytes:
raise RuntimeError('No video bytes in response')
return video_bytes
async def save_results(self, ad_content: bytes, prompt: str, analysis: str, url: str, timestamp: str) -> str:
if self.mode == 'instagram':
content_path = self.output_dir / f'ad_{timestamp}.png'
else: # tiktok
content_path = self.output_dir / f'ad_{timestamp}.mp4'
async with aiofiles.open(content_path, 'wb') as f:
await f.write(ad_content)
analysis_path = self.output_dir / f'analysis_{timestamp}.txt'
async with aiofiles.open(analysis_path, 'w', encoding='utf-8') as f:
await f.write(f'URL: {url}\n\n')
await f.write('BROWSER-USE ANALYSIS:\n')
await f.write(analysis)
await f.write('\n\nGENERATED PROMPT:\n')
await f.write(prompt)
return str(content_path)
def open_file(file_path: str):
"""Open file with default system viewer"""
try:
if sys.platform.startswith('darwin'):
subprocess.run(['open', file_path], check=True)
elif sys.platform.startswith('win'):
subprocess.run(['cmd', '/c', 'start', '', file_path], check=True)
else:
subprocess.run(['xdg-open', file_path], check=True)
except Exception as e:
print(f'❌ Could not open file: {e}')
async def create_ad_from_landing_page(url: str, debug: bool = False, mode: str = 'instagram', ad_id: int = 1):
analyzer = LandingPageAnalyzer(debug=debug)
try:
if ad_id == 1:
print(f'🚀 Analyzing {url} for {mode.capitalize()} ad...')
page_data = await analyzer.analyze_landing_page(url, mode=mode)
else:
analyzer_temp = LandingPageAnalyzer(debug=debug)
page_data = await analyzer_temp.analyze_landing_page(url, mode=mode)
generator = AdGenerator(mode=mode)
if mode == 'instagram':
prompt = generator.create_ad_prompt(page_data['analysis'])
ad_content = await generator.generate_ad_image(prompt, page_data.get('screenshot_path'))
if ad_content is None:
raise RuntimeError(f'Ad image generation failed for ad #{ad_id}')
else: # tiktok
video_concept = await generator.create_video_concept(page_data['analysis'], ad_id)
prompt = generator.create_ad_prompt(page_data['analysis'], video_concept)
ad_content = await generator.generate_ad_video(prompt, page_data.get('screenshot_path'), ad_id)
result_path = await generator.save_results(ad_content, prompt, page_data['analysis'], url, page_data['timestamp'])
if mode == 'instagram':
print(f'🎨 Generated image ad #{ad_id}: {result_path}')
else:
print(f'🎬 Generated video ad #{ad_id}: {result_path}')
open_file(result_path)
return result_path
except Exception as e:
print(f'❌ Error for ad #{ad_id}: {e}')
raise
finally:
if ad_id == 1 and page_data.get('screenshot_path'):
print(f'📸 Page screenshot: {page_data["screenshot_path"]}')
async def generate_single_ad(page_data: dict, mode: str, ad_id: int):
"""Generate a single ad using pre-analyzed page data"""
generator = AdGenerator(mode=mode)
try:
if mode == 'instagram':
prompt = generator.create_ad_prompt(page_data['analysis'])
ad_content = await generator.generate_ad_image(prompt, page_data.get('screenshot_path'))
if ad_content is None:
raise RuntimeError(f'Ad image generation failed for ad #{ad_id}')
else: # tiktok
video_concept = await generator.create_video_concept(page_data['analysis'], ad_id)
prompt = generator.create_ad_prompt(page_data['analysis'], video_concept)
ad_content = await generator.generate_ad_video(prompt, page_data.get('screenshot_path'), ad_id)
# Create unique timestamp for each ad
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + f'_{ad_id}'
result_path = await generator.save_results(ad_content, prompt, page_data['analysis'], page_data['url'], timestamp)
if mode == 'instagram':
print(f'🎨 Generated image ad #{ad_id}: {result_path}')
else:
print(f'🎬 Generated video ad #{ad_id}: {result_path}')
return result_path
except Exception as e:
print(f'❌ Error for ad #{ad_id}: {e}')
raise
async def create_multiple_ads(url: str, debug: bool = False, mode: str = 'instagram', count: int = 1):
"""Generate multiple ads in parallel using asyncio concurrency"""
if count == 1:
return await create_ad_from_landing_page(url, debug, mode, 1)
print(f'🚀 Analyzing {url} for {count} {mode} ads...')
analyzer = LandingPageAnalyzer(debug=debug)
page_data = await analyzer.analyze_landing_page(url, mode=mode)
print(f'🎯 Generating {count} {mode} ads in parallel...')
tasks = []
for i in range(count):
task = create_task_with_error_handling(generate_single_ad(page_data, mode, i + 1), name=f'generate_ad_{i + 1}')
tasks.append(task)
results = await asyncio.gather(*tasks, return_exceptions=True)
successful = []
failed = []
for i, result in enumerate(results):
if isinstance(result, Exception):
failed.append(i + 1)
else:
successful.append(result)
print(f'\n✅ Successfully generated {len(successful)}/{count} ads')
if failed:
print(f'❌ Failed ads: {failed}')
if page_data.get('screenshot_path'):
print(f'📸 Page screenshot: {page_data["screenshot_path"]}')
for ad_path in successful:
open_file(ad_path)
return successful
if __name__ == '__main__':
url = args.url
if not url:
url = input('🔗 Enter URL: ').strip() or 'https://www.apple.com/iphone-17-pro/'
if args.tiktok:
mode = 'tiktok'
else:
mode = 'instagram'
asyncio.run(create_multiple_ads(url, debug=args.debug, mode=mode, count=args.count))
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/apps/msg-use/login.py | examples/apps/msg-use/login.py | import asyncio
import os
from pathlib import Path
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
# Browser profile directory for persistence (same as main script)
USER_DATA_DIR = Path.home() / '.config' / 'whatsapp_scheduler' / 'browser_profile'
USER_DATA_DIR.mkdir(parents=True, exist_ok=True)
# Storage state file for cookies
STORAGE_STATE_FILE = USER_DATA_DIR / 'storage_state.json'
async def login_to_whatsapp():
"""Open WhatsApp Web and wait for user to scan QR code"""
if not GOOGLE_API_KEY:
print('❌ Error: GOOGLE_API_KEY environment variable is required')
print("Please set it with: export GOOGLE_API_KEY='your-api-key-here'")
return
print('WhatsApp Login Setup')
print('=' * 50)
print(f'Browser profile directory: {USER_DATA_DIR}')
print(f'Storage state file: {STORAGE_STATE_FILE}')
print('=' * 50)
try:
llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
task = """
You are helping a user log into WhatsApp Web. Follow these steps:
1. Navigate to https://web.whatsapp.com
2. Wait for the page to load completely
3. If you see a QR code, tell the user to scan it with their phone
4. Wait patiently for the login to complete
5. Once you see the WhatsApp chat interface, confirm successful login
Take your time and be patient with page loads.
"""
print('\nOpening WhatsApp Web...')
print('Please scan the QR code when it appears.\n')
browser_session = BrowserSession(
headless=False, # Show browser
user_data_dir=str(USER_DATA_DIR), # Use persistent profile directory
storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None, # Use saved cookies/session
)
agent = Agent(task=task, llm=llm, browser_session=browser_session)
result = await agent.run()
print('\n✅ Login completed!')
print("Note: For now, you'll need to scan the QR code each time.")
print("We'll improve session persistence in a future update.")
print('\nPress Enter to close the browser...')
input()
except Exception as e:
print(f'\n❌ Error during login: {str(e)}')
print('Please try again.')
if __name__ == '__main__':
asyncio.run(login_to_whatsapp())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/apps/msg-use/scheduler.py | examples/apps/msg-use/scheduler.py | #!/usr/bin/env python3
"""
WhatsApp Message Scheduler - Send scheduled messages via WhatsApp Web
"""
import argparse
import asyncio
import json
import logging
import os
import random
import re
from datetime import datetime, timedelta
from pathlib import Path
def setup_environment(debug: bool):
if not debug:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
logging.getLogger().setLevel(logging.CRITICAL)
else:
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'true'
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'info'
parser = argparse.ArgumentParser(description='WhatsApp Scheduler - Send scheduled messages via WhatsApp Web')
parser.add_argument('--debug', action='store_true', help='Debug mode: show browser and verbose logs')
parser.add_argument('--test', action='store_true', help='Test mode: show what messages would be sent without sending them')
parser.add_argument('--auto', action='store_true', help='Auto mode: respond to unread messages every 30 minutes')
args = parser.parse_args()
setup_environment(args.debug)
from browser_use import Agent, BrowserSession
from browser_use.llm.google import ChatGoogle
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') or os.getenv('GEMINI_API_KEY')
USER_DATA_DIR = Path.home() / '.config' / 'whatsapp_scheduler' / 'browser_profile'
USER_DATA_DIR.mkdir(parents=True, exist_ok=True)
STORAGE_STATE_FILE = USER_DATA_DIR / 'storage_state.json'
async def parse_messages():
"""Parse messages.txt and extract scheduling info"""
messages_file = Path('messages.txt')
if not messages_file.exists():
print('❌ messages.txt not found!')
return []
import aiofiles
async with aiofiles.open(messages_file) as f:
content = await f.read()
llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.1, api_key=GOOGLE_API_KEY)
now = datetime.now()
prompt = f"""
Parse these WhatsApp message instructions and extract:
1. Contact name (extract just the name, not descriptions)
2. Message content (what to send)
3. Date and time (when to send)
Current date/time: {now.strftime('%Y-%m-%d %H:%M')}
Today is: {now.strftime('%Y-%m-%d')}
Current time is: {now.strftime('%H:%M')}
Instructions:
{content}
Return ONLY a JSON array with format:
[{{"contact": "name", "message": "text", "datetime": "YYYY-MM-DD HH:MM"}}]
CRITICAL: Transform instructions into actual messages:
QUOTED TEXT → Use exactly as-is:
- Text in "quotes" becomes the exact message
UNQUOTED INSTRUCTIONS → Generate actual content:
- If it's an instruction to write something → write the actual thing
- If it's an instruction to tell someone something → write what to tell them
- If it's an instruction to remind someone → write the actual reminder
- For multi-line content like poems: use single line with spacing, not line breaks
DO NOT copy the instruction - create the actual message content!
Time Rules:
- If only time given (like "at 15:30"), use TODAY
- If no date specified, assume TODAY
- If no year given, use current year
- Default time is 9:00 if not specified
- Extract names from parentheses: "hinge date (Camila)" → "Camila"
- "tomorrow" means {(now + timedelta(days=1)).strftime('%Y-%m-%d')}
- "next tuesday" or similar means the next occurrence of that day
"""
from browser_use.llm.messages import UserMessage
response = await llm.ainvoke([UserMessage(content=prompt)])
response_text = response.completion if hasattr(response, 'completion') else str(response)
# Extract JSON
json_match = re.search(r'\[.*?\]', response_text, re.DOTALL)
if json_match:
try:
messages = json.loads(json_match.group())
for msg in messages:
if 'message' in msg:
msg['message'] = re.sub(r'\n+', ' • ', msg['message'])
msg['message'] = re.sub(r'\s+', ' ', msg['message']).strip()
return messages
except json.JSONDecodeError:
pass
return []
async def send_message(contact, message):
"""Send a WhatsApp message"""
print(f'\n📱 Sending to {contact}: {message}')
llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
task = f"""
Send WhatsApp message:
1. Go to https://web.whatsapp.com
2. Search for contact: {contact}
3. Click on the contact
4. Type message: {message}
5. Press Enter to send
6. Confirm sent
"""
browser = BrowserSession(
headless=not args.debug, # headless=False only when debug=True
user_data_dir=str(USER_DATA_DIR),
storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None,
)
agent = Agent(task=task, llm=llm, browser_session=browser)
await agent.run()
print(f'✅ Sent to {contact}')
async def auto_respond_to_unread():
"""Click unread tab and respond to messages"""
print('\nAuto-responding to unread messages...')
llm = ChatGoogle(model='gemini-2.0-flash-exp', temperature=0.3, api_key=GOOGLE_API_KEY)
task = """
1. Go to https://web.whatsapp.com
2. Wait for page to load
3. Click on the "Unread" filter tab
4. If there are unread messages:
- Click on each unread chat
- Read the last message
- Generate and send a friendly, contextual response
- Move to next unread chat
5. Report how many messages were responded to
"""
browser = BrowserSession(
headless=not args.debug,
user_data_dir=str(USER_DATA_DIR),
storage_state=str(STORAGE_STATE_FILE) if STORAGE_STATE_FILE.exists() else None,
)
agent = Agent(task=task, llm=llm, browser_session=browser)
result = await agent.run()
print('✅ Auto-response complete')
return result
async def main():
	"""Entry point: either auto-respond to unread chats forever (--auto),
	or parse messages.txt and send each message at its scheduled time.
	"""
	if not GOOGLE_API_KEY:
		print('❌ Set GOOGLE_API_KEY or GEMINI_API_KEY environment variable')
		return
	print('WhatsApp Scheduler')
	print(f'Profile: {USER_DATA_DIR}')
	print()
	# Auto mode - respond to unread messages periodically
	if args.auto:
		print('AUTO MODE - Responding to unread messages every ~30 minutes')
		print('Press Ctrl+C to stop.\n')
		while True:
			try:
				await auto_respond_to_unread()
				# Wait 30 minutes +/- 5 minutes randomly
				# (jitter makes the polling pattern look less bot-like)
				wait_minutes = 30 + random.randint(-5, 5)
				print(f'\n⏰ Next check in {wait_minutes} minutes...')
				await asyncio.sleep(wait_minutes * 60)
			except KeyboardInterrupt:
				print('\n\nAuto mode stopped by user')
				break
			except Exception as e:
				# Best-effort loop: log and retry rather than crash the daemon.
				print(f'\n❌ Error in auto mode: {e}')
				print('Waiting 5 minutes before retry...')
				await asyncio.sleep(300)
		return
	# Parse messages
	print('Parsing messages.txt...')
	messages = await parse_messages()
	if not messages:
		print('No messages found')
		return
	print(f'\nFound {len(messages)} messages:')
	for msg in messages:
		print(f' • {msg["datetime"]}: {msg["message"][:30]}... to {msg["contact"]}')
	# Split into messages whose scheduled time has already passed (send now)
	# and future ones (monitor until due).
	now = datetime.now()
	immediate = []
	future = []
	for msg in messages:
		msg_time = datetime.strptime(msg['datetime'], '%Y-%m-%d %H:%M')
		if msg_time <= now:
			immediate.append(msg)
		else:
			future.append(msg)
	if args.test:
		# Dry run: show what would happen without driving the browser.
		print('\n=== TEST MODE - Preview ===')
		if immediate:
			print(f'\nWould send {len(immediate)} past-due messages NOW:')
			for msg in immediate:
				print(f' 📱 To {msg["contact"]}: {msg["message"]}')
		if future:
			print(f'\nWould monitor {len(future)} future messages:')
			for msg in future:
				print(f' ⏰ {msg["datetime"]}: To {msg["contact"]}: {msg["message"]}')
		print('\nTest mode complete. No messages sent.')
		return
	if immediate:
		print(f'\nSending {len(immediate)} past-due messages NOW...')
		for msg in immediate:
			await send_message(msg['contact'], msg['message'])
	if future:
		print(f'\n⏰ Monitoring {len(future)} future messages...')
		print('Press Ctrl+C to stop.\n')
		last_status = None
		# Poll every 30s: send any message whose time has arrived, keep the rest.
		while future:
			now = datetime.now()
			due = []
			remaining = []
			for msg in future:
				msg_time = datetime.strptime(msg['datetime'], '%Y-%m-%d %H:%M')
				if msg_time <= now:
					due.append(msg)
				else:
					remaining.append(msg)
			for msg in due:
				print(f'\n⏰ Time reached for {msg["contact"]}')
				await send_message(msg['contact'], msg['message'])
			future = remaining
			if future:
				# Only print the "Next: ..." status line when it changes.
				next_msg = min(future, key=lambda x: datetime.strptime(x['datetime'], '%Y-%m-%d %H:%M'))
				current_status = f'Next: {next_msg["datetime"]} to {next_msg["contact"]}'
				if current_status != last_status:
					print(current_status)
					last_status = current_status
			await asyncio.sleep(30)  # Check every 30 seconds
	print('\n✅ All messages processed!')
if __name__ == '__main__':
	# Script entry point: run the async scheduler until all messages are sent.
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/gmail_2fa_integration.py | examples/integrations/gmail_2fa_integration.py | """
Gmail 2FA Integration Example with Grant Mechanism
This example demonstrates how to use the Gmail integration for handling 2FA codes
during web automation with a robust credential grant and re-authentication system.
Features:
- Automatic credential validation and setup
- Interactive OAuth grant flow when credentials are missing/invalid
- Fallback re-authentication mechanisms
- Clear error handling and user guidance
Setup:
1. Enable Gmail API in Google Cloud Console
2. Create OAuth 2.0 credentials and download JSON
3. Save credentials as ~/.config/browseruse/gmail_credentials.json
4. Run this example - it will guide you through OAuth setup if needed
"""
import asyncio
import json
import os
import sys
from dotenv import load_dotenv
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
load_dotenv()
from browser_use import Agent, ChatOpenAI, Tools
from browser_use.config import CONFIG
from browser_use.integrations.gmail import GmailService, register_gmail_actions
class GmailGrantManager:
	"""
	Manages Gmail OAuth credential grants and authentication flows.
	Provides a robust mechanism for setting up and maintaining Gmail API access.
	"""

	def __init__(self):
		# credentials_file: OAuth client JSON downloaded from Google Cloud Console.
		# token_file: the granted access/refresh token saved after the OAuth flow.
		self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR
		self.credentials_file = self.config_dir / 'gmail_credentials.json'
		self.token_file = self.config_dir / 'gmail_token.json'
		print(f'GmailGrantManager initialized with config_dir: {self.config_dir}')
		print(f'GmailGrantManager initialized with credentials_file: {self.credentials_file}')
		print(f'GmailGrantManager initialized with token_file: {self.token_file}')

	def check_credentials_exist(self) -> bool:
		"""Check if OAuth credentials file exists."""
		return self.credentials_file.exists()

	def check_token_exists(self) -> bool:
		"""Check if saved token file exists."""
		return self.token_file.exists()

	def validate_credentials_format(self) -> tuple[bool, str]:
		"""
		Validate that the credentials file has the correct format.
		Returns (is_valid, error_message)
		"""
		if not self.check_credentials_exist():
			return False, 'Credentials file not found'
		try:
			with open(self.credentials_file) as f:
				creds = json.load(f)
			# Accept if either 'web' or 'installed' section exists and is not empty
			# ('installed' is what a "Desktop application" OAuth client produces).
			if creds.get('web') or creds.get('installed'):
				return True, 'Credentials file is valid'
			return False, "Invalid credentials format - neither 'web' nor 'installed' sections found"
		except json.JSONDecodeError:
			return False, 'Credentials file is not valid JSON'
		except Exception as e:
			return False, f'Error reading credentials file: {e}'

	async def setup_oauth_credentials(self) -> bool:
		"""
		Guide user through OAuth credentials setup process.
		Returns True if setup is successful.
		"""
		print('\n🔐 Gmail OAuth Credentials Setup Required')
		print('=' * 50)
		if not self.check_credentials_exist():
			print('❌ Gmail credentials file not found')
		else:
			is_valid, error = self.validate_credentials_format()
			if not is_valid:
				print(f'❌ Gmail credentials file is invalid: {error}')
		# Step-by-step Google Cloud Console instructions for the user.
		print('\n📋 To set up Gmail API access:')
		print('1. Go to https://console.cloud.google.com/')
		print('2. Create a new project or select an existing one')
		print('3. Enable the Gmail API:')
		print('   - Go to "APIs & Services" > "Library"')
		print('   - Search for "Gmail API" and enable it')
		print('4. Create OAuth 2.0 credentials:')
		print('   - Go to "APIs & Services" > "Credentials"')
		print('   - Click "Create Credentials" > "OAuth client ID"')
		print('   - Choose "Desktop application"')
		print('   - Download the JSON file')
		print(f'5. Save the JSON file as: {self.credentials_file}')
		print(f'6. Ensure the directory exists: {self.config_dir}')
		# Create config directory if it doesn't exist
		self.config_dir.mkdir(parents=True, exist_ok=True)
		print(f'\n✅ Created config directory: {self.config_dir}')
		# Wait for user to set up credentials.
		# Loops until the file exists AND validates ('y'), or the user skips.
		while True:
			user_input = input('\n❓ Have you saved the credentials file? (y/n/skip): ').lower().strip()
			if user_input == 'skip':
				print('⏭️ Skipping credential validation for now')
				return False
			elif user_input == 'y':
				if self.check_credentials_exist():
					is_valid, error = self.validate_credentials_format()
					if is_valid:
						print('✅ Credentials file found and validated!')
						return True
					else:
						print(f'❌ Credentials file is invalid: {error}')
						print('Please check the file format and try again.')
				else:
					print(f'❌ Credentials file still not found at: {self.credentials_file}')
			elif user_input == 'n':
				print('⏳ Please complete the setup steps above and try again.')
			else:
				print('Please enter y, n, or skip')

	async def test_authentication(self, gmail_service: GmailService) -> tuple[bool, str]:
		"""
		Test Gmail authentication and return status.
		Returns (success, message)
		"""
		try:
			print('🔍 Testing Gmail authentication...')
			success = await gmail_service.authenticate()
			if success and gmail_service.is_authenticated():
				print('✅ Gmail authentication successful!')
				return True, 'Authentication successful'
			else:
				return False, 'Authentication failed - invalid credentials or OAuth flow failed'
		except Exception as e:
			return False, f'Authentication error: {e}'

	async def handle_authentication_failure(self, gmail_service: GmailService, error_msg: str) -> bool:
		"""
		Handle authentication failures with fallback mechanisms.
		Returns True if recovery was successful.
		"""
		print(f'\n❌ Gmail authentication failed: {error_msg}')
		print('\n🔧 Attempting recovery...')
		# Option 1: Try removing old token file
		# (a stale/expired token is the most common cause, so force a fresh grant)
		if self.token_file.exists():
			print('🗑️ Removing old token file to force re-authentication...')
			try:
				self.token_file.unlink()
				print('✅ Old token file removed')
				# Try authentication again
				success = await gmail_service.authenticate()
				if success:
					print('✅ Re-authentication successful!')
					return True
			except Exception as e:
				print(f'❌ Failed to remove token file: {e}')
		# Option 2: Validate and potentially re-setup credentials
		is_valid, cred_error = self.validate_credentials_format()
		if not is_valid:
			print(f'\n❌ Credentials file issue: {cred_error}')
			print('🔧 Initiating credential re-setup...')
			return await self.setup_oauth_credentials()
		# Option 3: Provide manual troubleshooting steps
		print('\n🔧 Manual troubleshooting steps:')
		print('1. Check that Gmail API is enabled in Google Cloud Console')
		print('2. Verify OAuth consent screen is configured')
		print('3. Ensure redirect URIs include http://localhost:8080')
		print('4. Check if credentials file is for the correct project')
		print('5. Try regenerating OAuth credentials in Google Cloud Console')
		retry = input('\n❓ Would you like to retry authentication? (y/n): ').lower().strip()
		if retry == 'y':
			success = await gmail_service.authenticate()
			return success
		return False
async def main():
	"""Walk through the full Gmail 2FA integration demo:
	validate credentials, authenticate (with recovery), register Gmail tools,
	then run three agents exercising email retrieval and 2FA code detection.
	"""
	print('🚀 Gmail 2FA Integration Example with Grant Mechanism')
	print('=' * 60)
	# Initialize grant manager
	grant_manager = GmailGrantManager()
	# Step 1: Check and validate credentials
	print('🔍 Step 1: Validating Gmail credentials...')
	if not grant_manager.check_credentials_exist():
		print('❌ No Gmail credentials found')
		setup_success = await grant_manager.setup_oauth_credentials()
		if not setup_success:
			print('⏹️ Setup cancelled or failed. Exiting...')
			return
	else:
		is_valid, error = grant_manager.validate_credentials_format()
		if not is_valid:
			print(f'❌ Invalid credentials: {error}')
			setup_success = await grant_manager.setup_oauth_credentials()
			if not setup_success:
				print('⏹️ Setup cancelled or failed. Exiting...')
				return
		else:
			print('✅ Gmail credentials file found and validated')
	# Step 2: Initialize Gmail service and test authentication
	print('\n🔍 Step 2: Testing Gmail authentication...')
	gmail_service = GmailService()
	auth_success, auth_message = await grant_manager.test_authentication(gmail_service)
	if not auth_success:
		# Fall back to the grant manager's recovery ladder (token wipe,
		# credential re-setup, manual troubleshooting).
		print(f'❌ Initial authentication failed: {auth_message}')
		recovery_success = await grant_manager.handle_authentication_failure(gmail_service, auth_message)
		if not recovery_success:
			print('❌ Failed to recover Gmail authentication. Please check your setup.')
			return
	# Step 3: Initialize tools with authenticated service
	print('\n🔍 Step 3: Registering Gmail actions...')
	tools = Tools()
	register_gmail_actions(tools, gmail_service=gmail_service)
	print('✅ Gmail actions registered with tools')
	print('Available Gmail actions:')
	print('- get_recent_emails: Get recent emails with filtering')
	print()
	# Initialize LLM
	llm = ChatOpenAI(model='gpt-4.1-mini')
	# Step 4: Test Gmail functionality
	print('🔍 Step 4: Testing Gmail email retrieval...')
	agent = Agent(task='Get recent emails from Gmail to test the integration is working properly', llm=llm, tools=tools)
	try:
		history = await agent.run()
		print('✅ Gmail email retrieval test completed')
	except Exception as e:
		print(f'❌ Gmail email retrieval test failed: {e}')
		# Try one more recovery attempt before giving up entirely.
		print('🔧 Attempting final recovery...')
		recovery_success = await grant_manager.handle_authentication_failure(gmail_service, str(e))
		if recovery_success:
			print('✅ Recovery successful, re-running test...')
			history = await agent.run()
		else:
			print('❌ Final recovery failed. Please check your Gmail API setup.')
			return
	print('\n' + '=' * 60)
	# Step 5: Demonstrate 2FA code finding
	print('🔍 Step 5: Testing 2FA code detection...')
	agent2 = Agent(
		task='Search for any 2FA verification codes or OTP codes in recent Gmail emails from the last 30 minutes',
		llm=llm,
		tools=tools,
	)
	history2 = await agent2.run()
	print('✅ 2FA code search completed')
	print('\n' + '=' * 60)
	# Step 6: Simulate complete login flow
	print('🔍 Step 6: Demonstrating complete 2FA login flow...')
	agent3 = Agent(
		task="""
		Demonstrate a complete 2FA-enabled login flow:
		1. Check for any existing 2FA codes in recent emails
		2. Explain how the agent would handle a typical login:
		   - Navigate to a login page
		   - Enter credentials
		   - Wait for 2FA prompt
		   - Use get_recent_emails to find the verification code
		   - Extract and enter the 2FA code
		3. Show what types of emails and codes can be detected
		""",
		llm=llm,
		tools=tools,
	)
	history3 = await agent3.run()
	print('✅ Complete 2FA flow demonstration completed')
	print('\n' + '=' * 60)
	print('🎉 Gmail 2FA Integration with Grant Mechanism completed successfully!')
	print('\n💡 Key features demonstrated:')
	print('- ✅ Automatic credential validation and setup')
	print('- ✅ Robust error handling and recovery mechanisms')
	print('- ✅ Interactive OAuth grant flow')
	print('- ✅ Token refresh and re-authentication')
	print('- ✅ 2FA code detection and extraction')
	print('\n🔧 Grant mechanism benefits:')
	print('- Handles missing or invalid credentials gracefully')
	print('- Provides clear setup instructions')
	print('- Automatically recovers from authentication failures')
	print('- Validates credential format before use')
	print('- Offers multiple fallback options')
if __name__ == '__main__':
	# Script entry point: run the interactive Gmail 2FA demo.
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/discord/discord_example.py | examples/integrations/discord/discord_example.py | """
This examples requires you to have a Discord bot token and the bot already added to a server.
Five Steps to create and invite a Discord bot:
1. Create a Discord Application:
* Go to the Discord Developer Portal: https://discord.com/developers/applications
* Log in to the Discord website.
* Click on "New Application".
* Give the application a name and click "Create".
2. Configure the Bot:
* Navigate to the "Bot" tab on the left side of the screen.
* Make sure "Public Bot" is ticked if you want others to invite your bot.
* Generate your bot token by clicking on "Reset Token", Copy the token and save it securely.
* Do not share the bot token. Treat it like a password. If the token is leaked, regenerate it.
3. Enable Privileged Intents:
* Scroll down to the "Privileged Gateway Intents" section.
* Enable the necessary intents (e.g., "Server Members Intent" and "Message Content Intent").
--> Note: Enabling privileged intents for bots in over 100 guilds requires bot verification. You may need to contact Discord support to enable privileged intents for verified bots.
4. Generate Invite URL:
* Go to "OAuth2" tab and "OAuth2 URL Generator" section.
* Under "scopes", tick the "bot" checkbox.
* Tick the permissions required for your bot to function under “Bot Permissions”.
* e.g. "Send Messages", "Send Messages in Threads", "Read Message History", "Mention Everyone".
* Copy the generated URL under the "GENERATED URL" section at the bottom.
5. Invite the Bot:
* Paste the URL into your browser.
* Choose a server to invite the bot to.
* Click “Authorize”.
--> Note: The person adding the bot needs "Manage Server" permissions.
6. Run the code below to start the bot with your bot token.
7. Write e.g. "/bu what's the weather in Tokyo?" to start a browser-use task and get a response inside the Discord channel.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from dotenv import load_dotenv
load_dotenv()
from browser_use.browser import BrowserProfile
from browser_use.llm import ChatGoogle
from examples.integrations.discord.discord_api import DiscordBot
# load credentials from environment variables
bot_token = os.getenv('DISCORD_BOT_TOKEN')
if not bot_token:
	raise ValueError('Discord bot token not found in .env file.')
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
	raise ValueError('GOOGLE_API_KEY is not set')

# LLM used by the bot to run browser-use tasks triggered from Discord messages.
llm = ChatGoogle(model='gemini-2.0-flash-exp', api_key=api_key)

bot = DiscordBot(
	llm=llm,  # required; instance of BaseChatModel
	prefix='$bu',  # optional; prefix of messages to trigger browser-use, defaults to "$bu"
	ack=True,  # optional; whether to acknowledge task receipt with a message, defaults to False
	browser_profile=BrowserProfile(
		headless=False
	),  # optional; useful for changing headless mode or other browser configs, defaults to headless mode
)

# Blocks and runs the Discord event loop until interrupted.
bot.run(
	token=bot_token,  # required; Discord bot token
)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/discord/discord_api.py | examples/integrations/discord/discord_api.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from dotenv import load_dotenv
load_dotenv()
import discord # type: ignore
from discord.ext import commands # type: ignore
from browser_use.agent.service import Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.llm import BaseChatModel
class DiscordBot(commands.Bot):
	"""Discord bot implementation for Browser-Use tasks.
	This bot allows users to run browser automation tasks through Discord messages.
	Processes tasks asynchronously and sends the result back to the user in response to the message.
	Messages must start with the configured prefix (default: "$bu") followed by the task description.
	Args:
	    llm (BaseChatModel): Language model instance to use for task processing
	    prefix (str, optional): Command prefix for triggering browser tasks. Defaults to "$bu"
	    ack (bool, optional): Whether to acknowledge task receipt with a message. Defaults to False
	    browser_profile (BrowserProfile, optional): Browser profile settings.
	        Defaults to headless mode
	Usage:
	    ```python
	    from browser_use import ChatOpenAI

	    llm = ChatOpenAI()
	    bot = DiscordBot(llm=llm, prefix='$bu', ack=True)
	    bot.run('YOUR_DISCORD_TOKEN')
	    ```
	Discord Usage:
	    Send messages starting with the prefix:
	    "$bu search for python tutorials"
	"""

	def __init__(
		self,
		llm: BaseChatModel,
		prefix: str = '$bu',
		ack: bool = False,
		# NOTE(review): a BrowserProfile instance as a default argument is created
		# once and shared by every DiscordBot constructed without an explicit
		# profile — confirm this sharing is intended.
		browser_profile: BrowserProfile = BrowserProfile(headless=True),
	):
		self.llm = llm
		self.prefix = prefix.strip()
		self.ack = ack
		self.browser_profile = browser_profile
		# Define intents.
		intents = discord.Intents.default()  # type: ignore
		intents.message_content = True  # Enable message content intent
		intents.members = True  # Enable members intent for user info
		# Initialize the bot with a command prefix and intents.
		super().__init__(command_prefix='!', intents=intents)  # You may not need prefix, just here for flexibility
		# self.tree = app_commands.CommandTree(self) # Initialize command tree for slash commands.

	async def on_ready(self):
		"""Called when the bot is ready."""
		try:
			print(f'We have logged in as {self.user}')
			cmds = await self.tree.sync()  # Sync the command tree with discord
		except Exception as e:
			print(f'Error during bot startup: {e}')

	async def on_message(self, message):
		"""Called when a message is received."""
		try:
			if message.author == self.user:  # Ignore the bot's messages
				return
			# Only react to messages of the form "<prefix> <task>".
			if message.content.strip().startswith(f'{self.prefix} '):
				if self.ack:
					try:
						await message.reply(
							'Starting browser use task...',
							mention_author=True,  # Don't ping the user
						)
					except Exception as e:
						print(f'Error sending start message: {e}')
				try:
					# Strip the prefix and run the remainder as the agent task.
					agent_message = await self.run_agent(message.content.replace(f'{self.prefix} ', '').strip())
					await message.channel.send(content=f'{agent_message}', reference=message, mention_author=True)
				except Exception as e:
					await message.channel.send(
						content=f'Error during task execution: {str(e)}',
						reference=message,
						mention_author=True,
					)
		except Exception as e:
			print(f'Error in message handling: {e}')
		# await self.process_commands(message) # Needed to process bot commands

	async def run_agent(self, task: str) -> str:
		"""Run one browser-use agent for the given task and return its final text.

		Raises:
		    Exception: wraps any failure from the underlying agent run.
		"""
		try:
			browser_session = BrowserSession(browser_profile=self.browser_profile)
			agent = Agent(task=(task), llm=self.llm, browser_session=browser_session)
			result = await agent.run()
			agent_message = None
			# Pull the final extracted content from the last history entry, if any.
			if result.is_done():
				agent_message = result.history[-1].result[0].extracted_content
			if agent_message is None:
				agent_message = 'Oops! Something went wrong while running Browser-Use.'
			return agent_message
		except Exception as e:
			raise Exception(f'Browser-use task failed: {str(e)}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/slack/slack_api.py | examples/integrations/slack/slack_api.py | import logging
import os
import sys
from typing import Annotated
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from fastapi import Depends, FastAPI, HTTPException, Request
from slack_sdk.errors import SlackApiError # type: ignore
from slack_sdk.signature import SignatureVerifier # type: ignore
from slack_sdk.web.async_client import AsyncWebClient # type: ignore
from browser_use.agent.service import Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.llm import BaseChatModel
from browser_use.logging_config import setup_logging
setup_logging()
logger = logging.getLogger('slack')
app = FastAPI()
class SlackBot:
	"""Slack bot that runs browser-use tasks from messages starting with "$bu".

	Handles Slack Events API payloads, deduplicates events by event_id,
	runs a browser agent for each task, and posts the result back in-thread.
	"""

	def __init__(
		self,
		llm: BaseChatModel,
		bot_token: str,
		signing_secret: str,
		ack: bool = False,
		# NOTE(review): a BrowserProfile instance as a default argument is shared
		# across instances constructed without an explicit profile.
		browser_profile: BrowserProfile = BrowserProfile(headless=True),
	):
		if not bot_token or not signing_secret:
			raise ValueError('Bot token and signing secret must be provided')
		self.llm = llm
		self.ack = ack
		self.browser_profile = browser_profile
		self.client = AsyncWebClient(token=bot_token)
		self.signature_verifier = SignatureVerifier(signing_secret)
		# Event IDs already handled (Slack retries deliveries, so dedupe is needed).
		# NOTE(review): this set grows without bound over the process lifetime —
		# consider a bounded structure for long-running deployments.
		self.processed_events = set()
		logger.info('SlackBot initialized')

	async def handle_event(self, event, event_id):
		"""Process one Slack event: dedupe, filter, and dispatch "$bu ..." tasks."""
		try:
			logger.info(f'Received event id: {event_id}')
			if not event_id:
				logger.warning('Event ID missing in event data')
				return
			if event_id in self.processed_events:
				logger.info(f'Event {event_id} already processed')
				return
			self.processed_events.add(event_id)
			# Ignore the bot's own messages to avoid reply loops.
			if 'subtype' in event and event['subtype'] == 'bot_message':
				return
			text = event.get('text')
			user_id = event.get('user')
			if text and text.startswith('$bu '):
				task = text[len('$bu ') :].strip()
				if self.ack:
					try:
						await self.send_message(
							event['channel'], f'<@{user_id}> Starting browser use task...', thread_ts=event.get('ts')
						)
					except Exception as e:
						logger.error(f'Error sending start message: {e}')
				try:
					agent_message = await self.run_agent(task)
					await self.send_message(event['channel'], f'<@{user_id}> {agent_message}', thread_ts=event.get('ts'))
				except Exception as e:
					await self.send_message(event['channel'], f'Error during task execution: {str(e)}', thread_ts=event.get('ts'))
		except Exception as e:
			logger.error(f'Error in handle_event: {str(e)}')

	async def run_agent(self, task: str) -> str:
		"""Run one browser-use agent for the task; returns result text or an error string."""
		try:
			browser_session = BrowserSession(browser_profile=self.browser_profile)
			agent = Agent(task=task, llm=self.llm, browser_session=browser_session)
			result = await agent.run()
			agent_message = None
			# Pull the final extracted content from the last history entry, if any.
			if result.is_done():
				agent_message = result.history[-1].result[0].extracted_content
			if agent_message is None:
				agent_message = 'Oops! Something went wrong while running Browser-Use.'
			return agent_message
		except Exception as e:
			logger.error(f'Error during task execution: {str(e)}')
			return f'Error during task execution: {str(e)}'

	async def send_message(self, channel, text, thread_ts=None):
		"""Post a message to a Slack channel (optionally threaded); errors are logged, not raised."""
		try:
			await self.client.chat_postMessage(channel=channel, text=text, thread_ts=thread_ts)
		except SlackApiError as e:
			logger.error(f'Error sending message: {e.response["error"]}')
@app.post('/slack/events')
async def slack_events(request: Request, slack_bot: Annotated[SlackBot, Depends()]):
	"""FastAPI endpoint for Slack Events API callbacks.

	Verifies the Slack request signature, answers URL-verification challenges,
	and forwards events to the SlackBot handler.

	Raises:
	    HTTPException: 400 when signature verification fails, 500 on unexpected errors.
	"""
	try:
		if not slack_bot.signature_verifier.is_valid_request(await request.body(), dict(request.headers)):
			logger.warning('Request verification failed')
			raise HTTPException(status_code=400, detail='Request verification failed')
		event_data = await request.json()
		logger.info(f'Received event data: {event_data}')
		# Slack URL verification handshake: echo the challenge back.
		if 'challenge' in event_data:
			return {'challenge': event_data['challenge']}
		if 'event' in event_data:
			try:
				await slack_bot.handle_event(event_data.get('event'), event_data.get('event_id'))
			except Exception as e:
				logger.error(f'Error handling event: {str(e)}')
		return {}
	except HTTPException:
		# Bug fix: HTTPException subclasses Exception, so the broad handler below
		# used to swallow the 400 verification failure and re-raise it as a 500.
		# Propagate HTTPExceptions unchanged so clients see the intended status.
		raise
	except Exception as e:
		logger.error(f'Error in slack_events: {str(e)}')
		raise HTTPException(status_code=500, detail='Internal Server Error')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/slack/slack_example.py | examples/integrations/slack/slack_example.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use.browser import BrowserProfile
from browser_use.llm import ChatGoogle
from examples.integrations.slack.slack_api import SlackBot, app
# load credentials from environment variables
bot_token = os.getenv('SLACK_BOT_TOKEN')
if not bot_token:
	raise ValueError('Slack bot token not found in .env file.')
signing_secret = os.getenv('SLACK_SIGNING_SECRET')
if not signing_secret:
	raise ValueError('Slack signing secret not found in .env file.')
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
	raise ValueError('GOOGLE_API_KEY is not set')

# LLM used by the bot to run browser-use tasks triggered from Slack messages.
llm = ChatGoogle(model='gemini-2.0-flash-exp', api_key=api_key)

slack_bot = SlackBot(
	llm=llm,  # required; instance of BaseChatModel
	bot_token=bot_token,  # required; Slack bot token
	signing_secret=signing_secret,  # required; Slack signing secret
	ack=True,  # optional; whether to acknowledge task receipt with a message, defaults to False
	browser_profile=BrowserProfile(
		headless=True
	),  # optional; useful for changing headless mode or other browser configs, defaults to headless mode
)

# Wire FastAPI's dependency injection so the /slack/events endpoint receives
# this fully configured SlackBot instance.
app.dependency_overrides[SlackBot] = lambda: slack_bot
if __name__ == '__main__':
	import uvicorn

	# Bug fix: previously the app was passed as the import string
	# 'integrations.slack.slack_api:app', which makes uvicorn re-import the
	# module (under a different module path than the import above) and serve a
	# fresh `app` instance that lacks the dependency_overrides configured in
	# this file. Passing the app object directly serves the configured instance.
	uvicorn.run(app, host='0.0.0.0', port=3000)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/agentmail/2fa.py | examples/integrations/agentmail/2fa.py | import asyncio
import os
import sys
from agentmail import AsyncAgentMail # type: ignore
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Browser, ChatBrowserUse
from examples.integrations.agentmail.email_tools import EmailTools
# Natural-language task for the browser agent. The get_email_address and
# get_latest_email names refer to the actions registered by EmailTools below.
TASK = """
Go to reddit.com, create a new account (use the get_email_address), make up password and all other information, confirm the 2fa with get_latest_email, and like latest post on r/elon subreddit.
"""
async def main():
	"""Create a throwaway AgentMail inbox and run the signup task with email tools."""
	# Create email inbox
	# Get an API key from https://agentmail.to/
	client = AsyncAgentMail()
	inbox = await client.inboxes.create()
	print(f'Your email address is: {inbox.inbox_id}\n\n')
	# Email tools give the browser agent access to the inbox (2FA codes, signup mail).
	email_tools = EmailTools(email_client=client, inbox=inbox)
	# Initialize the LLM for browser-use agent
	model = ChatBrowserUse()
	# Set your local browser path
	chrome = Browser(executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
	await Agent(task=TASK, tools=email_tools, llm=model, browser=chrome).run()
if __name__ == '__main__':
	# Script entry point: run the 2FA signup demo.
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/integrations/agentmail/email_tools.py | examples/integrations/agentmail/email_tools.py | """
Email management to enable 2fa.
"""
import asyncio
import logging
# run `pip install agentmail` to install the library
from agentmail import AsyncAgentMail, Message, MessageReceivedEvent, Subscribe # type: ignore
from agentmail.inboxes.types.inbox import Inbox # type: ignore
from agentmail.inboxes.types.inbox_id import InboxId # type: ignore
from browser_use import Tools
# Configure basic logging if not already configured
# (avoids clobbering a host application's existing logging setup)
if not logging.getLogger().handlers:
	logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(name)s - %(message)s')

# Module-level logger for email-tool diagnostics.
logger = logging.getLogger(__name__)
class EmailTools(Tools):
def __init__(
self,
email_client: AsyncAgentMail | None = None,
email_timeout: int = 30,
inbox: Inbox | None = None,
):
super().__init__()
self.email_client = email_client or AsyncAgentMail()
self.email_timeout = email_timeout
self.register_email_tools()
self.inbox: Inbox | None = inbox
def _serialize_message_for_llm(self, message: Message) -> str:
	"""Render an email message as a compact plain-text block for the LLM."""
	# Prefer the plain-text part; fall back to a stripped-down HTML body.
	body_content = message.text
	if not body_content and message.html:
		body_content = self._html_to_text(message.html)
	header = f'From: {message.from_}\nTo: {message.to}\nTimestamp: {message.timestamp.isoformat()}'
	return f'{header}\nSubject: {message.subject}\nBody: {body_content}'
def _html_to_text(self, html: str) -> str:
	"""
	Simple HTML to text conversion.

	Strips <script>/<style> blocks and all tags, decodes HTML entities,
	and collapses whitespace.
	"""
	import re
	from html import unescape

	# Remove script and style elements - handle spaces in closing tags
	html = re.sub(r'<script\b[^>]*>.*?</script\s*>', '', html, flags=re.DOTALL | re.IGNORECASE)
	html = re.sub(r'<style\b[^>]*>.*?</style\s*>', '', html, flags=re.DOTALL | re.IGNORECASE)
	# Remove HTML tags
	html = re.sub(r'<[^>]+>', '', html)
	# Decode HTML entities. Bug fix: the previous hand-rolled replace chain
	# supported only a handful of entities and decoded the ampersand entity
	# before the others, so double-escaped text was double-unescaped.
	# html.unescape handles every named and numeric entity correctly.
	html = unescape(html)
	# Clean up whitespace
	html = re.sub(r'\s+', ' ', html)
	return html.strip()
async def get_or_create_inbox_client(self) -> Inbox:
	"""Return the cached inbox, creating one on first use.

	Assumes a single default inbox per API key (free tier); on paid tiers
	it is recommended to create one inbox per agent.
	"""
	# Lazily create the inbox the first time it is needed.
	return self.inbox or await self.create_inbox_client()
async def create_inbox_client(self) -> Inbox:
	"""Create a new inbox via the AgentMail API and cache it on this instance.

	Assumes a single default inbox per API key (free tier); on paid tiers
	it is recommended to create one inbox per agent.
	"""
	self.inbox = await self.email_client.inboxes.create()
	return self.inbox
async def wait_for_message(self, inbox_id: InboxId) -> Message:
	"""
	Wait for a message to be received in the inbox.

	Subscribes to inbox events over a websocket and blocks until a
	MessageReceivedEvent arrives.

	Raises:
	    TimeoutError: when no message event arrives within self.email_timeout
	        seconds (the timeout applies to each individual recv, so unrelated
	        events restart the wait).
	"""
	async with self.email_client.websockets.connect() as ws:
		await ws.send_subscribe(message=Subscribe(inbox_ids=[inbox_id]))
		try:
			while True:
				data = await asyncio.wait_for(ws.recv(), timeout=self.email_timeout)
				if isinstance(data, MessageReceivedEvent):
					# Mark the message read right away so subsequent
					# "unread" queries do not pick it up again.
					await self.email_client.inboxes.messages.update(
						inbox_id=inbox_id, message_id=data.message.message_id, remove_labels=['unread']
					)
					msg = data.message
					logger.info(f'Received new message from: {msg.from_} with subject: {msg.subject}')
					return msg
				# If not MessageReceived, continue waiting for the next event
		except TimeoutError:
			raise TimeoutError(f'No email received in the inbox in {self.email_timeout}s')
def register_email_tools(self):
	"""Register all email-related controller actions on this tools instance."""

	@self.action('Get email address for login. You can use this email to login to any service with email and password')
	async def get_email_address() -> str:
		"""
		Get the email address of the inbox.
		"""
		inbox = await self.get_or_create_inbox_client()
		logger.info(f'Email address: {inbox.inbox_id}')
		# NOTE(review): inbox_id is returned as the address — presumably the
		# inbox id *is* the email address for this provider; confirm.
		return inbox.inbox_id

	@self.action(
		'Get the latest unread email from the inbox from the last max_age_minutes (default 5 minutes). Waits some seconds for new emails if none found. Use for 2FA codes.'
	)
	async def get_latest_email(max_age_minutes: int = 5) -> str:
		"""
		1. Check for unread emails within the last max_age_minutes
		2. If none found, wait up to self.email_timeout seconds for a new email via websocket
		"""
		from datetime import datetime, timedelta, timezone

		inbox = await self.get_or_create_inbox_client()
		# Get unread emails
		emails = await self.email_client.inboxes.messages.list(inbox_id=inbox.inbox_id, labels=['unread'])
		# Filter unread emails by time window - use UTC timezone to match email timestamps
		time_cutoff = datetime.now(timezone.utc) - timedelta(minutes=max_age_minutes)
		logger.debug(f'Time cutoff: {time_cutoff}')
		logger.info(f'Found {len(emails.messages)} unread emails for inbox {inbox.inbox_id}')

		# Naive timestamps are treated as UTC so they can be compared with the cutoff.
		def _as_utc(ts):
			return ts if ts.tzinfo is not None else ts.replace(tzinfo=timezone.utc)

		recent_unread_emails = []
		for email_summary in emails.messages:  # index was unused; plain iteration
			# Get full email details to check timestamp
			full_email = await self.email_client.inboxes.messages.get(
				inbox_id=inbox.inbox_id, message_id=email_summary.message_id
			)
			if _as_utc(full_email.timestamp) >= time_cutoff:
				recent_unread_emails.append(full_email)

		# If we have recent unread emails, return the latest one
		if recent_unread_emails:
			# Sort by timestamp and get the most recent.
			# BUGFIX: normalize the key — sorting on raw timestamps raised
			# TypeError when naive and aware datetimes were mixed, even though
			# the filter above already normalized them for comparison.
			recent_unread_emails.sort(key=lambda x: _as_utc(x.timestamp), reverse=True)
			logger.info(f'Found {len(recent_unread_emails)} recent unread emails for inbox {inbox.inbox_id}')
			latest_email = recent_unread_emails[0]
			# Mark as read
			await self.email_client.inboxes.messages.update(
				inbox_id=inbox.inbox_id, message_id=latest_email.message_id, remove_labels=['unread']
			)
			logger.info(f'Latest email from: {latest_email.from_} with subject: {latest_email.subject}')
			return self._serialize_message_for_llm(latest_email)
		else:
			logger.info('No recent unread emails, waiting for a new one')
			# No recent unread emails, wait for new one
			try:
				latest_message = await self.wait_for_message(inbox_id=inbox.inbox_id)
			except TimeoutError:
				return f'No email received in the inbox in {self.email_timeout}s'
			return self._serialize_message_for_llm(latest_message)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/captcha.py | examples/use-cases/captcha.py | """
Goal: Automates CAPTCHA solving on a demo website.
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
NOTE: captchas are hard. For this example it works. But e.g. for iframes it does not.
for this example it helps to zoom in.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
async def main():
	"""Run the captcha-solving agent, then wait for a keypress before exiting."""
	agent = Agent(
		task='go to https://captcha.com/demos/features/captcha-demo.aspx and solve the captcha',
		llm=ChatOpenAI(model='gpt-4.1-mini'),
	)
	await agent.run()
	input('Press Enter to exit')


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/pcpartpicker.py | examples/use-cases/pcpartpicker.py | import asyncio
from browser_use import Agent, Browser, ChatBrowserUse, Tools
async def main():
	"""Run a part-picking agent on pcpartpicker.com and return its run history."""
	# Attaches to a Chrome instance already running with --remote-debugging-port=9222.
	browser = Browser(cdp_url='http://localhost:9222')
	llm = ChatBrowserUse()
	tools = Tools()
	task = """
	Design me a mid-range water-cooled ITX computer
	Keep the total budget under $2000
	Go to https://pcpartpicker.com/
	Make sure the build is complete and has no incompatibilities.
	Provide the full list of parts with prices and a link to the completed build.
	"""
	agent = Agent(
		task=task,
		browser=browser,
		tools=tools,
		llm=llm,
	)
	# Effectively unbounded step budget: the agent stops when it calls done.
	history = await agent.run(max_steps=100000)
	return history


if __name__ == '__main__':
	history = asyncio.run(main())
	# Print only the agent's final answer, not the full step history.
	final_result = history.final_result()
	print(final_result)
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/phone_comparison.py | examples/use-cases/phone_comparison.py | import asyncio
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse
class ProductListing(BaseModel):
	"""A single product listing extracted from one marketplace (Amazon, eBay, or Swappa)."""

	title: str = Field(..., description='Product title')
	url: str = Field(..., description='Full URL to listing')
	price: float = Field(..., description='Price as number')
	condition: str | None = Field(None, description='Condition: Used, New, Refurbished, etc')
	source: str = Field(..., description='Source website: Amazon, eBay, or Swappa')


class PriceComparison(BaseModel):
	"""Structured output the agent must produce: the query plus every listing found."""

	search_query: str = Field(..., description='The search query used')
	listings: list[ProductListing] = Field(default_factory=list, description='All product listings')
async def find(item: str = 'Used iPhone 12'):
	"""
	Search for an item across multiple marketplaces and compare prices.

	Args:
		item: The item to search for (e.g., "Used iPhone 12")

	Returns:
		The agent run history; its ``structured_output`` attribute holds the
		PriceComparison results (see the __main__ block below).
	"""
	# Attaches to a Chrome instance already running with --remote-debugging-port=9222.
	browser = Browser(cdp_url='http://localhost:9222')
	llm = ChatBrowserUse()
	# Task prompt
	task = f"""
	Search for "{item}" on eBay, Amazon, and Swappa. Get any 2-3 listings from each site.
	For each site:
	1. Search for "{item}"
	2. Extract ANY 2-3 listings you find (sponsored, renewed, used - all are fine)
	3. Get: title, price (number only, if range use lower number), source, full URL, condition
	4. Move to next site
	Sites:
	- eBay: https://www.ebay.com/
	- Amazon: https://www.amazon.com/
	- Swappa: https://swappa.com/
	"""
	# Create agent with structured output
	agent = Agent(
		browser=browser,
		llm=llm,
		task=task,
		output_model_schema=PriceComparison,
	)
	# Run the agent
	result = await agent.run()
	return result


if __name__ == '__main__':
	# Get user input; fall back to a default query on empty input.
	query = input('What item would you like to compare prices for? ').strip()
	if not query:
		query = 'Used iPhone 12'
		print(f'Using default query: {query}')
	result = asyncio.run(find(query))
	# Access structured output
	if result and result.structured_output:
		comparison = result.structured_output
		print(f'\n{"=" * 60}')
		print(f'Price Comparison Results: {comparison.search_query}')
		print(f'{"=" * 60}\n')
		for listing in comparison.listings:
			print(f'Title: {listing.title}')
			print(f'Price: ${listing.price}')
			print(f'Source: {listing.source}')
			print(f'URL: {listing.url}')
			print(f'Condition: {listing.condition or "N/A"}')
			print(f'{"-" * 60}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/check_appointment.py | examples/use-cases/check_appointment.py | # Goal: Checks for available visa appointment slots on the Greece MFA website.
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from pydantic import BaseModel
from browser_use import ChatOpenAI
from browser_use.agent.service import Agent
from browser_use.tools.service import Tools
# Fail fast with a clear message instead of erroring mid-run inside the LLM client.
if not os.getenv('OPENAI_API_KEY'):
	raise ValueError('OPENAI_API_KEY is not set. Please add it to your environment variables.')

# Shared tools registry; the custom action below is registered on it.
tools = Tools()


class WebpageInfo(BaseModel):
	"""Model for webpage link."""

	# Default target: Greek MFA visa appointment page for the consulate in Dublin.
	link: str = 'https://appointment.mfa.gr/en/reservations/aero/ireland-grcon-dub/'
@tools.action('Go to the webpage', param_model=WebpageInfo)
def go_to_webpage(webpage_info: WebpageInfo):
	"""Return the webpage link for the agent to navigate to."""
	return webpage_info.link
async def main():
	"""Build the appointment-checking agent and let it run to completion."""
	task = (
		'Go to the Greece MFA webpage via the link I provided you.'
		'Check the visa appointment dates. If there is no available date in this month, check the next month.'
		'If there is no available date in both months, tell me there is no available date.'
	)
	agent = Agent(task, ChatOpenAI(model='gpt-4.1-mini'), tools=tools, use_vision=True)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/shopping.py | examples/use-cases/shopping.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
# Prompt literal handed to the agent verbatim. Typos such as "it;s",
# "chockolate" and "more then" are preserved — this is runtime prompt text,
# not documentation, and must not be edited cosmetically.
task = """
### Prompt for Shopping Agent – Migros Online Grocery Order
**Objective:**
Visit [Migros Online](https://www.migros.ch/en), search for the required grocery items, add them to the cart, select an appropriate delivery window, and complete the checkout process using TWINT.
**Important:**
- Make sure that you don't buy more than it's needed for each article.
- After your search, if you click the "+" button, it adds the item to the basket.
- if you open the basket sidewindow menu, you can close it by clicking the X button on the top right. This will help you navigate easier.
---
### Step 1: Navigate to the Website
- Open [Migros Online](https://www.migros.ch/en).
- You should be logged in as Nikolaos Kaliorakis
---
### Step 2: Add Items to the Basket
#### Shopping List:
**Meat & Dairy:**
- Beef Minced meat (1 kg)
- Gruyère cheese (grated preferably)
- 2 liters full-fat milk
- Butter (cheapest available)
**Vegetables:**
- Carrots (1kg pack)
- Celery
- Leeks (1 piece)
- 1 kg potatoes
At this stage, check the basket on the top right (indicates the price) and check if you bought the right items.
**Fruits:**
- 2 lemons
- Oranges (for snacking)
**Pantry Items:**
- Lasagna sheets
- Tahini
- Tomato paste (below CHF2)
- Black pepper refill (not with the mill)
- 2x 1L Oatly Barista(oat milk)
- 1 pack of eggs (10 egg package)
#### Ingredients I already have (DO NOT purchase):
- Olive oil, garlic, canned tomatoes, dried oregano, bay leaves, salt, chili flakes, flour, nutmeg, cumin.
---
### Step 3: Handling Unavailable Items
- If an item is **out of stock**, find the best alternative.
- Use the following recipe contexts to choose substitutions:
- **Pasta Bolognese & Lasagna:** Minced meat, tomato paste, lasagna sheets, milk (for béchamel), Gruyère cheese.
- **Hummus:** Tahini, chickpeas, lemon juice, olive oil.
- **Chickpea Curry Soup:** Chickpeas, leeks, curry, lemons.
- **Crispy Slow-Cooked Pork Belly with Vegetables:** Potatoes, butter.
- Example substitutions:
- If Gruyère cheese is unavailable, select another semi-hard cheese.
- If Tahini is unavailable, a sesame-based alternative may work.
---
### Step 4: Adjusting for Minimum Order Requirement
- If the total order **is below CHF 99**, add **a liquid soap refill** to reach the minimum. If it;s still you can buy some bread, dark chockolate.
- At this step, check if you have bought MORE items than needed. If the price is more then CHF200, you MUST remove items.
- If an item is not available, choose an alternative.
- if an age verification is needed, remove alcoholic products, we haven't verified yet.
---
### Step 5: Select Delivery Window
- Choose a **delivery window within the current week**. It's ok to pay up to CHF2 for the window selection.
- Preferably select a slot within the workweek.
---
### Step 6: Checkout
- Proceed to checkout.
- Select **TWINT** as the payment method.
- Check out.
-
- if it's needed the username is: nikoskalio.dev@gmail.com
- and the password is : TheCircuit.Migros.dev!
---
### Step 7: Confirm Order & Output Summary
- Once the order is placed, output a summary including:
- **Final list of items purchased** (including any substitutions).
- **Total cost**.
- **Chosen delivery time**.
**Important:** Ensure efficiency and accuracy throughout the process."""

# Module-level agent: constructed at import time, executed by main().
agent = Agent(task=task, llm=ChatOpenAI(model='gpt-4.1-mini'))


async def main():
	"""Run the shopping agent, then keep the browser open until Enter is pressed."""
	await agent.run()
	input('Press Enter to close the browser...')


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/find_influencer_profiles.py | examples/use-cases/find_influencer_profiles.py | """
Show how to use custom outputs.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
import httpx
from pydantic import BaseModel
from browser_use import Agent, ChatOpenAI, Tools
from browser_use.agent.views import ActionResult
class Profile(BaseModel):
	"""One social-media profile discovered for the target user."""

	platform: str
	profile_url: str


class Profiles(BaseModel):
	"""Structured output the agent must return: all profiles it found."""

	profiles: list[Profile]


# The built-in 'search' action is excluded so the agent must use the custom
# search_web action registered below.
tools = Tools(exclude_actions=['search'], output_model=Profiles)

BEARER_TOKEN = os.getenv('BEARER_TOKEN')
if not BEARER_TOKEN:
	# use the api key for ask tessa
	# you can also use other apis like exa, xAI, perplexity, etc.
	raise ValueError('BEARER_TOKEN is not set - go to https://www.heytessa.ai/ and create an api key')
@tools.registry.action('Search the web for a specific query')
async def search_web(query: str):
	"""POST the query to the Ask Tessa search API and return matching sources.

	Only sources with score >= 0.2 are kept, each trimmed to the
	url/title/content/author/score keys. The JSON result is printed and
	returned as an ActionResult that stays in the agent's memory.
	"""
	keys_to_use = ['url', 'title', 'content', 'author', 'score']
	headers = {'Authorization': f'Bearer {BEARER_TOKEN}'}
	async with httpx.AsyncClient() as client:
		response = await client.post(
			'https://asktessa.ai/api/search',
			headers=headers,
			json={'query': query},
		)
		# BUGFIX: httpx.Response.json() is synchronous — the previous
		# `await response.json()['sources']` awaited a plain list and raised
		# TypeError at runtime. Parse the body first, then filter.
		sources = response.json()['sources']
		final_results = [
			{key: source[key] for key in keys_to_use if key in source}
			for source in sources
			if source['score'] >= 0.2
		]
	result_text = json.dumps(final_results, indent=4)
	print(result_text)
	return ActionResult(extracted_content=result_text, include_in_memory=True)
async def main():
	"""Resolve a TikTok share link to a username, then web-search for the user's other profiles."""
	task = (
		'Go to this tiktok video url, open it and extract the @username from the resulting url. Then do a websearch for this username to find all his social media profiles. Return me the links to the social media profiles with the platform name.'
		' https://www.tiktokv.com/share/video/7470981717659110678/ '
	)
	model = ChatOpenAI(model='gpt-4.1-mini')
	agent = Agent(task=task, llm=model, tools=tools)
	history = await agent.run()
	result = history.final_result()
	if result:
		# The final output is JSON conforming to the Profiles output_model.
		parsed: Profiles = Profiles.model_validate_json(result)
		for profile in parsed.profiles:
			print('\n--------------------------------')
			print(f'Platform: {profile.platform}')
			print(f'Profile URL: {profile.profile_url}')
	else:
		print('No result')


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/onepassword.py | examples/use-cases/onepassword.py | import os
from onepassword.client import Client
from browser_use import ActionResult, Agent, Browser, ChatOpenAI, Tools
from browser_use.browser.session import BrowserSession
"""
Use Case: Securely log into a website using credentials stored in 1Password vault.
- Use fill_field action to fill in username and password fields with values retrieved from 1Password. The LLM never sees the actual credentials.
- Use blur_page and unblur_page actions to visually obscure sensitive information on the page while filling in credentials for extra security.
**SETUP**
How to setup 1Password with Browser Use
- Get Individual Plan for 1Password
- Go to the Home page and click “New Vault”
- Add the credentials you need for any websites you want to log into
- Go to “Developer” tab, navigate to “Directory” and create a Service Account
- Give the service account access to the vault
- Copy the Service Account Token and set it as environment variable OP_SERVICE_ACCOUNT_TOKEN
- Install the onepassword package: pip install onepassword-sdk
Note: In this example, we assume that you created a vault named "prod-secrets" and added an item named "X" with fields "username" and "password".
"""
async def main():
	"""Log into X with credentials resolved from 1Password, never exposing them to the LLM.

	Registers three custom actions:
	- blur_page / unblur_page: CSS-blur document.body while secrets are typed.
	- fill_field: resolves op://<vault>/<item>/<field> and types the value
	  directly into the page, so the secret never enters the LLM conversation.
	"""
	# Gets your service account token from environment variable
	token = os.getenv('OP_SERVICE_ACCOUNT_TOKEN')
	# Authenticate with 1Password
	op_client = await Client.authenticate(auth=token, integration_name='Browser Use Secure Login', integration_version='v1.0.0')
	# Initialize tools
	tools = Tools()

	@tools.registry.action('Apply CSS blur filter to entire page content')
	async def blur_page(browser_session: BrowserSession):
		"""
		Applies CSS blur filter directly to document.body to obscure all page content.
		The blur will remain until unblur_page is called.
		DOM remains accessible for element finding while page is visually blurred.
		"""
		try:
			# Get CDP session
			cdp_session = await browser_session.get_or_create_cdp_session()
			# Apply blur filter to document.body
			result = await cdp_session.cdp_client.send.Runtime.evaluate(
				params={
					'expression': """
					(function() {
						// Check if already blurred
						if (document.body.getAttribute('data-page-blurred') === 'true') {
							console.log('[BLUR] Page already blurred');
							return true;
						}
						// Apply CSS blur filter to body
						document.body.style.filter = 'blur(15px)';
						document.body.style.webkitFilter = 'blur(15px)'; // Safari support
						document.body.style.transition = 'filter 0.3s ease';
						document.body.setAttribute('data-page-blurred', 'true');
						console.log('[BLUR] Applied CSS blur to page');
						return true;
					})();
					""",
					'returnByValue': True,
				},
				session_id=cdp_session.session_id,
			)
			# Runtime.evaluate wraps the JS return value: result.result.value
			success = result.get('result', {}).get('value', False)
			if success:
				print('[BLUR] Applied CSS blur to page')
				return ActionResult(extracted_content='Successfully applied CSS blur to page', include_in_memory=True)
			else:
				return ActionResult(error='Failed to apply blur', include_in_memory=True)
		except Exception as e:
			print(f'[BLUR ERROR] {e}')
			return ActionResult(error=f'Failed to blur page: {str(e)}', include_in_memory=True)

	@tools.registry.action('Remove CSS blur filter from page')
	async def unblur_page(browser_session: BrowserSession):
		"""
		Removes the CSS blur filter from document.body, restoring normal page visibility.
		"""
		try:
			# Get CDP session
			cdp_session = await browser_session.get_or_create_cdp_session()
			# Remove blur filter from body
			result = await cdp_session.cdp_client.send.Runtime.evaluate(
				params={
					'expression': """
					(function() {
						if (document.body.getAttribute('data-page-blurred') !== 'true') {
							console.log('[BLUR] Page not blurred');
							return false;
						}
						// Remove CSS blur filter
						document.body.style.filter = 'none';
						document.body.style.webkitFilter = 'none';
						document.body.removeAttribute('data-page-blurred');
						console.log('[BLUR] Removed CSS blur from page');
						return true;
					})();
					""",
					'returnByValue': True,
				},
				session_id=cdp_session.session_id,
			)
			removed = result.get('result', {}).get('value', False)
			if removed:
				print('[BLUR] Removed CSS blur from page')
				return ActionResult(extracted_content='Successfully removed CSS blur from page', include_in_memory=True)
			else:
				print('[BLUR] Page was not blurred')
				return ActionResult(
					extracted_content='Page was not blurred (may have already been removed)', include_in_memory=True
				)
		except Exception as e:
			print(f'[BLUR ERROR] {e}')
			return ActionResult(error=f'Failed to unblur page: {str(e)}', include_in_memory=True)

	# LLM can call this action to use actors to fill in sensitive fields using 1Password values.
	# NOTE: fill_field closes over `llm`, which is assigned further below — valid
	# because the closure body only runs when the agent invokes the action.
	@tools.registry.action('Fill in a specific field for a website using value from 1Password vault')
	async def fill_field(vault_name: str, item_name: str, field_name: str, browser_session: BrowserSession):
		"""
		Fills in a specific field for a website using the value from 1Password.
		Note: Use blur_page before calling this if you want visual security.
		"""
		try:
			# Resolve field value from 1Password
			field_value = await op_client.secrets.resolve(f'op://{vault_name}/{item_name}/{field_name}')
			# Get current page
			page = await browser_session.must_get_current_page()
			# Find and fill the element
			target_field = await page.must_get_element_by_prompt(f'{field_name} input field', llm)
			await target_field.fill(field_value)
			return ActionResult(
				extracted_content=f'Successfully filled {field_name} field for {vault_name}/{item_name}', include_in_memory=True
			)
		except Exception as e:
			return ActionResult(error=f'Failed to fill {field_name} field: {str(e)}', include_in_memory=True)

	browser_session = Browser()
	llm = ChatOpenAI(model='o3')
	agent = Agent(
		task="""
	Navigate to https://x.com/i/flow/login
	Wait for the page to load.
	Use fill_field action with vault_name='prod-secrets' and item_name='X' and field_name='username'.
	Click the Next button.
	Use fill_field action with vault_name='prod-secrets' and item_name='X' and field_name='password'.
	Click the Log in button.
	Give me the latest 5 tweets from the logged in user's timeline.
	**IMPORTANT** Use blur_page action if you anticipate filling sensitive fields.
	Only use unblur_page action after you see the logged in user's X timeline.
	Your priority is to keep the username and password hidden while filling sensitive fields.
	""",
		browser_session=browser_session,
		llm=llm,
		tools=tools,
		file_system_path='./agent_data',
	)
	await agent.run()


if __name__ == '__main__':
	import asyncio

	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/apply_to_job.py | examples/use-cases/apply_to_job.py | import argparse
import asyncio
import json
import os
from dotenv import load_dotenv
from browser_use import Agent, Browser, ChatOpenAI, Tools
from browser_use.tools.views import UploadFileAction
load_dotenv()
async def apply_to_rochester_regional_health(info: dict, resume_path: str):
	"""
	Fill out and submit the Rochester Regional Health job application.

	Args:
		info: Applicant data dict, expected in the json format below.
		resume_path: Local path to the resume PDF made available to the agent.

	Returns:
		The agent's final_result() string (summary + questions encountered).

	json format:
	{
	"first_name": "John",
	"last_name": "Doe",
	"email": "john.doe@example.com",
	"phone": "555-555-5555",
	"age": "21",
	"US_citizen": boolean,
	"sponsorship_needed": boolean,
	"resume": "Link to resume",
	"postal_code": "12345",
	"country": "USA",
	"city": "Rochester",
	"address": "123 Main St",
	"gender": "Male",
	"race": "Asian",
	"Veteran_status": "Not a veteran",
	"disability_status": "No disability"
	}
	"""
	llm = ChatOpenAI(model='o3')
	tools = Tools()

	@tools.action(description='Upload resume file')
	async def upload_resume(browser_session):
		# NOTE(review): `params` is constructed but never used, and
		# browser_session is unused too — presumably the real upload happens
		# via the built-in upload_file_to_element action; confirm whether this
		# custom action is still needed.
		params = UploadFileAction(path=resume_path, index=0)
		return 'Ready to upload resume'

	# cross_origin_iframes: the application form is embedded via appcast.io.
	browser = Browser(cross_origin_iframes=True)
	task = f"""
	- Your goal is to fill out and submit a job application form with the provided information.
	- Navigate to https://apply.appcast.io/jobs/50590620606/applyboard/apply/
	- Scroll through the entire application and use extract_structured_data action to extract all the relevant information needed to fill out the job application form. use this information and return a structured output that can be used to fill out the entire form: {info}. Use the done action to finish the task. Fill out the job application form with the following information.
	- Before completing every step, refer to this information for accuracy. It is structured in a way to help you fill out the form and is the source of truth.
	- Follow these instructions carefully:
	- if anything pops up that blocks the form, close it out and continue filling out the form.
	- Do not skip any fields, even if they are optional. If you do not have the information, make your best guess based on the information provided.
	Fill out the form from top to bottom, never skip a field to come back to it later. When filling out a field, only focus on one field per step. For each of these steps, scroll to the related text. These are the steps:
	1) use input_text action to fill out the following:
	- "First name"
	- "Last name"
	- "Email"
	- "Phone number"
	2) use the upload_file_to_element action to fill out the following:
	- Resume upload field
	3) use input_text action to fill out the following:
	- "Postal code"
	- "Country"
	- "State"
	- "City"
	- "Address"
	- "Age"
	4) use click action to select the following options:
	- "Are you legally authorized to work in the country for which you are applying?"
	- "Will you now or in the future require sponsorship for employment visa status (e.g., H-1B visa status, etc.) to work legally for Rochester Regional Health?"
	- "Do you have, or are you in the process of obtaining, a professional license?"
	- SELECT NO FOR THIS FIELD
	5) use input_text action to fill out the following:
	- "What drew you to healthcare?"
	6) use click action to select the following options:
	- "How many years of experience do you have in a related role?"
	- "Gender"
	- "Race"
	- "Hispanic/Latino"
	- "Veteran status"
	- "Disability status"
	7) use input_text action to fill out the following:
	- "Today's date"
	8) CLICK THE SUBMIT BUTTON AND CHECK FOR A SUCCESS SCREEN. Once there is a success screen, complete your end task of writing final_result and outputting it.
	- Before you start, create a step-by-step plan to complete the entire task. Make sure to delegate a step for each field to be filled out.
	*** IMPORTANT ***:
	- You are not done until you have filled out every field of the form.
	- When you have completed the entire form, press the submit button to submit the application and use the done action once you have confirmed that the application is submitted
	- PLACE AN EMPHASIS ON STEP 4, the click action. That section should be filled out.
	- At the end of the task, structure your final_result as 1) a human-readable summary of all detections and actions performed on the page with 2) a list with all questions encountered in the page. Do not say "see above." Include a fully written out, human-readable summary at the very end.
	"""
	# Whitelist the resume so the agent's upload actions may read it.
	available_file_paths = [resume_path]
	agent = Agent(
		task=task,
		llm=llm,
		browser=browser,
		tools=tools,
		available_file_paths=available_file_paths,
	)
	history = await agent.run()
	return history.final_result()
async def main(test_data_path: str, resume_path: str):
	"""Validate the input paths, load the applicant JSON, and run the application agent."""
	for kind, path in (('Test data', test_data_path), ('Resume', resume_path)):
		if not os.path.exists(path):
			raise FileNotFoundError(f'{kind} file not found at: {path}')
	with open(test_data_path) as f:  # noqa: ASYNC230
		mock_info = json.load(f)
	print('Search Results:', await apply_to_rochester_regional_health(mock_info, resume_path=resume_path))
if __name__ == '__main__':
	# CLI entry point: python apply_to_job.py --test-data data.json --resume resume.pdf
	parser = argparse.ArgumentParser(description='Apply to Rochester Regional Health job')
	parser.add_argument('--test-data', required=True, help='Path to test data JSON file')
	parser.add_argument('--resume', required=True, help='Path to resume PDF file')
	args = parser.parse_args()
	asyncio.run(main(args.test_data, args.resume))
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/buy_groceries.py | examples/use-cases/buy_groceries.py | import asyncio
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse
class GroceryItem(BaseModel):
	"""A single grocery item the agent added to the cart."""

	name: str = Field(..., description='Item name')
	price: float = Field(..., description='Price as number')
	brand: str | None = Field(None, description='Brand name')
	size: str | None = Field(None, description='Size or quantity')
	url: str = Field(..., description='Full URL to item')


class GroceryCart(BaseModel):
	"""Structured output the agent must produce: everything it put in the cart."""

	items: list[GroceryItem] = Field(default_factory=list, description='All grocery items found')
async def add_to_cart(items: list[str] | None = None):
	"""Add the given items to an Instacart cart via a browser agent.

	Args:
		items: Item names to search for and add. Defaults to
			['milk', 'eggs', 'bread'] when omitted.

	Returns:
		The agent run history; its structured_output is a GroceryCart.
	"""
	# BUGFIX: the default was previously a mutable list literal, which is
	# evaluated once and shared across calls; use the None sentinel instead.
	if items is None:
		items = ['milk', 'eggs', 'bread']
	# Attaches to a Chrome instance already running with --remote-debugging-port=9222.
	browser = Browser(cdp_url='http://localhost:9222')
	llm = ChatBrowserUse()
	# Task prompt
	task = f"""
	Search for "{items}" on Instacart at the nearest store.
	You will buy all of the items at the same store.
	For each item:
	1. Search for the item
	2. Find the best match (closest name, lowest price)
	3. Add the item to the cart
	Site:
	- Instacart: https://www.instacart.com/
	"""
	# Create agent with structured output
	agent = Agent(
		browser=browser,
		llm=llm,
		task=task,
		output_model_schema=GroceryCart,
	)
	# Run the agent
	result = await agent.run()
	return result
if __name__ == '__main__':
	# Get user input; empty input falls back to a default shopping list.
	items_input = input('What items would you like to add to cart (comma-separated)? ').strip()
	if not items_input:
		items = ['milk', 'eggs', 'bread']
		print(f'Using default items: {items}')
	else:
		items = [item.strip() for item in items_input.split(',')]
	result = asyncio.run(add_to_cart(items))
	# Access structured output
	if result and result.structured_output:
		cart = result.structured_output
		print(f'\n{"=" * 60}')
		print('Items Added to Cart')
		print(f'{"=" * 60}\n')
		for item in cart.items:
			print(f'Name: {item.name}')
			print(f'Price: ${item.price}')
			if item.brand:
				print(f'Brand: {item.brand}')
			if item.size:
				print(f'Size: {item.size}')
			print(f'URL: {item.url}')
			print(f'{"-" * 60}')
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/use-cases/extract_pdf_content.py | examples/use-cases/extract_pdf_content.py | #!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = ["browser-use", "mistralai"]
# ///
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
import asyncio
import logging
from browser_use import Agent, ChatOpenAI
logger = logging.getLogger(__name__)
async def main():
	"""Run an agent that opens a hosted PDF and reports what is on page 3."""
	agent = Agent(
		# BUGFIX: prompt previously read "the following UR," (typo for "URL").
		task="""
	Objective: Navigate to the following URL, what is on page 3?
	URL: https://docs.house.gov/meetings/GO/GO00/20220929/115171/HHRG-117-GO00-20220929-SD010.pdf
	""",
		llm=ChatOpenAI(model='gpt-4.1-mini'),
	)
	result = await agent.run()
	# result is the agent's run history object.
	logger.info(result)
if __name__ == '__main__':
	# Allow running this example directly as a script.
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/skills.py | examples/models/skills.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
from browser_use import Agent
load_dotenv()
async def run_search():
	"""Run a flash-mode agent that uses a hosted skill to read a repo's star count."""
	star_count_skill = '502af156-2a75-4b4e-816d-b2dc138b6647'  # skill for fetching the number of stars of any Github repository
	agent = Agent(
		task='How many stars does the browser-use repo have?',
		flash_mode=True,
		skills=[star_count_skill],
	)
	await agent.run()
if __name__ == '__main__':
	# Allow running this example directly as a script.
	asyncio.run(run_search())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/gpt-5-mini.py | examples/models/gpt-5-mini.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(model='gpt-5-mini')
agent = Agent(
llm=llm,
task='Find out which one is cooler: the monkey park or a dolphin tour in Tenerife?',
)
async def main():
    """Drive the module-level agent for at most 20 steps, then pause for the user."""
    step_budget = 20
    await agent.run(max_steps=step_budget)
    input('Press Enter to continue...')
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/ollama.py | examples/models/ollama.py | # 1. Install Ollama: https://github.com/ollama/ollama
# 2. Run `ollama serve` to start the server
# 3. In a new terminal, install the model you want to use: `ollama pull llama3.1:8b` (this has 4.9GB)
from browser_use import Agent, ChatOllama
llm = ChatOllama(model='llama3.1:8b')
Agent('find the founders of browser-use', llm=llm).run_sync()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/openrouter.py | examples/models/openrouter.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(
# model='x-ai/grok-4',
model='deepcogito/cogito-v2.1-671b',
base_url='https://openrouter.ai/api/v1',
api_key=os.getenv('OPENROUTER_API_KEY'),
)
agent = Agent(
task='Find the number of stars of the browser-use repo',
llm=llm,
use_vision=False,
)
async def main():
    """Run the OpenRouter-backed agent for at most 10 steps."""
    step_budget = 10
    await agent.run(max_steps=step_budget)
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/aws.py | examples/models/aws.py | """
AWS Bedrock Examples
This file demonstrates how to use AWS Bedrock models with browser-use.
We provide two classes:
1. ChatAnthropicBedrock - Convenience class for Anthropic Claude models
2. ChatAWSBedrock - General AWS Bedrock client supporting all providers
Requirements:
- AWS credentials configured via environment variables
- boto3 installed: pip install boto3
- Access to AWS Bedrock models in your region
"""
import asyncio
from browser_use import Agent
from browser_use.llm import ChatAnthropicBedrock, ChatAWSBedrock
async def example_anthropic_bedrock():
    """Demonstrate ChatAnthropicBedrock, the convenience wrapper for Claude on Bedrock."""
    print('🔹 ChatAnthropicBedrock Example')

    # Claude Sonnet 4 served through AWS Bedrock in us-east-1.
    llm = ChatAnthropicBedrock(
        model='us.anthropic.claude-sonnet-4-20250514-v1:0',
        aws_region='us-east-1',
        temperature=0.7,
    )
    print(f'Model: {llm.name}')
    print(f'Provider: {llm.provider}')

    agent = Agent(
        task="Navigate to google.com and search for 'AWS Bedrock pricing'",
        llm=llm,
    )
    print("Task: Navigate to google.com and search for 'AWS Bedrock pricing'")

    # Two steps is enough for a smoke test of the model wiring.
    result = await agent.run(max_steps=2)
    print(f'Result: {result}')
async def example_aws_bedrock():
    """Demonstrate ChatAWSBedrock, the generic client for any Bedrock-hosted model."""
    print('\n🔹 ChatAWSBedrock Example')

    # Meta Llama 4 Maverick here, but any Bedrock model identifier works.
    llm = ChatAWSBedrock(
        model='us.meta.llama4-maverick-17b-instruct-v1:0',
        aws_region='us-east-1',
        temperature=0.5,
    )
    print(f'Model: {llm.name}')
    print(f'Provider: {llm.provider}')

    agent = Agent(
        task='Go to github.com and find the most popular Python repository',
        llm=llm,
    )
    print('Task: Go to github.com and find the most popular Python repository')

    # Two steps is enough for a smoke test of the model wiring.
    result = await agent.run(max_steps=2)
    print(f'Result: {result}')
async def main():
    """Run both Bedrock examples, printing setup guidance when AWS auth fails."""
    print('🚀 AWS Bedrock Examples')
    print('=' * 40)
    print('Make sure you have AWS credentials configured:')
    print('export AWS_ACCESS_KEY_ID=your_key')
    print('export AWS_SECRET_ACCESS_KEY=your_secret')
    print('export AWS_DEFAULT_REGION=us-east-1')
    print('=' * 40)
    try:
        # Generic client first, then the Anthropic convenience wrapper.
        await example_aws_bedrock()
        await example_anthropic_bedrock()
    except Exception as e:
        print(f'❌ Error: {e}')
        print('Make sure you have:')
        print('- Valid AWS credentials configured')
        print('- Access to AWS Bedrock in your region')
        print('- boto3 installed: pip install boto3')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/qwen.py | examples/models/qwen.py | import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
import asyncio
# get an api key from https://modelstudio.console.alibabacloud.com/?tab=playground#/api-key
api_key = os.getenv('ALIBABA_CLOUD')
base_url = 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
# so far we only had success with qwen-vl-max
# other models, even qwen-max, do not return the right output format. They confuse the action schema.
# E.g. they return actions: [{"navigate": "google.com"}] instead of [{"navigate": {"url": "google.com"}}]
# If you want to use smaller models and you see they mix up the action schema, add concrete examples to your prompt of the right format.
llm = ChatOpenAI(model='qwen-vl-max', api_key=api_key, base_url=base_url)
async def main():
    """Run a single vision-enabled query against the Qwen VL model."""
    qwen_agent = Agent(task='go find the founders of browser-use', llm=llm, use_vision=True, max_actions_per_step=1)
    await qwen_agent.run()


if __name__ == '__main__':
    asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/bu_oss.py | examples/models/bu_oss.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
from dotenv import load_dotenv
from browser_use import Agent, ChatBrowserUse
load_dotenv()
try:
from lmnr import Laminar
Laminar.initialize()
except ImportError:
pass
# Point to local llm-use server for testing
llm = ChatBrowserUse(
model='browser-use/bu-30b-a3b-preview', # BU Open Source Model!!
)
agent = Agent(
task='Find the number of stars of browser-use and stagehand. Tell me which one has more stars :)',
llm=llm,
flash_mode=True,
)
agent.run_sync()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/llama4-groq.py | examples/models/llama4-groq.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatGroq
groq_api_key = os.environ.get('GROQ_API_KEY')
llm = ChatGroq(
model='meta-llama/llama-4-maverick-17b-128e-instruct',
# temperature=0.1,
)
# llm = ChatGroq(
# model='meta-llama/llama-4-maverick-17b-128e-instruct',
# api_key=os.environ.get('GROQ_API_KEY'),
# temperature=0.0,
# )
task = 'Go to amazon.com, search for laptop, sort by best rating, and give me the price of the first result'
async def main():
    """Run the Groq-hosted Llama 4 agent on the module-level task."""
    groq_agent = Agent(
        task=task,
        llm=llm,
    )
    await groq_agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/cerebras_example.py | examples/models/cerebras_example.py | """
Example of using Cerebras with browser-use.
To use this example:
1. Set your CEREBRAS_API_KEY environment variable
2. Run this script
Cerebras integration is working great for:
- Direct text generation
- Simple tasks without complex structured output
- Fast inference for web automation
Available Cerebras models (9 total):
Small/Fast models (8B-32B):
- cerebras_llama3_1_8b (8B parameters, fast)
- cerebras_llama_4_scout_17b_16e_instruct (17B, instruction-tuned)
- cerebras_llama_4_maverick_17b_128e_instruct (17B, extended context)
- cerebras_qwen_3_32b (32B parameters)
Large/Capable models (70B-480B):
- cerebras_llama3_3_70b (70B parameters, latest version)
- cerebras_gpt_oss_120b (120B parameters, OpenAI's model)
- cerebras_qwen_3_235b_a22b_instruct_2507 (235B, instruction-tuned)
- cerebras_qwen_3_235b_a22b_thinking_2507 (235B, complex reasoning)
- cerebras_qwen_3_coder_480b (480B, code generation)
Note: Cerebras has some limitations with complex structured output due to JSON schema compatibility.
"""
import asyncio
import os
from browser_use import Agent
async def main():
    """Run a short Cerebras-backed agent task; requires CEREBRAS_API_KEY."""
    # Fail fast when the key is missing (recommended to use an env variable).
    if not os.getenv('CEREBRAS_API_KEY'):
        raise ValueError('Please set CEREBRAS_API_KEY environment variable')

    # Option 1 (used here): pre-configured model instances on the `llm` module.
    # Alternatives range from llm.cerebras_llama3_1_8b (small, fast) up to
    # llm.cerebras_qwen_3_coder_480b (480B, code generation).
    # Option 2: construct ChatCerebras(model=..., api_key=..., ...) directly.
    from browser_use import llm

    model = llm.cerebras_qwen_3_235b_a22b_thinking_2507  # 235B, complex reasoning

    task = 'Explain the concept of quantum entanglement in simple terms.'
    agent = Agent(task=task, llm=model)
    print(f'Running task with Cerebras {model.name} (ID: {model.model}): {task}')

    history = await agent.run(max_steps=3)
    result = history.final_result()
    print(f'Result: {result}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/gpt-4.1.py | examples/models/gpt-4.1.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# All the models are type safe from OpenAI in case you need a list of supported models
llm = ChatOpenAI(model='gpt-4.1-mini')
agent = Agent(
task='Go to amazon.com, click on the first link, and give me the title of the page',
llm=llm,
)
async def main():
    """Run the agent for at most 10 steps, then wait for a keypress before exit."""
    step_budget = 10
    await agent.run(max_steps=step_budget)
    input('Press Enter to continue...')
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/moonshot.py | examples/models/moonshot.py | import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
# Get API key from environment variable
api_key = os.getenv('MOONSHOT_API_KEY')
if api_key is None:
print('Make sure you have MOONSHOT_API_KEY set in your .env file')
print('Get your API key from https://platform.moonshot.ai/console/api-keys ')
exit(1)
# Configure Moonshot AI model
llm = ChatOpenAI(
model='kimi-k2-thinking',
base_url='https://api.moonshot.ai/v1',
api_key=api_key,
add_schema_to_system_prompt=True,
remove_min_items_from_schema=True, # Moonshot doesn't support minItems in JSON schema
remove_defaults_from_schema=True, # Moonshot doesn't allow default values with anyOf
)
async def main():
    """Run a news-summarization task on Moonshot's Kimi model in flash mode."""
    news_agent = Agent(
        task='Search for the latest news about AI and summarize the top 3 articles',
        llm=llm,
        flash_mode=True,
    )
    await news_agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/claude-4-sonnet.py | examples/models/claude-4-sonnet.py | """
Simple script that runs the task of opening amazon and searching.
@dev Ensure we have a `ANTHROPIC_API_KEY` variable in our `.env` file.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatAnthropic
llm = ChatAnthropic(model='claude-sonnet-4-0', temperature=0.0)
agent = Agent(
task='Go to amazon.com, search for laptop, sort by best rating, and give me the price of the first result',
llm=llm,
)
async def main():
    """Run the Claude-backed agent for at most 10 steps."""
    step_budget = 10
    await agent.run(max_steps=step_budget)
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/vercel_ai_gateway.py | examples/models/vercel_ai_gateway.py | """
Example using Vercel AI Gateway with browser-use.
Vercel AI Gateway provides an OpenAI-compatible API endpoint that can proxy
requests to various AI providers. This allows you to use Vercel's infrastructure
for rate limiting, caching, and monitoring.
Prerequisites:
1. Set VERCEL_API_KEY in your environment variables
To see all available models, visit: https://ai-gateway.vercel.sh/v1/models
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatVercel
load_dotenv()
api_key = os.getenv('VERCEL_API_KEY')
if not api_key:
raise ValueError('VERCEL_API_KEY is not set')
# Basic usage
llm = ChatVercel(
model='openai/gpt-4o',
api_key=api_key,
)
# Example with provider options - control which providers are used and in what order
# This will try Vertex AI first, then fall back to Anthropic if Vertex fails
llm_with_provider_options = ChatVercel(
model='anthropic/claude-sonnet-4',
api_key=api_key,
provider_options={
'gateway': {
'order': ['vertex', 'anthropic'] # Try Vertex AI first, then Anthropic
}
},
)
agent = Agent(
task='Go to example.com and summarize the main content',
llm=llm,
)
agent_with_provider_options = Agent(
task='Go to example.com and summarize the main content',
llm=llm_with_provider_options,
)
async def main():
    """Run both agents in sequence: default gateway routing, then explicit provider ordering."""
    for runner in (agent, agent_with_provider_options):
        await runner.run(max_steps=10)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/oci_models.py | examples/models/oci_models.py | """
Oracle Cloud Infrastructure (OCI) Raw API Example
This example demonstrates how to use OCI's Generative AI service with browser-use
using the raw API integration (ChatOCIRaw) without Langchain dependencies.
@dev You need to:
1. Set up OCI configuration file at ~/.oci/config
2. Have access to OCI Generative AI models in your tenancy
3. Install the OCI Python SDK: uv add oci
Requirements:
- OCI account with Generative AI service access
- Proper OCI configuration and authentication
- Model deployment in your OCI compartment
"""
import asyncio
import os
import sys
from pydantic import BaseModel
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from browser_use import Agent
from browser_use.llm import ChatOCIRaw
class SearchSummary(BaseModel):
query: str
results_found: int
top_result_title: str
summary: str
relevance_score: float
# Configuration examples for different providers
compartment_id = 'ocid1.tenancy.oc1..aaaaaaaayeiis5uk2nuubznrekd6xsm56k3m4i7tyvkxmr2ftojqfkpx2ura'
endpoint = 'https://inference.generativeai.us-chicago-1.oci.oraclecloud.com'
# Example 1: Meta Llama model (uses GenericChatRequest)
meta_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyarojgfh6msa452vziycwfymle5gxdvpwwxzara53topmq'
meta_llm = ChatOCIRaw(
model_id=meta_model_id,
service_endpoint=endpoint,
compartment_id=compartment_id,
provider='meta', # Meta Llama model
temperature=0.7,
max_tokens=800,
frequency_penalty=0.0,
presence_penalty=0.0,
top_p=0.9,
auth_type='API_KEY',
auth_profile='DEFAULT',
)
cohere_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyanrlpnq5ybfu5hnzarg7jomak3q6kyhkzjsl4qj24fyoq'
# Example 2: Cohere model (uses CohereChatRequest)
# cohere_model_id = "ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceyapnibwg42qjhwaxrlqfpreueirtwghiwvv2whsnwmnlva"
cohere_llm = ChatOCIRaw(
model_id=cohere_model_id,
service_endpoint=endpoint,
compartment_id=compartment_id,
provider='cohere', # Cohere model
temperature=1.0,
max_tokens=600,
frequency_penalty=0.0,
top_p=0.75,
top_k=0, # Cohere-specific parameter
auth_type='API_KEY',
auth_profile='DEFAULT',
)
# Example 3: xAI model (uses GenericChatRequest)
xai_model_id = 'ocid1.generativeaimodel.oc1.us-chicago-1.amaaaaaask7dceya3bsfz4ogiuv3yc7gcnlry7gi3zzx6tnikg6jltqszm2q'
xai_llm = ChatOCIRaw(
model_id=xai_model_id,
service_endpoint=endpoint,
compartment_id=compartment_id,
provider='xai', # xAI model
temperature=1.0,
max_tokens=20000,
top_p=1.0,
top_k=0,
auth_type='API_KEY',
auth_profile='DEFAULT',
)
# Use Meta model by default for this example
llm = xai_llm
async def basic_example():
    """Basic ChatOCIRaw demo: run a simple search task and print the outcome."""
    print('🔹 Basic ChatOCIRaw Example')
    print('=' * 40)
    print(f'Model: {llm.name}')
    print(f'Provider: {llm.provider_name}')

    agent = Agent(
        task="Go to google.com and search for 'Oracle Cloud Infrastructure pricing'",
        llm=llm,
    )
    print("Task: Go to google.com and search for 'Oracle Cloud Infrastructure pricing'")

    try:
        result = await agent.run(max_steps=5)
        print('✅ Task completed successfully!')
        print(f'Final result: {result}')
    except Exception as e:
        print(f'❌ Error: {e}')
async def structured_output_example():
    """Demonstrate structured output: output_format makes the agent return a SearchSummary."""
    print('\n🔹 Structured Output Example')
    print('=' * 40)

    agent = Agent(
        task="""Go to github.com, search for 'browser automation python',
        find the most popular repository, and return structured information about it""",
        llm=llm,
        output_format=SearchSummary,  # enforce the Pydantic schema on the result
    )
    print('Task: Search GitHub for browser automation and return structured data')

    try:
        result = await agent.run(max_steps=5)
        if isinstance(result, SearchSummary):
            print('✅ Structured output received!')
            print(f'Query: {result.query}')
            print(f'Results Found: {result.results_found}')
            print(f'Top Result: {result.top_result_title}')
            print(f'Summary: {result.summary}')
            print(f'Relevance Score: {result.relevance_score}')
        else:
            print(f'Result: {result}')
    except Exception as e:
        print(f'❌ Error: {e}')
async def advanced_configuration_example():
    """Run a longer multi-step analysis task against the module-level `llm`.

    Fix: the printed configuration line previously claimed "Cohere model with
    instance principal auth", but the module assigns `llm = xai_llm`, which is
    the xAI model configured with API_KEY auth — the message was misleading.
    """
    print('\n🔹 Advanced Configuration Example')
    print('=' * 40)
    print(f'Model: {llm.name}')
    print(f'Provider: {llm.provider_name}')
    print('Configuration: xAI model with API key auth')

    agent = Agent(
        task="""Navigate to stackoverflow.com, search for questions about 'python web scraping' and tap search help,
        analyze the top 3 questions, and provide a detailed summary of common challenges""",
        llm=llm,
    )
    print('Task: Analyze StackOverflow questions about Python web scraping')

    try:
        result = await agent.run(max_steps=8)
        print('✅ Advanced task completed!')
        print(f'Analysis result: {result}')
    except Exception as e:
        print(f'❌ Error: {e}')
async def provider_compatibility_test():
    """Smoke-test every configured provider to verify request-format compatibility."""
    print('\n🔹 Provider Compatibility Test')
    print('=' * 40)

    for provider_name, model in [('Meta', meta_llm), ('Cohere', cohere_llm), ('xAI', xai_llm)]:
        print(f'\nTesting {provider_name} model...')
        print(f'Model ID: {model.model_id}')
        print(f'Provider: {model.provider}')
        print(f'Uses Cohere format: {model._uses_cohere_format()}')

        probe_agent = Agent(
            task='Go to google.com and tell me what you see',
            llm=model,
        )
        try:
            result = await probe_agent.run(max_steps=3)
            print(f'✅ {provider_name} model works correctly!')
            print(f'Result: {str(result)[:100]}...')
        except Exception as e:
            print(f'❌ {provider_name} model failed: {e}')
async def main():
    """Run all OCI Raw examples, printing setup guidance and troubleshooting hints."""
    # Informational banner; each entry is printed on its own line.
    banner = [
        '🚀 Oracle Cloud Infrastructure (OCI) Raw API Examples',
        '=' * 60,
        '\n📋 Prerequisites:',
        '1. OCI account with Generative AI service access',
        '2. OCI configuration file at ~/.oci/config',
        '3. Model deployed in your OCI compartment',
        '4. Proper IAM permissions for Generative AI',
        '5. OCI Python SDK installed: uv add oci',
        '=' * 60,
        '\n⚙️ Configuration Notes:',
        '• Update model_id, service_endpoint, and compartment_id with your values',
        '• Supported providers: "meta", "cohere", "xai"',
        '• Auth types: "API_KEY", "INSTANCE_PRINCIPAL", "RESOURCE_PRINCIPAL"',
        '• Default OCI config profile: "DEFAULT"',
        '=' * 60,
        '\n🔧 Provider-Specific API Formats:',
        '• Meta/xAI models: Use GenericChatRequest with messages array',
        '• Cohere models: Use CohereChatRequest with single message string',
        '• The integration automatically detects and uses the correct format',
        '=' * 60,
    ]
    for line in banner:
        print(line)

    try:
        await basic_example()
        await structured_output_example()
        await advanced_configuration_example()
        # await provider_compatibility_test()
        print('\n🎉 All examples completed successfully!')
    except Exception as e:
        print(f'\n❌ Example failed: {e}')
        print('\n🔧 Troubleshooting:')
        print('• Verify OCI configuration: oci setup config')
        print('• Check model OCID and availability')
        print('• Ensure compartment access and IAM permissions')
        print('• Verify service endpoint URL')
        print('• Check OCI Python SDK installation')
        print("• Ensure you're using the correct provider name in ChatOCIRaw")
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/gemini-3.py | examples/models/gemini-3.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
from browser_use import Agent, ChatGoogle
load_dotenv()
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
raise ValueError('GOOGLE_API_KEY is not set')
async def run_search():
    """Query browser-use's star count using Gemini 3 Pro in flash mode."""
    gemini = ChatGoogle(model='gemini-3-pro-preview', api_key=api_key)
    star_agent = Agent(
        llm=gemini,
        task='How many stars does the browser-use repo have?',
        flash_mode=True,
    )
    await star_agent.run()
if __name__ == '__main__':
asyncio.run(run_search())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/browser_use_llm.py | examples/models/browser_use_llm.py | """
Example of the fastest + smartest LLM for browser automation.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatBrowserUse
load_dotenv()
if not os.getenv('BROWSER_USE_API_KEY'):
raise ValueError('BROWSER_USE_API_KEY is not set')
async def main():
    """Run a star-count task on the hosted ChatBrowserUse model."""
    star_agent = Agent(
        task='Find the number of stars of the browser-use repo',
        llm=ChatBrowserUse(),
    )
    await star_agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/novita.py | examples/models/novita.py | """
Simple try of the agent.
@dev You need to add NOVITA_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
api_key = os.getenv('NOVITA_API_KEY', '')
if not api_key:
raise ValueError('NOVITA_API_KEY is not set')
async def run_search():
    """Run a Reddit search task on DeepSeek V3 via Novita's OpenAI-compatible API.

    Fix: the original adjacent string literals concatenated with no separator,
    so the prompt read "...search bar3. Click on first result4. Return...",
    garbling the numbered instructions given to the LLM. Trailing spaces now
    keep the steps distinct.
    """
    agent = Agent(
        task=(
            '1. Go to https://www.reddit.com/r/LocalLLaMA '
            "2. Search for 'browser use' in the search bar "
            '3. Click on first result '
            '4. Return the first comment'
        ),
        llm=ChatOpenAI(
            base_url='https://api.novita.ai/v3/openai',
            model='deepseek/deepseek-v3-0324',
            api_key=api_key,
        ),
        use_vision=False,
    )
    await agent.run()
if __name__ == '__main__':
asyncio.run(run_search())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/azure_openai.py | examples/models/azure_openai.py | """
Simple try of the agent with Azure OpenAI.
@dev You need to add AZURE_OPENAI_KEY and AZURE_OPENAI_ENDPOINT to your environment variables.
For GPT-5.1 Codex models (gpt-5.1-codex-mini, etc.), use:
llm = ChatAzureOpenAI(
model='gpt-5.1-codex-mini',
api_version='2025-03-01-preview', # Required for Responses API
# use_responses_api='auto', # Default: auto-detects based on model
)
The Responses API is automatically used for models that require it.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatAzureOpenAI
# Make sure your deployment exists, double check the region and model name
api_key = os.getenv('AZURE_OPENAI_KEY')
azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
llm = ChatAzureOpenAI(
model='gpt-5.1-codex-mini', api_key=api_key, azure_endpoint=azure_endpoint, api_version='2025-03-01-preview'
)
TASK = """
Go to google.com/travel/flights and find the cheapest flight from New York to Paris on next Sunday
"""
agent = Agent(
task=TASK,
llm=llm,
)
async def main():
    """Run the Azure-hosted agent for at most 25 steps."""
    step_budget = 25
    await agent.run(max_steps=step_budget)
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/mistral.py | examples/models/mistral.py | """
Simple agent run with Mistral.
You need to set MISTRAL_API_KEY in your environment (and optionally MISTRAL_BASE_URL).
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent
from browser_use.llm.mistral import ChatMistral
load_dotenv()
llm = ChatMistral(model='mistral-small-2506', temperature=0.6)
agent = Agent(
llm=llm,
task='List two fun weekend activities in Barcelona.',
)
async def main():
    """Run the Mistral-backed agent, then wait for a keypress before exiting."""
    step_budget = 10
    await agent.run(max_steps=step_budget)
    input('Press Enter to continue...')
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/lazy_import.py | examples/models/lazy_import.py | from browser_use import Agent, models
# available providers for this import style: openai, azure, google
agent = Agent(task='Find founders of browser-use', llm=models.azure_gpt_4_1_mini)
agent.run_sync()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/deepseek-chat.py | examples/models/deepseek-chat.py | import asyncio
import os
from browser_use import Agent
from browser_use.llm import ChatDeepSeek
# Add your custom instructions
extend_system_message = """
Remember the most important rules:
1. When performing a search task, open https://www.google.com/ first for search.
2. Final output.
"""
deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
if deepseek_api_key is None:
print('Make sure you have DEEPSEEK_API_KEY:')
print('export DEEPSEEK_API_KEY=your_key')
exit(0)
async def main():
    """Run a text-only DeepSeek agent with the custom system-prompt extension."""
    chat = ChatDeepSeek(
        base_url='https://api.deepseek.com/v1',
        model='deepseek-chat',
        api_key=deepseek_api_key,
    )
    trade_agent = Agent(
        task='What should we pay attention to in the recent new rules on tariffs in China-US trade?',
        llm=chat,
        use_vision=False,
        extend_system_message=extend_system_message,
    )
    await trade_agent.run()
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/modelscope_example.py | examples/models/modelscope_example.py | """
Simple try of the agent.
@dev You need to add MODELSCOPE_API_KEY to your environment variables.
"""
import asyncio
import os
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
# dotenv
load_dotenv()
api_key = os.getenv('MODELSCOPE_API_KEY', '')
if not api_key:
raise ValueError('MODELSCOPE_API_KEY is not set')
async def run_search():
    """Run a Google-search task on Qwen2.5-VL via ModelScope's OpenAI-compatible endpoint."""
    search_agent = Agent(
        task=('go to google, search for modelscope'),
        llm=ChatOpenAI(base_url='https://api-inference.modelscope.cn/v1/', model='Qwen/Qwen2.5-VL-72B-Instruct', api_key=api_key),
        use_vision=False,
    )
    await search_agent.run()
if __name__ == '__main__':
asyncio.run(run_search())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/gemini.py | examples/models/gemini.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
from browser_use import Agent, ChatGoogle
load_dotenv()
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
raise ValueError('GOOGLE_API_KEY is not set')
async def run_search():
    """Query browser-use's star count using Gemini Flash in flash mode."""
    gemini = ChatGoogle(model='gemini-flash-latest', api_key=api_key)
    star_agent = Agent(
        llm=gemini,
        task='How many stars does the browser-use repo have?',
        flash_mode=True,
    )
    await star_agent.run()
if __name__ == '__main__':
asyncio.run(run_search())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/langchain/serializer.py | examples/models/langchain/serializer.py | import json
from typing import overload
from langchain_core.messages import ( # pyright: ignore
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.messages import ( # pyright: ignore
ToolCall as LangChainToolCall,
)
from langchain_core.messages.base import BaseMessage as LangChainBaseMessage # pyright: ignore
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
ToolCall,
UserMessage,
)
from browser_use.llm.messages import (
SystemMessage as BrowserUseSystemMessage,
)
class LangChainMessageSerializer:
	"""Serializer for converting between browser-use message types and LangChain message types.

	All methods are static; the class is a pure namespace. Conversion is deliberately
	lossy: system/assistant content keeps only text parts, refusal parts are skipped,
	and assistant tool calls are not forwarded (see serialize()).
	"""
	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | list[str | dict]:
		"""Convert user message content for LangChain compatibility.

		Plain strings pass through unchanged; structured parts become the dict
		shapes LangChain expects ({'type': 'text', ...} / {'type': 'image_url', ...}).
		"""
		if isinstance(content, str):
			return content
		serialized_parts = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(
					{
						'type': 'text',
						'text': part.text,
					}
				)
			elif part.type == 'image_url':
				# LangChain format for images
				serialized_parts.append(
					{'type': 'image_url', 'image_url': {'url': part.image_url.url, 'detail': part.image_url.detail}}
				)
		return serialized_parts
	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str:
		"""Convert system message content to text string for LangChain compatibility.

		Non-text parts are silently dropped; text parts are joined with newlines.
		"""
		if isinstance(content, str):
			return content
		text_parts = []
		for part in content:
			if part.type == 'text':
				text_parts.append(part.text)
		return '\n'.join(text_parts)
	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str:
		"""Convert assistant message content to text string for LangChain compatibility.

		None becomes '' so callers always get a string. Refusal parts are
		intentionally skipped (see the commented-out branch below).
		"""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		text_parts = []
		for part in content:
			if part.type == 'text':
				text_parts.append(part.text)
			# elif part.type == 'refusal':
			# 	# Include refusal content as text
			# 	text_parts.append(f'[Refusal: {part.refusal}]')
		return '\n'.join(text_parts)
	@staticmethod
	def _serialize_tool_call(tool_call: ToolCall) -> LangChainToolCall:
		"""Convert browser-use ToolCall to LangChain ToolCall.

		browser-use stores tool arguments as a JSON string; LangChain wants a dict.
		"""
		# Parse the arguments string to a dict for LangChain
		try:
			args_dict = json.loads(tool_call.function.arguments)
		except json.JSONDecodeError:
			# If parsing fails, wrap in a dict so the raw string is not lost
			args_dict = {'arguments': tool_call.function.arguments}
		return LangChainToolCall(
			name=tool_call.function.name,
			args=args_dict,
			id=tool_call.id,
		)
	# region - Serialize overloads
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> HumanMessage: ...
	@overload
	@staticmethod
	def serialize(message: BrowserUseSystemMessage) -> SystemMessage: ...
	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> AIMessage: ...
	@staticmethod
	def serialize(message: BaseMessage) -> LangChainBaseMessage:
		"""Serialize a browser-use message to a LangChain message.

		Raises:
			ValueError: for message types other than User/System/Assistant.
		"""
		if isinstance(message, UserMessage):
			content = LangChainMessageSerializer._serialize_user_content(message.content)
			return HumanMessage(content=content, name=message.name)
		elif isinstance(message, BrowserUseSystemMessage):
			content = LangChainMessageSerializer._serialize_system_content(message.content)
			return SystemMessage(content=content, name=message.name)
		elif isinstance(message, AssistantMessage):
			# Handle content
			content = LangChainMessageSerializer._serialize_assistant_content(message.content)
			# For simplicity, we'll ignore tool calls in LangChain integration
			# as requested by the user
			return AIMessage(
				content=content,
				name=message.name,
			)
		else:
			raise ValueError(f'Unknown message type: {type(message)}')
	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[LangChainBaseMessage]:
		"""Serialize a list of browser-use messages to LangChain messages."""
		return [LangChainMessageSerializer.serialize(m) for m in messages]
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/langchain/chat.py | examples/models/langchain/chat.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, TypeVar, overload
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from examples.models.langchain.serializer import LangChainMessageSerializer
if TYPE_CHECKING:
from langchain_core.language_models.chat_models import BaseChatModel as LangChainBaseChatModel # type: ignore
from langchain_core.messages import AIMessage as LangChainAIMessage # type: ignore
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatLangchain(BaseChatModel):
	"""
	A wrapper around LangChain BaseChatModel that implements the browser-use BaseChatModel protocol.

	This class allows you to use any LangChain-compatible model with browser-use.
	"""

	# The LangChain model to wrap (string annotation: only imported under TYPE_CHECKING)
	chat: 'LangChainBaseChatModel'

	@property
	def model(self) -> str:
		"""Alias for `name`, required by the browser-use BaseChatModel protocol."""
		return self.name

	@property
	def provider(self) -> str:
		"""Return the provider name based on the LangChain model class."""
		model_class_name = self.chat.__class__.__name__.lower()
		if 'openai' in model_class_name:
			return 'openai'
		elif 'anthropic' in model_class_name or 'claude' in model_class_name:
			return 'anthropic'
		elif 'google' in model_class_name or 'gemini' in model_class_name:
			return 'google'
		elif 'groq' in model_class_name:
			return 'groq'
		elif 'ollama' in model_class_name:
			return 'ollama'
		elif 'deepseek' in model_class_name:
			return 'deepseek'
		else:
			return 'langchain'

	@property
	def name(self) -> str:
		"""Return the model name."""
		# Try to get model name from the LangChain model using getattr to avoid type errors
		model_name = getattr(self.chat, 'model_name', None)
		if model_name:
			return str(model_name)
		model_attr = getattr(self.chat, 'model', None)
		if model_attr:
			return str(model_attr)
		return self.chat.__class__.__name__

	def _get_usage(self, response: 'LangChainAIMessage') -> ChatInvokeUsage | None:
		"""Map LangChain `usage_metadata` onto browser-use ChatInvokeUsage (None if absent)."""
		usage = response.usage_metadata
		if usage is None:
			return None
		prompt_tokens = usage['input_tokens'] or 0
		completion_tokens = usage['output_tokens'] or 0
		total_tokens = usage['total_tokens'] or 0
		input_token_details = usage.get('input_token_details', None)
		if input_token_details is not None:
			prompt_cached_tokens = input_token_details.get('cache_read', None)
			prompt_cache_creation_tokens = input_token_details.get('cache_creation', None)
		else:
			prompt_cached_tokens = None
			prompt_cache_creation_tokens = None
		return ChatInvokeUsage(
			prompt_tokens=prompt_tokens,
			prompt_cached_tokens=prompt_cached_tokens,
			prompt_cache_creation_tokens=prompt_cache_creation_tokens,
			prompt_image_tokens=None,
			completion_tokens=completion_tokens,
			total_tokens=total_tokens,
		)

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: None = None) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T]) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""
		Invoke the LangChain model with the given messages.

		Args:
			messages: List of browser-use chat messages
			output_format: Optional Pydantic model class for structured output (not supported in basic LangChain integration)

		Returns:
			Either a string response or an instance of output_format

		Raises:
			ModelProviderError: on any underlying LangChain failure or unparseable response.
		"""
		# Convert browser-use messages to LangChain messages
		langchain_messages = LangChainMessageSerializer.serialize_messages(messages)
		try:
			if output_format is None:
				# Return string response
				response = await self.chat.ainvoke(langchain_messages)  # type: ignore
				# Import at runtime for isinstance check
				from langchain_core.messages import AIMessage as LangChainAIMessage  # type: ignore

				if not isinstance(response, LangChainAIMessage):
					raise ModelProviderError(
						message=f'Response is not an AIMessage: {type(response)}',
						model=self.name,
					)
				# Extract content from LangChain response
				content = response.content if hasattr(response, 'content') else str(response)
				usage = self._get_usage(response)
				return ChatInvokeCompletion(
					completion=str(content),
					usage=usage,
				)
			else:
				# Use LangChain's structured output capability
				try:
					structured_chat = self.chat.with_structured_output(output_format)
					parsed_object = await structured_chat.ainvoke(langchain_messages)
					# For structured output, usage metadata is typically not available
					# in the parsed object since it's a Pydantic model, not an AIMessage
					usage = None
					# Type cast since LangChain's with_structured_output returns the correct type
					return ChatInvokeCompletion(
						completion=parsed_object,  # type: ignore
						usage=usage,
					)
				except AttributeError:
					# Fall back to manual parsing if with_structured_output is not available
					response = await self.chat.ainvoke(langchain_messages)  # type: ignore
					# BUG FIX: the original passed the *string* 'LangChainAIMessage' to
					# isinstance, which raises TypeError at runtime. Import the real class
					# (the TYPE_CHECKING import above is type-only) like the branch above does.
					from langchain_core.messages import AIMessage as LangChainAIMessage  # type: ignore

					if not isinstance(response, LangChainAIMessage):
						raise ModelProviderError(
							message=f'Response is not an AIMessage: {type(response)}',
							model=self.name,
						)
					content = response.content if hasattr(response, 'content') else str(response)
					try:
						if isinstance(content, str):
							import json

							parsed_data = json.loads(content)
							if isinstance(parsed_data, dict):
								parsed_object = output_format(**parsed_data)
							else:
								raise ValueError('Parsed JSON is not a dictionary')
						else:
							raise ValueError('Content is not a string and structured output not supported')
					except Exception as e:
						raise ModelProviderError(
							message=f'Failed to parse response as {output_format.__name__}: {e}',
							model=self.name,
						) from e
					usage = self._get_usage(response)
					return ChatInvokeCompletion(
						completion=parsed_object,
						usage=usage,
					)
		except Exception as e:
			# Convert any LangChain errors to browser-use ModelProviderError
			raise ModelProviderError(
				message=f'LangChain model error: {str(e)}',
				model=self.name,
			) from e
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/langchain/example.py | examples/models/langchain/example.py | """
Example of using LangChain models with browser-use.
This example demonstrates how to:
1. Wrap a LangChain model with ChatLangchain
2. Use it with a browser-use Agent
3. Run a simple web automation task
@file purpose: Example usage of LangChain integration with browser-use
"""
import asyncio
from langchain_openai import ChatOpenAI # pyright: ignore
from browser_use import Agent
from examples.models.langchain.chat import ChatLangchain
async def main():
	"""Basic example using ChatLangchain with OpenAI through LangChain."""
	# Wrap a LangChain ChatOpenAI model so browser-use can drive it
	llm = ChatLangchain(chat=ChatOpenAI(model='gpt-4.1-mini', temperature=0.1))
	task = "Go to google.com and search for 'browser automation with Python'"
	agent = Agent(task=task, llm=llm)
	print(f'🚀 Starting task: {task}')
	print(f'🤖 Using model: {llm.name} (provider: {llm.provider})')
	# Run the agent and report what happened
	history = await agent.run()
	print(f'✅ Task completed! Steps taken: {len(history.history)}')
	if history.final_result():
		print(f'📋 Final result: {history.final_result()}')
	return history
if __name__ == '__main__':
print('🌐 Browser-use LangChain Integration Example')
print('=' * 45)
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/models/langchain/__init__.py | examples/models/langchain/__init__.py | python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false | |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/observability/openLLMetry.py | examples/observability/openLLMetry.py | import asyncio
import os
from dotenv import load_dotenv
# test if traceloop is installed
try:
from traceloop.sdk import Traceloop # type: ignore
except ImportError:
print('Traceloop is not installed')
exit(1)
from browser_use import Agent
load_dotenv()
api_key = os.getenv('TRACELOOP_API_KEY')
Traceloop.init(api_key=api_key, disable_batch=True)
async def main():
	"""Run a one-off agent; Traceloop (initialized at module import) traces the run."""
	agent = Agent('Find the founders of browser-use')
	await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/small_model_for_extraction.py | examples/features/small_model_for_extraction.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
# This uses a bigger model for the planning
# And a smaller model for the page content extraction
# Think of it like a subagent whose only task is to extract content from the current page
llm = ChatOpenAI(model='gpt-4.1')
small_llm = ChatOpenAI(model='gpt-4.1-mini')
task = 'Find the founders of browser-use in ycombinator, extract all links and open the links one by one'
agent = Agent(task=task, llm=llm, page_extraction_llm=small_llm)
async def main():
	"""Run the module-level agent (big LLM plans, small LLM extracts page content)."""
	await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/process_agent_output.py | examples/features/process_agent_output.py | import asyncio
import os
import sys
from pprint import pprint
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.agent.views import AgentHistoryList
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.browser.profile import ViewportSize
llm = ChatOpenAI(model='gpt-4.1-mini')
async def main():
	"""Run a short agent task, then dump the recorded history (results, errors, actions, thoughts)."""
	browser_session = BrowserSession(
		browser_profile=BrowserProfile(
			headless=False,
			traces_dir='./tmp/result_processing',
			window_size=ViewportSize(width=1280, height=1000),
			user_data_dir='~/.config/browseruse/profiles/default',
		)
	)
	await browser_session.start()
	try:
		agent = Agent(
			task="go to google.com and type 'OpenAI' click search and give me the first url",
			llm=llm,
			browser_session=browser_session,
		)
		# Cap at 3 steps: this demo is about inspecting history, not finishing the task
		history: AgentHistoryList = await agent.run(max_steps=3)
		print('Final Result:')
		pprint(history.final_result(), indent=4)
		print('\nErrors:')
		pprint(history.errors(), indent=4)
		# e.g. xPaths the model clicked on
		print('\nModel Outputs:')
		pprint(history.model_actions(), indent=4)
		print('\nThoughts:')
		pprint(history.model_thoughts(), indent=4)
	finally:
		# Always release the browser, even if the run raised
		await browser_session.stop()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/stop_externally.py | examples/features/stop_externally.py | import asyncio
import os
import random
import sys
from browser_use.llm.google.chat import ChatGoogle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
llm = ChatGoogle(model='gemini-flash-latest', temperature=1.0)
def check_is_task_stopped():
	"""Return an async callback that randomly (p=0.1) reports the task as stopped."""

	async def _internal_check_is_task_stopped() -> bool:
		stopped = random.random() < 0.1
		if stopped:
			print('[TASK STOPPER] Task is stopped')
		else:
			print('[TASK STOPPER] Task is not stopped')
		return stopped

	return _internal_check_is_task_stopped
task = """
Go to https://browser-use.github.io/stress-tests/challenges/wufoo-style-form.html and complete the Wufoo-style form by filling in all required fields and submitting.
"""
agent = Agent(task=task, llm=llm, flash_mode=True, register_should_stop_callback=check_is_task_stopped(), max_actions_per_step=1)
async def main():
await agent.run(max_steps=30)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/custom_output.py | examples/features/custom_output.py | """
Show how to use custom outputs.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from pydantic import BaseModel
from browser_use import Agent, ChatOpenAI
class Post(BaseModel):
	"""One Hacker News 'Show HN' entry as extracted by the agent."""
	# Title of the submission
	post_title: str
	# Link to the submission
	post_url: str
	# Comment count at extraction time
	num_comments: int
	# Age of the post in hours
	hours_since_post: int
class Posts(BaseModel):
	"""Container schema passed as the agent's structured output model."""
	posts: list[Post]
async def main():
	"""Run the agent with a structured-output schema and pretty-print the parsed posts."""
	agent = Agent(
		task='Go to hackernews show hn and give me the first 5 posts',
		llm=ChatOpenAI(model='gpt-4.1-mini'),
		output_model_schema=Posts,
	)
	history = await agent.run()
	result = history.final_result()
	if not result:
		print('No result')
		return
	# final_result() is a JSON string matching the Posts schema
	parsed: Posts = Posts.model_validate_json(result)
	for post in parsed.posts:
		print('\n--------------------------------')
		print(f'Title: {post.post_title}')
		print(f'URL: {post.post_url}')
		print(f'Comments: {post.num_comments}')
		print(f'Hours since post: {post.hours_since_post}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/blocked_domains.py | examples/features/blocked_domains.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
llm = ChatOpenAI(model='gpt-4o-mini')
# Example task: Try to navigate to various sites including blocked ones
task = 'Navigate to example.com, then try to go to x.com, then facebook.com, and finally visit google.com. Tell me which sites you were able to access.'
prohibited_domains = [
'x.com', # Block X (formerly Twitter) - "locked the f in"
'twitter.com', # Block Twitter (redirects to x.com anyway)
'facebook.com', # Lock the F in Facebook too
'*.meta.com', # Block all Meta properties (wildcard pattern)
'*.adult-site.com', # Block all subdomains of adult sites
'https://explicit-content.org', # Block specific protocol/domain
'gambling-site.net', # Block gambling sites
]
# Note: For lists with 100+ domains, automatic optimization kicks in:
# - Converts list to set for O(1) lookup (blazingly fast!)
# - Pattern matching (*.domain) is disabled for large lists
# - Both www.example.com and example.com variants are checked automatically
# Perfect for ad blockers or large malware domain lists (e.g., 400k+ domains)
browser_session = BrowserSession(
browser_profile=BrowserProfile(
prohibited_domains=prohibited_domains,
headless=False, # Set to True to run without visible browser
user_data_dir='~/.config/browseruse/profiles/blocked-demo',
),
)
agent = Agent(
task=task,
llm=llm,
browser_session=browser_session,
)
async def main():
	"""Demo the prohibited_domains feature, then wait for the user before killing the browser."""
	print('Demo: Blocked Domains Feature - "Lock the F in" Edition')
	print("We're literally locking the F in Facebook and X!")
	print(f'Prohibited domains: {prohibited_domains}')
	print('The agent will try to visit various sites, but blocked domains will be prevented.')
	print()
	await agent.run(max_steps=10)
	# Keep the (non-headless) browser open for inspection until the user confirms
	input('Press Enter to close the browser...')
	await browser_session.kill()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/multi_tab.py | examples/features/multi_tab.py | """
Simple try of the agent.
@dev You need to add OPENAI_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
# video: https://preview.screen.studio/share/clenCmS6
llm = ChatOpenAI(model='gpt-4.1-mini')
agent = Agent(
task='open 3 tabs with elon musk, sam altman, and steve jobs, then go back to the first and stop',
llm=llm,
)
async def main():
	"""Run the module-level multi-tab agent."""
	await agent.run()
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/scrolling_page.py | examples/features/scrolling_page.py | # Goal: Automates webpage scrolling with various scrolling actions, including element-specific scrolling.
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY is not set')
"""
Example: Enhanced 'Scroll' action with page amounts and element-specific scrolling.
This script demonstrates the new enhanced scrolling capabilities:
1. PAGE-LEVEL SCROLLING:
- Scrolling by specific page amounts using 'num_pages' parameter (0.5, 1.0, 2.0, etc.)
- Scrolling up or down using the 'down' parameter
- Uses JavaScript window.scrollBy() or smart container detection
2. ELEMENT-SPECIFIC SCROLLING:
- NEW: Optional 'index' parameter to scroll within specific elements
- Perfect for dropdowns, sidebars, and custom UI components
- Uses direct scrollTop manipulation (no mouse events that might close dropdowns)
- Automatically finds scroll containers in the element hierarchy
- Falls back to page scrolling if no container found
3. IMPLEMENTATION DETAILS:
- Does NOT use mouse movement or wheel events
- Direct DOM manipulation for precision and reliability
- Container-aware scrolling prevents unwanted side effects
"""
llm = ChatOpenAI(model='gpt-4.1-mini')
browser_profile = BrowserProfile(headless=False)
browser_session = BrowserSession(browser_profile=browser_profile)
# Example 1: Basic page scrolling with custom amounts
agent1 = Agent(
task="Navigate to 'https://en.wikipedia.org/wiki/Internet' and scroll down by one page - then scroll up by 0.5 pages - then scroll down by 0.25 pages - then scroll down by 2 pages.",
llm=llm,
browser_session=browser_session,
)
# Example 2: Element-specific scrolling (dropdowns and containers)
agent2 = Agent(
task="""Go to https://semantic-ui.com/modules/dropdown.html#/definition and:
1. Scroll down in the left sidebar by 2 pages
2. Then scroll down 1 page in the main content area
3. Click on the State dropdown and scroll down 1 page INSIDE the dropdown to see more states
4. The dropdown should stay open while scrolling inside it""",
llm=llm,
browser_session=browser_session,
)
# Example 3: Text-based scrolling alternative
agent3 = Agent(
task="Navigate to 'https://en.wikipedia.org/wiki/Internet' and scroll to the text 'The vast majority of computer'",
llm=llm,
browser_session=browser_session,
)
async def main():
	"""Interactively pick one of the three scrolling demo agents and run it."""
	print('Choose which scrolling example to run:')
	print('1. Basic page scrolling with custom amounts (Wikipedia)')
	print('2. Element-specific scrolling (Semantic UI dropdowns)')
	print('3. Text-based scrolling (Wikipedia)')
	choice = input('Enter choice (1-3): ').strip()
	if choice == '1':
		print('🚀 Running Example 1: Basic page scrolling...')
		await agent1.run()
	elif choice == '2':
		print('🚀 Running Example 2: Element-specific scrolling...')
		await agent2.run()
	elif choice == '3':
		print('🚀 Running Example 3: Text-based scrolling...')
		await agent3.run()
	else:
		# Anything other than 1-3 falls back to the first demo
		print('❌ Invalid choice. Running Example 1 by default...')
		await agent1.run()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/add_image_context.py | examples/features/add_image_context.py | """
Show how to use sample_images to add image context for your task
"""
import asyncio
import base64
from pathlib import Path
from typing import Any
from dotenv import load_dotenv
from browser_use import Agent
from browser_use.llm import ChatOpenAI
from browser_use.llm.messages import ContentPartImageParam, ContentPartTextParam, ImageURL
# Load environment variables
load_dotenv()
def image_to_base64(image_path: str) -> str:
	"""
	Convert image file to base64 string.

	Args:
		image_path: Path to the image file

	Returns:
		Base64 encoded string of the image

	Raises:
		FileNotFoundError: If image file doesn't exist
		OSError: If image file cannot be read
	"""
	image_file = Path(image_path)
	if not image_file.exists():
		raise FileNotFoundError(f'Image file not found: {image_path}')
	try:
		# read_bytes() opens and closes the file for us; b64encode output is ASCII
		return base64.b64encode(image_file.read_bytes()).decode('utf-8')
	except OSError as e:
		# Chain explicitly so the underlying cause is preserved as __cause__
		raise OSError(f'Failed to read image file: {e}') from e
def create_sample_images() -> list[ContentPartTextParam | ContentPartImageParam]:
	"""
	Create image context for the agent.

	Returns:
		list of content parts containing text and image data
	"""
	description = (
		'The following image explains the google layout. '
		'The image highlights several buttons with red boxes, '
		'and next to them are corresponding labels in red text.\n'
		'Each label corresponds to a button as follows:\n'
		'Label 1 is the "image" button.'
	)
	# Image path - replace with your actual image path
	encoded_image = image_to_base64('sample_image.png')
	return [
		ContentPartTextParam(text=description),
		ContentPartImageParam(
			image_url=ImageURL(
				url=f'data:image/jpeg;base64,{encoded_image}',
				media_type='image/jpeg',
			),
		),
	]
async def main() -> None:
	"""Run the browser agent, attaching the sample image context when it can be loaded."""
	try:
		sample_images = create_sample_images()
	except (FileNotFoundError, OSError) as e:
		# A missing or unreadable image is not fatal - run the task without context
		print(f'Error loading sample images: {e}')
		print('Continuing without sample images...')
		sample_images = []
	agent = Agent(
		task='goto https://www.google.com/ and click image button',
		llm=ChatOpenAI(model='gpt-4.1'),
		sample_images=sample_images,
	)
	await agent.run()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/rerun_history.py | examples/features/rerun_history.py | """
Example: Rerunning saved agent history with variable detection and substitution
This example shows how to:
1. Run an agent and save its history (including initial URL navigation)
2. Detect variables in the saved history (emails, names, dates, etc.)
3. Rerun the history with substituted values (different data)
4. Get AI-generated summary of rerun completion (with screenshot analysis)
Useful for:
- Debugging agent behavior
- Testing changes with consistent scenarios
- Replaying successful workflows with different data
- Understanding what values can be substituted in reruns
- Getting automated verification of rerun success
Note: Initial actions (like opening URLs from tasks) are now automatically
saved to history and will be replayed during rerun, so you don't need to
worry about manually specifying URLs when rerunning.
AI Features During Rerun:
1. AI Step for Extract Actions:
When an 'extract' action is replayed, the rerun automatically uses AI to
re-analyze the current page content (since it may have changed with new data).
This ensures the extracted content reflects the current state, not cached results.
2. AI Summary:
At the end of the rerun, an AI summary analyzes the final screenshot and
execution statistics to determine success/failure.
Custom LLM Usage:
# Option 1: Use agent's LLM (default)
results = await agent.load_and_rerun(history_file)
# Option 2: Use custom LLMs for AI steps and summary
from browser_use.llm import ChatOpenAI
custom_llm = ChatOpenAI(model='gpt-4.1-mini')
results = await agent.load_and_rerun(
history_file,
ai_step_llm=custom_llm, # For extract action re-evaluation
summary_llm=custom_llm, # For final summary
)
The AI summary will be the last item in results and will have:
- extracted_content: The summary text
- success: Whether rerun was successful
- is_done: Always True for summary
"""
import asyncio
from pathlib import Path
from browser_use import Agent
from browser_use.llm import ChatBrowserUse
async def main():
	"""Run an agent, save its history, detect substitutable variables, and rerun with new values."""
	# Example task to demonstrate history saving and rerunning
	history_file = Path('agent_history.json')
	# NOTE(review): 'refernence' typo in the task string — kept byte-identical here;
	# fixing it is a behavior change (it is prompt text sent to the LLM).
	task = 'Go to https://browser-use.github.io/stress-tests/challenges/reference-number-form.html and fill the form with example data and submit and extract the refernence number.'
	llm = ChatBrowserUse()
	# Optional: Use custom LLMs for AI features during rerun
	# Uncomment to use a custom LLM:
	# from browser_use.llm import ChatOpenAI
	# custom_llm = ChatOpenAI(model='gpt-4.1-mini')
	# ai_step_llm = custom_llm  # For re-evaluating extract actions
	# summary_llm = custom_llm  # For final summary
	ai_step_llm = None  # Set to None to use agent's LLM (default)
	summary_llm = None  # Set to None to use agent's LLM (default)
	# Step 1: Run the agent and save history
	print('=== Running Agent ===')
	agent = Agent(task=task, llm=llm, max_actions_per_step=1)
	await agent.run(max_steps=10)
	agent.save_history(history_file)
	print(f'✓ History saved to {history_file}')
	# Step 2: Detect variables in the saved history
	print('\n=== Detecting Variables ===')
	variables = agent.detect_variables()
	if variables:
		print(f'Found {len(variables)} variable(s):')
		for var_name, var_info in variables.items():
			format_info = f' (format: {var_info.format})' if var_info.format else ''
			print(f'  • {var_name}: "{var_info.original_value}"{format_info}')
	else:
		print('No variables detected in history')
	# Step 3: Rerun the history with substituted values
	if variables:
		print('\n=== Rerunning History (Substituted Values) ===')
		# Create new values for the detected variables
		new_values = {}
		for var_name, var_info in variables.items():
			# Map detected variables to new values
			if var_name == 'email':
				new_values[var_name] = 'jane.smith@example.com'
			elif var_name == 'full_name':
				new_values[var_name] = 'Jane Smith'
			elif var_name.startswith('full_name_'):
				new_values[var_name] = 'General Information'
			elif var_name == 'first_name':
				new_values[var_name] = 'Jane'
			elif var_name == 'date':
				new_values[var_name] = '1995-05-15'
			elif var_name == 'country':
				new_values[var_name] = 'Canada'
		# You can add more variable substitutions as needed
		if new_values:
			print(f'Substituting {len(new_values)} variable(s):')
			for var_name, new_value in new_values.items():
				old_value = variables[var_name].original_value
				print(f'  • {var_name}: "{old_value}" → "{new_value}"')
			# Rerun with substituted values and optional custom LLMs
			substitute_agent = Agent(task='', llm=llm)
			results = await substitute_agent.load_and_rerun(
				history_file,
				variables=new_values,
				ai_step_llm=ai_step_llm,  # For extract action re-evaluation
				summary_llm=summary_llm,  # For final summary
			)
			# Display AI-generated summary (last result)
			if results and results[-1].is_done:
				summary = results[-1]
				print('\n📊 AI Summary:')
				print(f'  Summary: {summary.extracted_content}')
				print(f'  Success: {summary.success}')
			print('✓ History rerun with substituted values complete')
		else:
			print('\n⚠️ No variables detected, skipping substitution rerun')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/parallel_agents.py | examples/features/parallel_agents.py | import asyncio
import os
import sys
from pathlib import Path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import ChatOpenAI
from browser_use.agent.service import Agent
from browser_use.browser import BrowserProfile, BrowserSession
browser_session = BrowserSession(
browser_profile=BrowserProfile(
keep_alive=True,
headless=False,
record_video_dir=Path('./tmp/recordings'),
user_data_dir='~/.config/browseruse/profiles/default',
)
)
llm = ChatOpenAI(model='gpt-4.1-mini')
# NOTE: This is experimental - you will have multiple agents running in the same browser session
async def main():
await browser_session.start()
agents = [
Agent(task=task, llm=llm, browser_session=browser_session)
for task in [
'Search Google for weather in Tokyo',
'Check Reddit front page title',
'Look up Bitcoin price on Coinbase',
# 'Find NASA image of the day',
# 'Check top story on CNN',
# 'Search latest SpaceX launch date',
# 'Look up population of Paris',
# 'Find current time in Sydney',
# 'Check who won last Super Bowl',
# 'Search trending topics on Twitter',
]
]
print(await asyncio.gather(*[agent.run() for agent in agents]))
await browser_session.kill()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/sensitive_data.py | examples/features/sensitive_data.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
# Initialize the model
llm = ChatOpenAI(
model='gpt-4.1',
temperature=0.0,
)
# Simple case: the model will see x_name and x_password, but never the actual values.
# sensitive_data = {'x_name': 'my_x_name', 'x_password': 'my_x_password'}
# Advanced case: domain-specific credentials with reusable data
# Define a single credential set that can be reused
company_credentials: dict[str, str] = {'telephone': '9123456789', 'email': 'user@example.com', 'name': 'John Doe'}
# Map the same credentials to multiple domains for secure access control
# Type annotation to satisfy pyright
sensitive_data: dict[str, str | dict[str, str]] = {
# 'https://example.com': company_credentials,
# 'https://admin.example.com': company_credentials,
# 'https://*.example-staging.com': company_credentials,
# 'http*://test.example.com': company_credentials,
'httpbin.org': company_credentials,
# # You can also add domain-specific credentials
# 'https://google.com': {'g_email': 'user@gmail.com', 'g_pass': 'google_password'}
}
# Update task to use one of the credentials above
task = 'Go to https://httpbin.org/forms/post and put the secure information in the relevant fields.'
agent = Agent(task=task, llm=llm, sensitive_data=sensitive_data)
async def main():
await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/judge_trace.py | examples/features/judge_trace.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm.browser_use.chat import ChatBrowserUse
# task from GAIA
task = """
If Eliud Kipchoge could maintain his record-making marathon pace indefinitely, how many thousand hours would it take him to run the distance between the Earth and the Moon its closest approach?
Please use the minimum perigee value on the Wikipedia page for the Moon when carrying out your calculation.
Round your result to the nearest 1000 hours and do not use any comma separators if necessary.
"""
async def main():
llm = ChatBrowserUse()
agent = Agent(
task=task,
llm=llm,
use_judge=True,
judge_llm=llm,
ground_truth='16', # The TRUE answer is 17 but we put 16 to demonstrate judge can detect when the answer is wrong.
)
history = await agent.run()
# Get the judgement result
if history.is_judged():
judgement = history.judgement()
print(f'Agent history judgement: {judgement}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/secure.py | examples/features/secure.py | """
Azure OpenAI example with data privacy and high-scale configuration.
Environment Variables Required:
- AZURE_OPENAI_KEY (or AZURE_OPENAI_API_KEY)
- AZURE_OPENAI_ENDPOINT
- AZURE_OPENAI_DEPLOYMENT (optional)
DATA PRIVACY WITH AZURE OPENAI:
✅ Good News: No Training on Your Data by Default
Azure OpenAI Service already protects your data:
✅ NOT used to train OpenAI models
✅ NOT shared with other customers
✅ NOT accessible to OpenAI directly
✅ NOT used to improve Microsoft/third-party products
✅ Hosted entirely within Azure (not OpenAI's servers)
⚠️ Default Data Retention (30 Days)
- Prompts and completions stored for up to 30 days
- Purpose: Abuse monitoring and compliance
- Access: Microsoft authorized personnel (only if abuse detected)
🔒 How to Disable Data Logging Completely
Apply for Microsoft's "Limited Access Program":
1. Contact Microsoft Azure support
2. Submit Limited Access Program request
3. Demonstrate legitimate business need
4. After approval: Zero data logging, immediate deletion, no human review
For high-scale deployments (500+ agents), consider:
- Multiple deployments across regions
How to Verify This Yourself, that there is no data logging:
- Network monitoring: Run with network monitoring tools
- Firewall rules: Block all domains except Azure OpenAI and your target sites
Contact us if you need help with this: support@browser-use.com
"""
import asyncio
import os
import sys
from dotenv import load_dotenv
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
load_dotenv()
os.environ['ANONYMIZED_TELEMETRY'] = 'false'
from browser_use import Agent, BrowserProfile, ChatAzureOpenAI
# Configuration LLM
api_key = os.getenv('AZURE_OPENAI_KEY')
azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
llm = ChatAzureOpenAI(model='gpt-4.1-mini', api_key=api_key, azure_endpoint=azure_endpoint)
# Configuration Task
task = 'Find the founders of the sensitive company_name'
# Configuration Browser (optional)
browser_profile = BrowserProfile(allowed_domains=['*google.com', 'browser-use.com'], enable_default_extensions=False)
# Sensitive data (optional) - {key: sensitive_information} - we filter out the sensitive_information from any input to the LLM, it will only work with placeholder.
# By default we pass screenshots to the LLM which can contain your information. Set use_vision=False to disable this.
# If you trust your LLM endpoint, you don't need to worry about this.
sensitive_data = {'company_name': 'browser-use'}
# Create Agent
agent = Agent(task=task, llm=llm, browser_profile=browser_profile, sensitive_data=sensitive_data) # type: ignore
async def main():
await agent.run(max_steps=10)
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/download_file.py | examples/features/download_file.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Browser, ChatGoogle
api_key = os.getenv('GOOGLE_API_KEY')
if not api_key:
raise ValueError('GOOGLE_API_KEY is not set')
llm = ChatGoogle(model='gemini-2.5-flash', api_key=api_key)
browser = Browser(downloads_path='~/Downloads/tmp')
async def run_download():
agent = Agent(
task='Go to "https://file-examples.com/" and download the smallest doc file. then go back and get the next file.',
llm=llm,
browser=browser,
)
await agent.run(max_steps=25)
if __name__ == '__main__':
asyncio.run(run_download())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/initial_actions.py | examples/features/initial_actions.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
llm = ChatOpenAI(model='gpt-4.1-mini')
initial_actions = [
{'navigate': {'url': 'https://www.google.com', 'new_tab': True}},
{'navigate': {'url': 'https://en.wikipedia.org/wiki/Randomness', 'new_tab': True}},
]
agent = Agent(
task='What theories are displayed on the page?',
initial_actions=initial_actions,
llm=llm,
)
async def main():
await agent.run(max_steps=10)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/video_recording.py | examples/features/video_recording.py | import asyncio
from pathlib import Path
from browser_use import Agent, Browser, ChatOpenAI
# NOTE: To use this example, install imageio[ffmpeg], e.g. with uv pip install "browser-use[video]"
async def main():
browser_session = Browser(record_video_dir=Path('./tmp/recordings'))
agent = Agent(
task='Go to github.com/trending then navigate to the first trending repository and report how many commits it has.',
llm=ChatOpenAI(model='gpt-4.1-mini'),
browser_session=browser_session,
)
await agent.run(max_steps=5)
# The video will be saved automatically when the agent finishes and the session closes.
print('Agent run finished. Check the ./tmp/recordings directory for the video.')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/custom_system_prompt.py | examples/features/custom_system_prompt.py | import asyncio
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
extend_system_message = (
'REMEMBER the most important RULE: ALWAYS open first a new tab and go first to url wikipedia.com no matter the task!!!'
)
# or use override_system_message to completely override the system prompt
async def main():
task = 'do google search to find images of Elon Musk'
model = ChatOpenAI(model='gpt-4.1-mini')
agent = Agent(task=task, llm=model, extend_system_message=extend_system_message)
print(
json.dumps(
agent.message_manager.system_prompt.model_dump(exclude_unset=True),
indent=4,
)
)
await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/follow_up_tasks.py | examples/features/follow_up_tasks.py | import asyncio
import os
import sys
from browser_use.browser.profile import BrowserProfile
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
profile = BrowserProfile(keep_alive=True)
task = """Go to reddit.com"""
async def main():
agent = Agent(task=task, browser_profile=profile)
await agent.run(max_steps=1)
while True:
user_response = input('\n👤 New task or "q" to quit: ')
agent.add_new_task(f'New task: {user_response}')
await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/fallback_model.py | examples/features/fallback_model.py | """
Example: Using a fallback LLM model.
When the primary LLM fails with rate limits (429), authentication errors (401),
payment/credit errors (402), or server errors (500, 502, 503, 504), the agent
automatically switches to the fallback model and continues execution.
Note: The primary LLM will first exhaust its own retry logic (typically 5 attempts
with exponential backoff) before the fallback is triggered. This means transient errors
are handled by the provider's built-in retries, and the fallback only kicks in when
the provider truly can't recover.
This is useful for:
- High availability: Keep your agent running even when one provider has issues
- Cost optimization: Use a cheaper model as fallback when the primary is rate limited
- Multi-provider resilience: Switch between OpenAI, Anthropic, Google, etc.
@dev You need to add OPENAI_API_KEY and ANTHROPIC_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatAnthropic, ChatOpenAI
llm = ChatAnthropic(model='claude-sonnet-4-0')
fallback_llm = ChatOpenAI(model='gpt-4o')
agent = Agent(
task='Go to github.com and find the browser-use repository',
llm=llm,
fallback_llm=fallback_llm,
)
async def main():
result = await agent.run()
print(result)
# You can check if fallback was used:
if agent.is_using_fallback_llm:
print('Note: Agent switched to fallback LLM during execution')
print(f'Current model: {agent.current_llm_model}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/restrict_urls.py | examples/features/restrict_urls.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
llm = ChatOpenAI(model='gpt-4.1-mini')
task = (
"go to google.com and search for openai.com and click on the first link then extract content and scroll down - what's there?"
)
allowed_domains = ['google.com']
browser_session = BrowserSession(
browser_profile=BrowserProfile(
executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
allowed_domains=allowed_domains,
user_data_dir='~/.config/browseruse/profiles/default',
),
)
agent = Agent(
task=task,
llm=llm,
browser_session=browser_session,
)
async def main():
await agent.run(max_steps=25)
input('Press Enter to close the browser...')
await browser_session.kill()
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/follow_up_task.py | examples/features/follow_up_task.py | from dotenv import load_dotenv
from browser_use import Agent, Browser
load_dotenv()
import asyncio
async def main():
browser = Browser(keep_alive=True)
await browser.start()
agent = Agent(task='search for browser-use.', browser_session=browser)
await agent.run(max_steps=2)
agent.add_new_task('return the title of first result')
await agent.run()
await browser.kill()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/features/large_blocklist.py | examples/features/large_blocklist.py | """
Example: Using large blocklists (400k+ domains) with automatic optimization
This example demonstrates:
1. Loading a real-world blocklist (HaGeZi's Pro++ with 439k+ domains)
2. Automatic conversion to set for O(1) lookup performance
3. Testing that blocked domains are actually blocked
Performance: ~0.02ms per domain check (50,000+ checks/second!)
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatOpenAI
from browser_use.browser import BrowserProfile, BrowserSession
llm = ChatOpenAI(model='gpt-4.1-mini')
def load_blocklist_from_url(url: str) -> list[str]:
"""Load and parse a blocklist from a URL.
Args:
url: URL to the blocklist file
Returns:
List of domain strings (comments and empty lines removed)
"""
import urllib.request
print(f'📥 Downloading blocklist from {url}...')
domains = []
with urllib.request.urlopen(url) as response:
for line in response:
line = line.decode('utf-8').strip()
# Skip comments and empty lines
if line and not line.startswith('#'):
domains.append(line)
print(f'✅ Loaded {len(domains):,} domains')
return domains
async def main():
# Load HaGeZi's Pro++ blocklist (blocks ads, tracking, malware, etc.)
# Source: https://github.com/hagezi/dns-blocklists
blocklist_url = 'https://gitlab.com/hagezi/mirror/-/raw/main/dns-blocklists/domains/pro.plus.txt'
print('=' * 70)
print('🚀 Large Blocklist Demo - 439k+ Blocked Domains')
print('=' * 70)
print()
# Load the blocklist
prohibited_domains = load_blocklist_from_url(blocklist_url)
# Sample some blocked domains to test
test_blocked = [prohibited_domains[0], prohibited_domains[1000], prohibited_domains[-1]]
print(f'\n📋 Sample blocked domains: {", ".join(test_blocked[:3])}')
print(f'\n🔧 Creating browser with {len(prohibited_domains):,} blocked domains...')
print(' (Auto-optimizing to set for O(1) lookup performance)')
# Create browser with the blocklist
# The list will be automatically optimized to a set for fast lookups
browser_session = BrowserSession(
browser_profile=BrowserProfile(
prohibited_domains=prohibited_domains,
headless=False,
user_data_dir='~/.config/browseruse/profiles/blocklist-demo',
),
)
# Task: Try to visit a blocked domain and a safe domain
blocked_site = test_blocked[0] # Will be blocked
safe_site = 'github.com' # Will be allowed
task = f"""
Try to navigate to these websites and report what happens:
1. First, try to visit https://{blocked_site}
2. Then, try to visit https://{safe_site}
Tell me which sites you were able to access and which were blocked.
"""
agent = Agent(
task=task,
llm=llm,
browser_session=browser_session,
)
print(f'\n🤖 Agent task: Try to visit {blocked_site} (blocked) and {safe_site} (allowed)')
print('\n' + '=' * 70)
await agent.run(max_steps=5)
print('\n' + '=' * 70)
print('✅ Demo complete!')
print(f'💡 The blocklist with {len(prohibited_domains):,} domains was optimized to a set')
print(' for instant O(1) domain checking (vs slow O(n) pattern matching)')
print('=' * 70)
input('\nPress Enter to close the browser...')
await browser_session.kill()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/file_system/file_system.py | examples/file_system/file_system.py | import asyncio
import os
import pathlib
import shutil
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
SCRIPT_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
agent_dir = SCRIPT_DIR / 'file_system'
agent_dir.mkdir(exist_ok=True)
conversation_dir = agent_dir / 'conversations' / 'conversation'
print(f'Agent logs directory: {agent_dir}')
task = """
Go to https://mertunsall.github.io/posts/post1.html
Save the title of the article in "data.md"
Then, use append_file to add the first sentence of the article to "data.md"
Then, read the file to see its content and make sure it's correct.
Finally, share the file with me.
NOTE: DO NOT USE extract action - everything is visible in browser state.
""".strip('\n')
llm = ChatOpenAI(model='gpt-4.1-mini')
agent = Agent(
task=task,
llm=llm,
save_conversation_path=str(conversation_dir),
file_system_path=str(agent_dir / 'fs'),
)
async def main():
agent_history = await agent.run()
print(f'Final result: {agent_history.final_result()}', flush=True)
input('Press Enter to clean the file system...')
# clean the file system
shutil.rmtree(str(agent_dir / 'fs'))
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/file_system/alphabet_earnings.py | examples/file_system/alphabet_earnings.py | import asyncio
import os
import pathlib
import shutil
from dotenv import load_dotenv
from browser_use import Agent, ChatOpenAI
load_dotenv()
SCRIPT_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
agent_dir = SCRIPT_DIR / 'alphabet_earnings'
agent_dir.mkdir(exist_ok=True)
task = """
Go to https://abc.xyz/assets/cc/27/3ada14014efbadd7a58472f1f3f4/2025q2-alphabet-earnings-release.pdf.
Read the PDF and save 3 interesting data points in "alphabet_earnings.pdf" and share it with me!
""".strip('\n')
agent = Agent(
task=task,
llm=ChatOpenAI(model='o4-mini'),
file_system_path=str(agent_dir / 'fs'),
flash_mode=True,
)
async def main():
await agent.run()
input(f'Press Enter to clean the file system at {agent_dir}...')
# clean the file system
shutil.rmtree(str(agent_dir / 'fs'))
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/file_system/excel_sheet.py | examples/file_system/excel_sheet.py | import asyncio
import os
import sys
from browser_use.llm.openai.chat import ChatOpenAI
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
llm = ChatOpenAI(model='o4-mini')
task = (
'Find current stock price of companies Meta and Amazon. Then, make me a CSV file with 2 columns: company name, stock price.'
)
agent = Agent(task=task, llm=llm)
async def main():
import time
start_time = time.time()
history = await agent.run()
# token usage
print(history.usage)
end_time = time.time()
print(f'Time taken: {end_time - start_time} seconds')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/code_agent/extract_products.py | examples/code_agent/extract_products.py | """
Example: Using code-use mode to extract products from multiple pages.
This example demonstrates the new code-use mode, which works like a Jupyter notebook
where the LLM writes Python code that gets executed in a persistent namespace.
The agent can:
- Navigate to pages
- Extract data using JavaScript
- Combine results from multiple pages
- Save data to files
- Export the session as a Jupyter notebook
This solves the problem from the brainstorm where extraction of multiple items
was difficult with the extract tool alone.
"""
import asyncio
from lmnr import Laminar
from browser_use.code_use import CodeAgent
Laminar.initialize()
async def main():
task = """
Go to https://www.flipkart.com. Continue collecting products from Flipkart in the following categories. I need approximately 50 products from:\n\n1. Books & Media (books, stationery) - 15 products\n2. Sports & Fitness (equipment, clothing, accessories) - 15 products \n3. Beauty & Personal Care (cosmetics, skincare, grooming) - 10 products\nAnd 2 other categories you find interesting.\nNavigate to these categories and collect products with:\n- Product URL (working link)\n- Product name/description\n- Actual price (MRP)\n- Deal price (current selling price) \n- Discount percentage\n\nFocus on products with good discounts and clear pricing. Target around 40 products total from these three categories.
"""
# Create code-use agent (uses ChatBrowserUse automatically)
agent = CodeAgent(
task=task,
max_steps=30,
)
try:
# Run the agent
print('Running code-use agent...')
session = await agent.run()
finally:
await agent.close()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/code_agent/filter_webvoyager_dataset.py | examples/code_agent/filter_webvoyager_dataset.py | import asyncio
from browser_use.code_use import CodeAgent
async def main():
task = """
Find the WebVoyager dataset, download it and create a new version where you remove all tasks which have older dates than today.
"""
# Create code-use agent
agent = CodeAgent(
task=task,
max_steps=25,
)
try:
# Run the agent
print('Running code-use agent to filter WebVoyager dataset...')
session = await agent.run()
finally:
await agent.close()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/sandbox/example.py | examples/sandbox/example.py | """Example of using sandbox execution with Browser-Use Agent
This example demonstrates how to use the @sandbox decorator to run
browser automation tasks with the Agent in a sandbox environment.
To run this example:
1. Set your BROWSER_USE_API_KEY environment variable
2. Set your LLM API key (OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.)
3. Run: python examples/sandbox_execution.py
"""
import asyncio
import os
from browser_use import Browser, ChatBrowserUse, sandbox
from browser_use.agent.service import Agent
# Example with event callbacks to monitor execution
def on_browser_ready(data):
"""Callback when browser session is created"""
print('\n🌐 Browser session created!')
print(f' Session ID: {data.session_id}')
print(f' Live view: {data.live_url}')
print(' Click the link above to watch the AI agent work!\n')
@sandbox(
log_level='INFO',
on_browser_created=on_browser_ready,
# server_url='http://localhost:8080/sandbox-stream',
# cloud_profile_id='21182245-590f-4712-8888-9611651a024c',
# cloud_proxy_country_code='us',
# cloud_timeout=60,
)
async def pydantic_example(browser: Browser):
agent = Agent(
"""go and check my ip address and the location. return the result in json format""",
browser=browser,
llm=ChatBrowserUse(),
)
res = await agent.run()
return res.final_result()
async def main():
"""Run examples"""
# Check if API keys are set
if not os.getenv('BROWSER_USE_API_KEY'):
print('❌ Please set BROWSER_USE_API_KEY environment variable')
return
print('\n\n=== Search with AI Agent (with live browser view) ===')
search_result = await pydantic_example()
print('\nResults:')
print(search_result)
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/sandbox/structured_output.py | examples/sandbox/structured_output.py | """Example of using structured output with sandbox execution
To run:
export BROWSER_USE_API_KEY=your_key
python examples/sandbox/structured_output.py
"""
import asyncio
import os
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse, sandbox
from browser_use.agent.views import AgentHistoryList
class IPLocation(BaseModel):
"""Structured output for IP location data"""
ip_address: str = Field(description='The public IP address')
country: str = Field(description='Country name')
city: str | None = Field(default=None, description='City name if available')
region: str | None = Field(default=None, description='Region/state if available')
@sandbox(log_level='INFO')
async def get_ip_location(browser: Browser) -> AgentHistoryList:
"""Get IP location using sandbox"""
agent = Agent(
task='Go to ipinfo.io and extract my IP address and location details (country, city, region)',
browser=browser,
llm=ChatBrowserUse(),
output_model_schema=IPLocation,
)
return await agent.run(max_steps=10)
async def main():
if not os.getenv('BROWSER_USE_API_KEY'):
print('❌ Please set BROWSER_USE_API_KEY environment variable')
print(' Get a key at: https://cloud.browser-use.com/new-api-key')
return
result = await get_ip_location()
location = result.get_structured_output(IPLocation)
if location:
print(f'IP: {location.ip_address}')
print(f'Country: {location.country}')
print(f'City: {location.city or "N/A"}')
print(f'Region: {location.region or "N/A"}')
else:
print(f'No structured output. Final result: {result.final_result()}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/cloud/02_fast_mode_gemini.py | examples/cloud/02_fast_mode_gemini.py | """
Cloud Example 2: Ultra-Fast Mode with Gemini Flash ⚡
====================================================
This example demonstrates the fastest and most cost-effective configuration:
- Gemini 2.5 Flash model ($0.01 per step)
- No proxy (faster execution, but no captcha solving)
- No element highlighting (better performance)
- Optimized viewport size
- Maximum speed configuration
Perfect for: Quick content generation, humor tasks, fast web scraping
Cost: ~$0.03 (1 task + 2-3 steps with Gemini Flash)
Speed: 2-3x faster than default configuration
Fun Factor: 💯 (Creates hilarious tech commentary)
"""
import argparse
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
raise ValueError(
'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
"""Make HTTP request with timeout and retry logic."""
kwargs.setdefault('timeout', TIMEOUT)
for attempt in range(3):
try:
response = requests.request(method, url, **kwargs)
response.raise_for_status()
return response
except RequestException as e:
if attempt == 2: # Last attempt
raise
sleep_time = 2**attempt
print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {sleep_time}s: {e}')
time.sleep(sleep_time)
raise RuntimeError('Unexpected error in retry logic')
def create_fast_task(instructions: str) -> str:
    """
    Create a browser automation task optimized for speed and cost.

    Args:
        instructions: Natural language description of what the agent should do

    Returns:
        task_id: Unique identifier for the created task
    """
    print(f'⚡ Creating FAST task: {instructions}')
    # Ultra-fast configuration
    payload = {
        'task': instructions,
        # Model: Fastest and cheapest
        'llm_model': 'gemini-2.5-flash',
        # Performance optimizations
        'use_proxy': False,  # No proxy = faster execution
        'highlight_elements': False,  # No highlighting = better performance
        'use_adblock': True,  # Block ads for faster loading
        # Viewport optimization (smaller = faster)
        'browser_viewport_width': 1024,
        'browser_viewport_height': 768,
        # Cost control
        'max_agent_steps': 25,  # Reasonable limit for fast tasks
        # Enable sharing for viewing execution
        'enable_public_share': True,  # Get shareable URLs
        # Optional: Speed up with domain restrictions
        # "allowed_domains": ["google.com", "*.google.com"]
    }
    # _request_with_retry raises on persistent HTTP failure.
    response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
    task_id = response.json()['id']
    print(f'✅ Fast task created with ID: {task_id}')
    print('⚡ Configuration: Gemini Flash + No Proxy + No Highlighting')
    return task_id
def monitor_fast_task(task_id: str) -> dict[str, Any]:
    """
    Monitor task with optimized polling for fast execution.

    Polls every second, printing a one-line progress indicator that
    overwrites itself via '\\r', and reports per-step timing.

    Args:
        task_id: The task to monitor

    Returns:
        Complete task details with output
    """
    print(f'🚀 Fast monitoring task {task_id}...')
    start_time = time.time()
    step_count = 0  # steps seen so far; used to detect a newly-completed step
    last_step_time = start_time
    # Faster polling for quick tasks
    poll_interval = 1  # Check every second for fast tasks
    while True:
        response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
        details = response.json()
        status = details['status']
        # Show progress with timing
        current_steps = len(details.get('steps', []))
        elapsed = time.time() - start_time
        # Build status message: a new step shows per-step duration,
        # otherwise show overall status.
        if current_steps > step_count:
            step_time = time.time() - last_step_time
            last_step_time = time.time()
            step_count = current_steps
            step_msg = f'🔥 Step {current_steps} | ⚡ {step_time:.1f}s | Total: {elapsed:.1f}s'
        else:
            if status == 'running':
                step_msg = f'🚀 Step {current_steps} | ⏱️ {elapsed:.1f}s | Fast processing...'
            else:
                step_msg = f'🚀 Step {current_steps} | ⏱️ {elapsed:.1f}s | Status: {status}'
        # Clear line and show progress
        print(f'\r{step_msg:<80}', end='', flush=True)
        # Check completion
        if status == 'finished':
            total_time = time.time() - start_time
            if current_steps > 0:
                avg_msg = f'⚡ Average: {total_time / current_steps:.1f}s per step'
            else:
                avg_msg = '⚡ No steps recorded'
            print(f'\r🏁 Task completed in {total_time:.1f}s! {avg_msg}' + ' ' * 20)
            return details
        elif status in ['failed', 'stopped']:
            print(f'\r❌ Task {status} after {elapsed:.1f}s' + ' ' * 30)
            return details
        time.sleep(poll_interval)
def run_speed_comparison():
    """Run multiple tasks sequentially and print a per-task / per-step timing summary."""
    print('\n🏃♂️ Speed Comparison Demo')
    print('=' * 40)
    tasks = [
        'Go to ProductHunt and roast the top product like a sarcastic tech reviewer',
        'Visit Reddit r/ProgrammerHumor and summarize the top post as a dramatic news story',
        "Check GitHub trending and write a conspiracy theory about why everyone's switching to Rust",
    ]
    results = []
    for i, task in enumerate(tasks, 1):
        print(f'\n📝 Fast Task {i}/{len(tasks)}')
        print(f'Task: {task}')
        start = time.time()
        task_id = create_fast_task(task)
        result = monitor_fast_task(task_id)  # blocks until terminal state
        end = time.time()
        results.append(
            {
                'task': task,
                'duration': end - start,
                'steps': len(result.get('steps', [])),
                'status': result['status'],
                # Keep only a preview of the output for the summary table.
                'output': result.get('output', '')[:100] + '...' if result.get('output') else 'No output',
            }
        )
    # Summary
    print('\n📊 Speed Summary')
    print('=' * 50)
    total_time = sum(r['duration'] for r in results)
    total_steps = sum(r['steps'] for r in results)
    for i, result in enumerate(results, 1):
        print(f'Task {i}: {result["duration"]:.1f}s ({result["steps"]} steps) - {result["status"]}')
    print(f'\n⚡ Total time: {total_time:.1f}s')
    print(f'🔥 Average per task: {total_time / len(results):.1f}s')
    if total_steps > 0:
        print(f'💨 Average per step: {total_time / total_steps:.1f}s')
    else:
        # Guard against division by zero when no steps were recorded.
        print('💨 Average per step: N/A (no steps recorded)')
def main():
    """Demonstrate ultra-fast cloud automation.

    Parses CLI flags, runs one demo task, then optionally runs the
    3-task speed comparison when --compare is given.
    """
    # Parse arguments BEFORE doing any paid API work, so that `--help` and
    # invalid flags exit immediately instead of only being processed after
    # the first task has already run (and been billed). The original
    # version parsed arguments at the end of the try block.
    parser = argparse.ArgumentParser(description='Fast mode demo with Gemini Flash')
    parser.add_argument('--compare', action='store_true', help='Run speed comparison with 3 tasks')
    args = parser.parse_args()

    print('⚡ Browser Use Cloud - Ultra-Fast Mode with Gemini Flash')
    print('=' * 60)
    print('🎯 Configuration Benefits:')
    print('• Gemini Flash: $0.01 per step (cheapest)')
    print('• No proxy: 30% faster execution')
    print('• No highlighting: Better performance')
    print('• Optimized viewport: Faster rendering')
    try:
        # Single fast task
        print('\n🚀 Single Fast Task Demo')
        print('-' * 30)
        task = """
    Go to Hacker News (news.ycombinator.com) and get the top 3 articles from the front page.
    Then, write a funny tech news segment in the style of Fireship YouTube channel:
    - Be sarcastic and witty about tech trends
    - Use developer humor and memes
    - Make fun of common programming struggles
    - Include phrases like "And yes, it runs on JavaScript" or "Plot twist: it's written in Rust"
    - Keep it under 250 words but make it entertaining
    - Structure it like a news anchor delivering breaking tech news
    Make each story sound dramatic but also hilarious, like you're reporting on the most important events in human history.
    """
        task_id = create_fast_task(task)
        result = monitor_fast_task(task_id)
        print(f'\n📊 Result: {result.get("output", "No output")}')
        # Show execution URLs
        if result.get('live_url'):
            print(f'\n🔗 Live Preview: {result["live_url"]}')
        if result.get('public_share_url'):
            print(f'🌐 Share URL: {result["public_share_url"]}')
        elif result.get('share_url'):
            print(f'🌐 Share URL: {result["share_url"]}')
        # Optional: run the 3-task speed comparison.
        if args.compare:
            print('\n🏃♂️ Running speed comparison...')
            run_speed_comparison()
    except requests.exceptions.RequestException as e:
        print(f'❌ API Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/cloud/04_proxy_usage.py | examples/cloud/04_proxy_usage.py | """
Cloud Example 4: Proxy Usage 🌍
===============================
This example demonstrates reliable proxy usage scenarios:
- Different country proxies for geo-restrictions
- IP address and location verification
- Region-specific content access (streaming, news)
- Search result localization by country
- Mobile/residential proxy benefits
Perfect for: Geo-restricted content, location testing, regional analysis
Cost: ~$0.08 (1 task + 6-8 steps with proxy enabled)
"""
import argparse
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration — credentials and endpoint are taken from the environment.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
    # Fail fast at import time: nothing below works without credentials.
    raise ValueError(
        'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
    )
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))  # per-request timeout in seconds
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
    """Issue an HTTP request, retrying up to 3 times with exponential backoff.

    Any RequestException (including non-2xx responses surfaced via
    raise_for_status) triggers a retry; the final failure is re-raised.
    """
    kwargs.setdefault('timeout', TIMEOUT)
    attempt = 0
    while True:
        try:
            resp = requests.request(method, url, **kwargs)
            resp.raise_for_status()
            return resp
        except RequestException as exc:
            attempt += 1
            if attempt >= 3:
                raise
            backoff = 2 ** (attempt - 1)  # 1s, then 2s
            print(f'⚠️ Request failed (attempt {attempt}/3), retrying in {backoff}s: {exc}')
            time.sleep(backoff)
def create_task_with_proxy(instructions: str, country_code: str = 'us') -> str:
    """
    Create a task with proxy enabled from a specific country.

    Args:
        instructions: Task description
        country_code: Proxy country ('us', 'fr', 'it', 'jp', 'au', 'de', 'fi', 'ca')

    Returns:
        task_id: Unique identifier for the created task
    """
    print(f'🌍 Creating task with {country_code.upper()} proxy')
    print(f'📝 Task: {instructions}')
    payload = {
        'task': instructions,
        'llm_model': 'gpt-4.1-mini',
        # Proxy configuration
        'use_proxy': True,  # Required for captcha solving
        'proxy_country_code': country_code,  # Choose proxy location
        # Standard settings
        'use_adblock': True,  # Block ads for faster loading
        'highlight_elements': True,  # Keep highlighting for visibility
        'max_agent_steps': 15,  # cost guard: cap runaway agents
        # Enable sharing for viewing execution
        'enable_public_share': True,  # Get shareable URLs
    }
    # _request_with_retry raises on persistent HTTP failure.
    response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
    task_id = response.json()['id']
    print(f'✅ Task created with {country_code.upper()} proxy: {task_id}')
    return task_id
def test_ip_location(country_code: str) -> dict[str, Any]:
    """Test IP address and location detection with proxy.

    Creates a task that visits whatismyipaddress.com through a proxy in
    `country_code` and blocks until the task reaches a terminal state.
    """
    task = """
    Go to whatismyipaddress.com and tell me:
    1. The detected IP address
    2. The detected country/location
    3. The ISP/organization
    4. Any other location details shown
    Please be specific about what you see on the page.
    """
    task_id = create_task_with_proxy(task, country_code)
    return wait_for_completion(task_id)
def test_geo_restricted_content(country_code: str) -> dict[str, Any]:
    """Test access to geo-restricted content via a proxy in `country_code`; blocks until done."""
    task = """
    Go to a major news website (like BBC, CNN, or local news) and check:
    1. What content is available
    2. Any geo-restriction messages
    3. Local/regional content differences
    4. Language or currency preferences shown
    Note any differences from what you might expect.
    """
    task_id = create_task_with_proxy(task, country_code)
    return wait_for_completion(task_id)
def test_streaming_service_access(country_code: str) -> dict[str, Any]:
    """Test access to region-specific streaming content via a proxy in `country_code`; blocks until done."""
    task = """
    Go to a major streaming service website (like Netflix, YouTube, or BBC iPlayer)
    and check what content or messaging appears.
    Report:
    1. What homepage content is shown
    2. Any geo-restriction messages or content differences
    3. Available content regions or language options
    4. Any pricing or availability differences
    Note: Don't try to log in, just observe the publicly available content.
    """
    task_id = create_task_with_proxy(task, country_code)
    return wait_for_completion(task_id)
def test_search_results_by_location(country_code: str) -> dict[str, Any]:
    """Test how search results vary by location using a proxy in `country_code`; blocks until done."""
    task = """
    Go to Google and search for "best restaurants near me" or "local news".
    Report:
    1. What local results appear
    2. The detected location in search results
    3. Any location-specific content or ads
    4. Language preferences
    This will show how search results change based on proxy location.
    """
    task_id = create_task_with_proxy(task, country_code)
    return wait_for_completion(task_id)
def wait_for_completion(task_id: str) -> dict[str, Any]:
    """Wait for task completion and return results.

    Polls the task endpoint every 3 seconds, rendering a single-line
    progress indicator (overwritten via '\\r') until the task reaches a
    terminal state ('finished', 'failed' or 'stopped').
    """
    print(f'⏳ Waiting for task {task_id} to complete...')
    start_time = time.time()
    while True:
        response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
        details = response.json()
        status = details['status']
        steps = len(details.get('steps', []))
        elapsed = time.time() - start_time
        # Build status message
        if status == 'running':
            status_msg = f'🌍 Proxy task | Step {steps} | ⏱️ {elapsed:.0f}s | 🤖 Processing...'
        else:
            status_msg = f'🌍 Proxy task | Step {steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
        # Clear line and show status
        print(f'\r{status_msg:<80}', end='', flush=True)
        if status == 'finished':
            print(f'\r✅ Task completed in {steps} steps! ({elapsed:.1f}s total)' + ' ' * 20)
            return details
        elif status in ['failed', 'stopped']:
            print(f'\r❌ Task {status} after {steps} steps' + ' ' * 30)
            return details
        time.sleep(3)
def demo_proxy_countries():
    """Demonstrate proxy usage across different countries.

    Runs the IP-location check through four country proxies sequentially
    and prints a status summary at the end.
    """
    print('\n🌍 Demo 1: Proxy Countries Comparison')
    print('-' * 45)
    countries = [('us', 'United States'), ('de', 'Germany'), ('jp', 'Japan'), ('au', 'Australia')]
    results = {}
    for code, name in countries:
        print(f'\n🌍 Testing {name} ({code.upper()}) proxy:')
        print('=' * 40)
        result = test_ip_location(code)
        results[code] = result
        if result.get('output'):
            # Only a preview of the agent output is printed.
            print(f'📍 Location Result: {result["output"][:200]}...')
        # Show execution URLs
        if result.get('live_url'):
            print(f'🔗 Live Preview: {result["live_url"]}')
        if result.get('public_share_url'):
            print(f'🌐 Share URL: {result["public_share_url"]}')
        elif result.get('share_url'):
            print(f'🌐 Share URL: {result["share_url"]}')
        print('-' * 40)
        time.sleep(2)  # Brief pause between tests
    # Summary comparison
    print('\n📊 Proxy Location Summary:')
    print('=' * 30)
    for code, result in results.items():
        status = result.get('status', 'unknown')
        print(f'{code.upper()}: {status}')
def demo_geo_restrictions():
    """Run the geo-restriction check from two proxy locations and print a snippet of each result."""
    print('\n🚫 Demo 2: Geo-Restriction Testing')
    print('-' * 40)
    # Compare what the same sites serve to US vs. European visitors.
    for country, label in (('us', 'US content'), ('de', 'European content')):
        print(f'\n🌍 Testing {label} with {country.upper()} proxy:')
        outcome = test_geo_restricted_content(country)
        text = outcome.get('output')
        if text:
            print(f'📰 Content Access: {text[:200]}...')
        time.sleep(2)
def demo_streaming_access():
    """Check what streaming-site content is visible from US and German proxies."""
    print('\n📺 Demo 3: Streaming Service Access')
    print('-' * 40)
    for country, label in (('us', 'US'), ('de', 'Germany')):
        print(f'\n🌍 Testing streaming access from {label}:')
        outcome = test_streaming_service_access(country)
        text = outcome.get('output')
        if text:
            print(f'📺 Access Result: {text[:200]}...')
        time.sleep(2)
def demo_search_localization():
    """Show how search results differ when browsing from US vs. German proxies."""
    print('\n🔍 Demo 4: Search Localization')
    print('-' * 35)
    for country, label in (('us', 'US'), ('de', 'Germany')):
        print(f'\n🌍 Testing search results from {label}:')
        outcome = test_search_results_by_location(country)
        text = outcome.get('output')
        if text:
            print(f'🔍 Search Results: {text[:200]}...')
        time.sleep(2)
def main():
    """Demonstrate comprehensive proxy usage.

    Prints an overview banner, then dispatches to one (or all) of the
    four proxy demos based on the --demo CLI flag.
    """
    print('🌍 Browser Use Cloud - Proxy Usage Examples')
    print('=' * 50)
    print('🎯 Proxy Benefits:')
    print('• Bypass geo-restrictions')
    print('• Test location-specific content')
    print('• Access region-locked websites')
    print('• Mobile/residential IP addresses')
    print('• Verify IP geolocation')
    print('\n🌐 Available Countries:')
    countries = ['🇺🇸 US', '🇫🇷 France', '🇮🇹 Italy', '🇯🇵 Japan', '🇦🇺 Australia', '🇩🇪 Germany', '🇫🇮 Finland', '🇨🇦 Canada']
    print(' • '.join(countries))
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser(description='Proxy usage examples')
        parser.add_argument(
            '--demo', choices=['countries', 'geo', 'streaming', 'search', 'all'], default='countries', help='Which demo to run'
        )
        args = parser.parse_args()
        print(f'\n🔍 Running {args.demo} demo(s)...')
        # Dispatch to the requested demo(s); 'all' runs them sequentially.
        if args.demo == 'countries':
            demo_proxy_countries()
        elif args.demo == 'geo':
            demo_geo_restrictions()
        elif args.demo == 'streaming':
            demo_streaming_access()
        elif args.demo == 'search':
            demo_search_localization()
        elif args.demo == 'all':
            demo_proxy_countries()
            demo_geo_restrictions()
            demo_streaming_access()
            demo_search_localization()
    except requests.exceptions.RequestException as e:
        print(f'❌ API Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/cloud/05_search_api.py | examples/cloud/05_search_api.py | """
Cloud Example 5: Search API (Beta) 🔍
=====================================
This example demonstrates the Browser Use Search API (BETA):
- Simple search: Search Google and extract from multiple results
- URL search: Extract specific content from a target URL
- Deep navigation through websites (depth parameter)
- Real-time content extraction vs cached results
Perfect for: Content extraction, research, competitive analysis
"""
import argparse
import asyncio
import json
import os
import time
from typing import Any
import aiohttp
# Configuration — credentials and endpoint are taken from the environment.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
    # Fail fast at import time: nothing below works without credentials.
    raise ValueError(
        'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
    )
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))  # total request timeout in seconds
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
async def simple_search(query: str, max_websites: int = 5, depth: int = 2) -> dict[str, Any]:
    """
    Search Google and extract content from multiple top results.

    Args:
        query: Search query to process
        max_websites: Number of websites to process (1-10; values outside are clamped)
        depth: How deep to navigate (2-5; values outside are clamped)

    Returns:
        Dictionary with results from multiple websites, or a dict with
        'error' and 'details' keys on HTTP/JSON failure.
    """
    # Validate input parameters
    max_websites = max(1, min(max_websites, 10))  # Clamp to 1-10
    depth = max(2, min(depth, 5))  # Clamp to 2-5
    start_time = time.time()
    print(f"🔍 Simple Search: '{query}'")
    print(f'📊 Processing {max_websites} websites at depth {depth}')
    print(f'💰 Estimated cost: {depth * max_websites}¢')  # pricing: 1¢ × depth × websites
    payload = {'query': query, 'max_websites': max_websites, 'depth': depth}
    timeout = aiohttp.ClientTimeout(total=TIMEOUT)
    connector = aiohttp.TCPConnector(limit=10)  # Limit concurrent connections
    async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
        async with session.post(f'{BASE_URL}/simple-search', json=payload, headers=HEADERS) as response:
            elapsed = time.time() - start_time
            if response.status == 200:
                try:
                    result = await response.json()
                    print(f'✅ Found results from {len(result.get("results", []))} websites in {elapsed:.1f}s')
                    return result
                except (aiohttp.ContentTypeError, json.JSONDecodeError) as e:
                    # 200 with a non-JSON body: report it instead of raising.
                    error_text = await response.text()
                    print(f'❌ Invalid JSON response: {e} (after {elapsed:.1f}s)')
                    return {'error': 'Invalid JSON', 'details': error_text}
            else:
                error_text = await response.text()
                print(f'❌ Search failed: {response.status} - {error_text} (after {elapsed:.1f}s)')
                return {'error': f'HTTP {response.status}', 'details': error_text}
async def search_url(url: str, query: str, depth: int = 2) -> dict[str, Any]:
    """
    Extract specific content from a target URL.

    Args:
        url: Target URL to extract from
        query: What specific content to look for
        depth: How deep to navigate (2-5; values outside are clamped)

    Returns:
        Dictionary with extracted content, or a dict with 'error' and
        'details' keys on HTTP/JSON failure.
    """
    # Validate input parameters
    depth = max(2, min(depth, 5))  # Clamp to 2-5
    start_time = time.time()
    print(f'🎯 URL Search: {url}')
    print(f"🔍 Looking for: '{query}'")
    print(f'📊 Navigation depth: {depth}')
    print(f'💰 Estimated cost: {depth}¢')  # pricing: 1¢ × depth
    payload = {'url': url, 'query': query, 'depth': depth}
    timeout = aiohttp.ClientTimeout(total=TIMEOUT)
    connector = aiohttp.TCPConnector(limit=10)  # Limit concurrent connections
    async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
        async with session.post(f'{BASE_URL}/search-url', json=payload, headers=HEADERS) as response:
            elapsed = time.time() - start_time
            if response.status == 200:
                try:
                    result = await response.json()
                    print(f'✅ Extracted content from {result.get("url", "website")} in {elapsed:.1f}s')
                    return result
                except (aiohttp.ContentTypeError, json.JSONDecodeError) as e:
                    # 200 with a non-JSON body: report it instead of raising.
                    error_text = await response.text()
                    print(f'❌ Invalid JSON response: {e} (after {elapsed:.1f}s)')
                    return {'error': 'Invalid JSON', 'details': error_text}
            else:
                error_text = await response.text()
                print(f'❌ URL search failed: {response.status} - {error_text} (after {elapsed:.1f}s)')
                return {'error': f'HTTP {response.status}', 'details': error_text}
def display_simple_search_results(results: dict[str, Any]):
    """Pretty-print the aggregated results of a simple search.

    Shows each website's URL and up to 300 characters of its extracted
    content, followed by any live/share URLs the API returned.
    """
    if 'error' in results:
        print(f'❌ Error: {results["error"]}')
        return
    sites = results.get('results', [])
    print(f'\n📋 Search Results ({len(sites)} websites)')
    print('=' * 50)
    index = 0
    for site in sites:
        index += 1
        print(f'\n{index}. 🌐 {site.get("url", "Unknown URL")}')
        print('-' * 40)
        text = site.get('content', 'No content')
        if len(text) <= 300:
            print(text)
        else:
            # Truncate long content and note the full length.
            print(f'{text[:300]}...')
            print(f'[Content truncated - {len(text)} total characters]')
    # Show execution URLs if available
    if results.get('live_url'):
        print(f'\n🔗 Live Preview: {results["live_url"]}')
    if results.get('public_share_url'):
        print(f'🌐 Share URL: {results["public_share_url"]}')
    elif results.get('share_url'):
        print(f'🌐 Share URL: {results["share_url"]}')
def display_url_search_results(results: dict[str, Any]):
    """Pretty-print the full content extracted from a single URL search."""
    if 'error' in results:
        print(f'❌ Error: {results["error"]}')
        return
    print(f'\n📄 Extracted Content from: {results.get("url", "Unknown URL")}')
    print('=' * 60)
    print(results.get('content', 'No content'))
    # Show execution URLs if available
    if results.get('live_url'):
        print(f'\n🔗 Live Preview: {results["live_url"]}')
    if results.get('public_share_url'):
        print(f'🌐 Share URL: {results["public_share_url"]}')
    elif results.get('share_url'):
        print(f'🌐 Share URL: {results["share_url"]}')
async def demo_news_search():
    """Demo: Search for latest news across multiple sources.

    Runs a 4-site, depth-2 simple search, prints the results and the
    total wall-clock time, and returns the raw result dict.
    """
    print('\n📰 Demo 1: Latest News Search')
    print('-' * 35)
    demo_start = time.time()
    query = 'latest developments in artificial intelligence 2024'
    results = await simple_search(query, max_websites=4, depth=2)
    demo_elapsed = time.time() - demo_start
    display_simple_search_results(results)
    print(f'\n⏱️ Total demo time: {demo_elapsed:.1f}s')
    return results
async def demo_competitive_analysis():
    """Demo: Analyze competitor websites with a 3-site, depth-3 search."""
    print('\n🏢 Demo 2: Competitive Analysis')
    print('-' * 35)
    search_results = await simple_search(
        'browser automation tools comparison features pricing',
        max_websites=3,
        depth=3,
    )
    display_simple_search_results(search_results)
    return search_results
async def demo_deep_website_analysis():
    """Demo: Deep analysis of a specific website.

    Runs a depth-3 URL search against the Browser Use docs, prints the
    extracted content and total time, and returns the raw result dict.
    """
    print('\n🎯 Demo 3: Deep Website Analysis')
    print('-' * 35)
    demo_start = time.time()
    url = 'https://docs.browser-use.com'
    query = 'Browser Use features, pricing, and API capabilities'
    results = await search_url(url, query, depth=3)
    demo_elapsed = time.time() - demo_start
    display_url_search_results(results)
    print(f'\n⏱️ Total demo time: {demo_elapsed:.1f}s')
    return results
async def demo_product_research():
    """Demo: Product research across five sites at depth 2."""
    print('\n🛍️ Demo 4: Product Research')
    print('-' * 30)
    search_results = await simple_search(
        'best wireless headphones 2024 reviews comparison',
        max_websites=5,
        depth=2,
    )
    display_simple_search_results(search_results)
    return search_results
async def demo_real_time_vs_cached():
    """Demo: Show difference between real-time and cached data.

    Prints the live-browsing selling points, then runs a live price
    search as a concrete example and returns its raw result dict.
    """
    print('\n⚡ Demo 5: Real-time vs Cached Data')
    print('-' * 40)
    print('🔄 Browser Use Search API benefits:')
    print('• Actually browses websites like a human')
    print('• Gets live, current data (not cached)')
    print('• Navigates deep into sites via clicks')
    print('• Handles JavaScript and dynamic content')
    print('• Accesses pages requiring navigation')
    # Example with live data
    query = 'current Bitcoin price USD live'
    results = await simple_search(query, max_websites=3, depth=2)
    print('\n💰 Live Bitcoin Price Search Results:')
    display_simple_search_results(results)
    return results
async def demo_search_depth_comparison():
    """Demo: Compare different search depths.

    Runs the same URL search at depths 2-4 and summarizes how much
    content each depth extracted. Returns a depth -> result dict.
    """
    print('\n📊 Demo 6: Search Depth Comparison')
    print('-' * 40)
    url = 'https://news.ycombinator.com'
    query = 'trending technology discussions'
    depths = [2, 3, 4]
    results = {}
    for depth in depths:
        print(f'\n🔍 Testing depth {depth}:')
        result = await search_url(url, query, depth)
        results[depth] = result
        if 'content' in result:
            content_length = len(result['content'])
            print(f'📏 Content length: {content_length} characters')
        # Brief pause between requests
        await asyncio.sleep(1)
    # Summary
    print('\n📊 Depth Comparison Summary:')
    print('-' * 30)
    for depth, result in results.items():
        if 'content' in result:
            length = len(result['content'])
            print(f'Depth {depth}: {length} characters')
        else:
            print(f'Depth {depth}: Error or no content')
    return results
async def main():
    """Demonstrate comprehensive Search API usage.

    Prints the feature/pricing banner, then dispatches to one (or all)
    of the six demos based on the --demo CLI flag.
    """
    print('🔍 Browser Use Cloud - Search API (BETA)')
    print('=' * 45)
    print('⚠️ Note: This API is in BETA and may change')
    print()
    print('🎯 Search API Features:')
    print('• Real-time website browsing (not cached)')
    print('• Deep navigation through multiple pages')
    print('• Dynamic content and JavaScript handling')
    print('• Multiple result aggregation')
    print('• Cost-effective content extraction')
    print('\n💰 Pricing:')
    print('• Simple Search: 1¢ × depth × websites')
    print('• URL Search: 1¢ × depth')
    print('• Example: depth=2, 5 websites = 10¢')
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser(description='Search API (BETA) examples')
        parser.add_argument(
            '--demo',
            choices=['news', 'competitive', 'deep', 'product', 'realtime', 'depth', 'all'],
            default='news',
            help='Which demo to run',
        )
        args = parser.parse_args()
        print(f'\n🔍 Running {args.demo} demo(s)...')
        # Dispatch to the requested demo(s); 'all' runs them sequentially.
        if args.demo == 'news':
            await demo_news_search()
        elif args.demo == 'competitive':
            await demo_competitive_analysis()
        elif args.demo == 'deep':
            await demo_deep_website_analysis()
        elif args.demo == 'product':
            await demo_product_research()
        elif args.demo == 'realtime':
            await demo_real_time_vs_cached()
        elif args.demo == 'depth':
            await demo_search_depth_comparison()
        elif args.demo == 'all':
            await demo_news_search()
            await demo_competitive_analysis()
            await demo_deep_website_analysis()
            await demo_product_research()
            await demo_real_time_vs_cached()
            await demo_search_depth_comparison()
    except aiohttp.ClientError as e:
        print(f'❌ Network Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/cloud/01_basic_task.py | examples/cloud/01_basic_task.py | """
Cloud Example 1: Your First Browser Use Cloud Task
==================================================
This example demonstrates the most basic Browser Use Cloud functionality:
- Create a simple automation task
- Get the task ID
- Monitor completion
- Retrieve results
Perfect for first-time cloud users to understand the API basics.
Cost: ~$0.04 (1 task + 3 steps with GPT-4.1 mini)
"""
import os
import time
from typing import Any
import requests
from requests.exceptions import RequestException
# Configuration — credentials and endpoint are taken from the environment.
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
    # Fail fast at import time: nothing below works without credentials.
    raise ValueError(
        'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
    )
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))  # per-request timeout in seconds
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
    """Make HTTP request with timeout and retry logic.

    Retries up to 3 times with exponential backoff (1s, then 2s) on any
    RequestException, including non-2xx responses surfaced by
    raise_for_status(); the last failure is re-raised to the caller.
    """
    kwargs.setdefault('timeout', TIMEOUT)  # honor a caller-supplied timeout if present
    for attempt in range(3):
        try:
            response = requests.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except RequestException as e:
            if attempt == 2:  # Last attempt
                raise
            sleep_time = 2**attempt  # exponential backoff: 1s, then 2s
            print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {sleep_time}s: {e}')
            time.sleep(sleep_time)
    # This line should never be reached, but satisfies type checker
    raise RuntimeError('Unexpected error in retry logic')
def create_task(instructions: str) -> str:
    """
    Create a new browser automation task.

    Args:
        instructions: Natural language description of what the agent should do

    Returns:
        task_id: Unique identifier for the created task
    """
    print(f'📝 Creating task: {instructions}')
    payload = {
        'task': instructions,
        'llm_model': 'gpt-4.1-mini',  # Cost-effective model
        'max_agent_steps': 10,  # Prevent runaway costs
        'enable_public_share': True,  # Enable shareable execution URLs
    }
    # _request_with_retry raises on persistent HTTP failure.
    response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
    task_id = response.json()['id']
    print(f'✅ Task created with ID: {task_id}')
    return task_id
def get_task_status(task_id: str) -> dict[str, Any]:
    """Fetch the lightweight status record for *task_id* from the cloud API."""
    return _request_with_retry('get', f'{BASE_URL}/task/{task_id}/status', headers=HEADERS).json()
def get_task_details(task_id: str) -> dict[str, Any]:
    """Fetch the full task record (steps, output, URLs) for *task_id*."""
    return _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS).json()
def wait_for_completion(task_id: str, poll_interval: int = 3) -> dict[str, Any]:
    """
    Wait for task completion and show progress.

    Polls the task endpoint every `poll_interval` seconds, rendering a
    single-line progress indicator (overwritten via '\\r') until the task
    reaches a terminal state ('finished', 'failed' or 'stopped').

    Args:
        task_id: The task to monitor
        poll_interval: How often to check status (seconds)

    Returns:
        Complete task details with output
    """
    print(f'⏳ Monitoring task {task_id}...')
    start_time = time.time()
    # NOTE: the previous version also tracked a `step_count` variable that
    # was updated but never read; that dead bookkeeping has been removed.
    while True:
        details = get_task_details(task_id)
        status = details['status']
        current_steps = len(details.get('steps', []))
        elapsed = time.time() - start_time
        # Build status message
        if status == 'running':
            if current_steps > 0:
                status_msg = f'🔄 Step {current_steps} | ⏱️ {elapsed:.0f}s | 🤖 Agent working...'
            else:
                status_msg = f'🤖 Agent starting... | ⏱️ {elapsed:.0f}s'
        else:
            status_msg = f'🔄 Step {current_steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
        # Clear line and print status
        print(f'\r{status_msg:<80}', end='', flush=True)
        # Check if finished
        if status == 'finished':
            print(f'\r✅ Task completed successfully! ({current_steps} steps in {elapsed:.1f}s)' + ' ' * 20)
            return details
        elif status in ['failed', 'stopped']:
            print(f'\r❌ Task {status} after {current_steps} steps' + ' ' * 30)
            return details
        time.sleep(poll_interval)
def main():
    """Run a basic cloud automation task: create, monitor, print results."""
    print('🚀 Browser Use Cloud - Basic Task Example')
    print('=' * 50)
    # Define a simple search task (using DuckDuckGo to avoid captchas)
    task_description = (
        "Go to DuckDuckGo and search for 'browser automation tools'. Tell me the top 3 results with their titles and URLs."
    )
    try:
        # Step 1: Create the task
        task_id = create_task(task_description)
        # Step 2: Wait for completion
        result = wait_for_completion(task_id)
        # Step 3: Display results
        print('\n📊 Results:')
        print('-' * 30)
        print(f'Status: {result["status"]}')
        print(f'Steps taken: {len(result.get("steps", []))}')
        if result.get('output'):
            print(f'Output: {result["output"]}')
        else:
            print('No output available')
        # Show share URLs for viewing execution
        if result.get('live_url'):
            print(f'\n🔗 Live Preview: {result["live_url"]}')
        if result.get('public_share_url'):
            print(f'🌐 Share URL: {result["public_share_url"]}')
        elif result.get('share_url'):
            print(f'🌐 Share URL: {result["share_url"]}')
        if not result.get('live_url') and not result.get('public_share_url') and not result.get('share_url'):
            print("\n💡 Tip: Add 'enable_public_share': True to task payload to get shareable URLs")
    except requests.exceptions.RequestException as e:
        print(f'❌ API Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')


if __name__ == '__main__':
    main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/cloud/03_structured_output.py | examples/cloud/03_structured_output.py | """
Cloud Example 3: Structured JSON Output 📋
==========================================
This example demonstrates how to get structured, validated JSON output:
- Define Pydantic schemas for type safety
- Extract structured data from websites
- Validate and parse JSON responses
- Handle different data types and nested structures
Perfect for: Data extraction, API integration, structured analysis
Cost: ~$0.06 (1 task + 5-6 steps with GPT-4.1 mini)
"""
import argparse
import json
import os
import time
from typing import Any
import requests
from pydantic import BaseModel, Field, ValidationError
from requests.exceptions import RequestException
# Configuration
API_KEY = os.getenv('BROWSER_USE_API_KEY')
if not API_KEY:
raise ValueError(
'Please set BROWSER_USE_API_KEY environment variable. You can also create an API key at https://cloud.browser-use.com/new-api-key'
)
BASE_URL = os.getenv('BROWSER_USE_BASE_URL', 'https://api.browser-use.com/api/v1')
TIMEOUT = int(os.getenv('BROWSER_USE_TIMEOUT', '30'))
HEADERS = {'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'}
def _request_with_retry(method: str, url: str, **kwargs) -> requests.Response:
    """Issue an HTTP request, retrying transient failures with exponential backoff.

    Up to three attempts are made, sleeping 1s then 2s between retries.
    The exception from the final failed attempt is re-raised unchanged.
    Any non-2xx response is treated as a failure via raise_for_status().
    """
    kwargs.setdefault('timeout', TIMEOUT)
    attempt = 0
    while attempt < 3:
        try:
            resp = requests.request(method, url, **kwargs)
            resp.raise_for_status()
            return resp
        except RequestException as exc:
            if attempt == 2:  # Last attempt
                raise
            delay = 2**attempt
            print(f'⚠️ Request failed (attempt {attempt + 1}/3), retrying in {delay}s: {exc}')
            time.sleep(delay)
            attempt += 1
    # Defensive guard: the loop above always returns or raises.
    raise RuntimeError('Unexpected error in retry logic')
# Define structured output schemas using Pydantic
class NewsArticle(BaseModel):
    """Schema for a news article.

    Each field's ``description`` is surfaced to the LLM through the generated
    JSON schema, guiding what it should extract.
    """

    title: str = Field(description='The headline of the article')
    summary: str = Field(description='Brief summary of the article')
    url: str = Field(description='Direct link to the article')
    # NOTE(review): optional-typed but no default value, so the key must still
    # be present (possibly null) in the model output for validation to pass.
    published_date: str | None = Field(description='Publication date if available')
    category: str | None = Field(description='Article category/section')
class NewsResponse(BaseModel):
    """Schema for multiple news articles.

    Top-level structured-output envelope returned by the news demo.
    """

    articles: list[NewsArticle] = Field(description='List of news articles')
    source_website: str = Field(description='The website where articles were found')
    extracted_at: str = Field(description='When the data was extracted')
class ProductInfo(BaseModel):
    """Schema for product information extracted from an e-commerce page."""

    name: str = Field(description='Product name')
    price: float = Field(description='Product price in USD')
    # Optional-typed but still a required key in the output (no default).
    rating: float | None = Field(description='Average rating (0-5 scale)')
    availability: str = Field(description='Stock status (in stock, out of stock, etc.)')
    description: str = Field(description='Product description')
class CompanyInfo(BaseModel):
    """Schema for company information extracted from a financial website."""

    name: str = Field(description='Company name')
    stock_symbol: str | None = Field(description='Stock ticker symbol')
    market_cap: str | None = Field(description='Market capitalization')
    industry: str = Field(description='Primary industry')
    headquarters: str = Field(description='Headquarters location')
    founded_year: int | None = Field(description='Year founded')
def create_structured_task(instructions: str, schema_model: type[BaseModel], **kwargs) -> str:
    """
    Create a task that returns structured JSON output.

    Args:
        instructions: Task description
        schema_model: Pydantic model defining the expected output structure
        **kwargs: Additional task parameters (may override the defaults below)

    Returns:
        task_id: Unique identifier for the created task
    """
    print(f'📝 Creating structured task: {instructions}')
    print(f'🏗️ Expected schema: {schema_model.__name__}')

    # The API expects the JSON schema serialized as a string.
    schema_json = json.dumps(schema_model.model_json_schema())

    payload = dict(
        task=instructions,
        structured_output_json=schema_json,
        llm_model='gpt-4.1-mini',
        max_agent_steps=15,
        enable_public_share=True,  # Enable shareable execution URLs
    )
    payload.update(kwargs)

    response = _request_with_retry('post', f'{BASE_URL}/run-task', headers=HEADERS, json=payload)
    new_task_id = response.json()['id']
    print(f'✅ Structured task created: {new_task_id}')
    return new_task_id
def wait_for_structured_completion(task_id: str, max_wait_time: int = 300) -> dict[str, Any]:
    """Poll the cloud API until the task finishes, fails, or times out.

    Args:
        task_id: Id returned by create_structured_task().
        max_wait_time: Maximum seconds to wait before giving up.

    Returns:
        The full task-details dict; its 'output' field holds the JSON result
        (also returned on timeout/failure so callers can inspect it).
    """
    print(f'⏳ Waiting for structured output (max {max_wait_time}s)...')
    start_time = time.time()
    while True:
        # The /status endpoint yields the bare status value, compared below
        # against 'running' / 'finished' / 'failed' / 'stopped'.
        response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}/status', headers=HEADERS)
        status = response.json()
        elapsed = time.time() - start_time
        # Check for timeout
        if elapsed > max_wait_time:
            print(f'\r⏰ Task timeout after {max_wait_time}s - stopping wait' + ' ' * 30)
            # Get final details before timeout
            details_response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
            details = details_response.json()
            return details
        # Get step count from full details for better progress tracking
        # NOTE(review): this is a second HTTP call every poll iteration; the
        # details payload presumably also carries the status, so the two calls
        # could likely be consolidated — confirm against the API docs.
        details_response = _request_with_retry('get', f'{BASE_URL}/task/{task_id}', headers=HEADERS)
        details = details_response.json()
        steps = len(details.get('steps', []))
        # Build status message
        if status == 'running':
            status_msg = f'📋 Structured task | Step {steps} | ⏱️ {elapsed:.0f}s | 🔄 Extracting...'
        else:
            status_msg = f'📋 Structured task | Step {steps} | ⏱️ {elapsed:.0f}s | Status: {status}'
        # Clear line and show status ('\r' overwrites the previous line in place)
        print(f'\r{status_msg:<80}', end='', flush=True)
        if status == 'finished':
            print(f'\r✅ Structured data extracted! ({steps} steps in {elapsed:.1f}s)' + ' ' * 20)
            return details
        elif status in ['failed', 'stopped']:
            print(f'\r❌ Task {status} after {steps} steps' + ' ' * 30)
            return details
        time.sleep(3)
def validate_and_display_output(output: str, schema_model: type[BaseModel]):
    """
    Validate the JSON output against the schema and display results.

    Args:
        output: Raw JSON string from the task
        schema_model: Pydantic model for validation

    Returns:
        The validated model instance, or None if parsing/validation failed.
    """
    print('\n📊 Structured Output Analysis')
    print('=' * 40)
    try:
        # Parse and validate the JSON
        parsed_data = schema_model.model_validate_json(output)
        print('✅ JSON validation successful!')
        # Pretty print the structured data
        print('\n📋 Parsed Data:')
        print('-' * 20)
        print(parsed_data.model_dump_json(indent=2))
        # Display specific fields based on model type
        if isinstance(parsed_data, NewsResponse):
            print(f'\n📰 Found {len(parsed_data.articles)} articles from {parsed_data.source_website}')
            for i, article in enumerate(parsed_data.articles[:3], 1):
                print(f'\n{i}. {article.title}')
                print(f' Summary: {article.summary[:100]}...')
                print(f' URL: {article.url}')
        elif isinstance(parsed_data, ProductInfo):
            print(f'\n🛍️ Product: {parsed_data.name}')
            print(f' Price: ${parsed_data.price}')
            print(f' Rating: {parsed_data.rating}/5' if parsed_data.rating else ' Rating: N/A')
            print(f' Status: {parsed_data.availability}')
        elif isinstance(parsed_data, CompanyInfo):
            print(f'\n🏢 Company: {parsed_data.name}')
            print(f' Industry: {parsed_data.industry}')
            print(f' Headquarters: {parsed_data.headquarters}')
            if parsed_data.founded_year:
                print(f' Founded: {parsed_data.founded_year}')
        return parsed_data
    except ValidationError as e:
        print('❌ JSON validation failed!')
        print(f'Errors: {e}')
        print(f'\nRaw output: {output[:500]}...')
        return None
    # NOTE(review): with pydantic v2, model_validate_json raises
    # ValidationError even for malformed JSON, so this branch is likely
    # unreachable — kept as a defensive fallback; confirm pydantic version.
    except json.JSONDecodeError as e:
        print('❌ Invalid JSON format!')
        print(f'Error: {e}')
        print(f'\nRaw output: {output[:500]}...')
        return None
def demo_news_extraction():
    """Demo: Extract structured news data."""
    print('\n📰 Demo 1: News Article Extraction')
    print('-' * 40)

    task = """
    Go to a major news website (like BBC, CNN, or Reuters) and extract information
    about the top 3 news articles. For each article, get the title, summary, URL,
    and any other available metadata.
    """

    result = wait_for_structured_completion(create_structured_task(task, NewsResponse))

    output = result.get('output')
    if not output:
        print('❌ No structured output received')
        return None

    parsed = validate_and_display_output(output, NewsResponse)

    # Surface any execution/replay links returned by the API.
    if result.get('live_url'):
        print(f'\n🔗 Live Preview: {result["live_url"]}')
    if result.get('public_share_url'):
        print(f'🌐 Share URL: {result["public_share_url"]}')
    elif result.get('share_url'):
        print(f'🌐 Share URL: {result["share_url"]}')
    return parsed
def demo_product_extraction():
    """Demo: Extract structured product data."""
    print('\n🛍️ Demo 2: Product Information Extraction')
    print('-' * 40)

    task = """
    Go to Amazon and search for 'wireless headphones'. Find the first product result
    and extract detailed information including name, price, rating, availability,
    and description.
    """

    result = wait_for_structured_completion(create_structured_task(task, ProductInfo))

    output = result.get('output')
    if not output:
        print('❌ No structured output received')
        return None

    parsed = validate_and_display_output(output, ProductInfo)

    # Surface any execution/replay links returned by the API.
    if result.get('live_url'):
        print(f'\n🔗 Live Preview: {result["live_url"]}')
    if result.get('public_share_url'):
        print(f'🌐 Share URL: {result["public_share_url"]}')
    elif result.get('share_url'):
        print(f'🌐 Share URL: {result["share_url"]}')
    return parsed
def demo_company_extraction():
    """Demo: Extract structured company data."""
    print('\n🏢 Demo 3: Company Information Extraction')
    print('-' * 40)

    task = """
    Go to a financial website and look up information about Apple Inc.
    Extract company details including name, stock symbol, market cap,
    industry, headquarters, and founding year.
    """

    result = wait_for_structured_completion(create_structured_task(task, CompanyInfo))

    output = result.get('output')
    if not output:
        print('❌ No structured output received')
        return None

    parsed = validate_and_display_output(output, CompanyInfo)

    # Surface any execution/replay links returned by the API.
    if result.get('live_url'):
        print(f'\n🔗 Live Preview: {result["live_url"]}')
    if result.get('public_share_url'):
        print(f'🌐 Share URL: {result["public_share_url"]}')
    elif result.get('share_url'):
        print(f'🌐 Share URL: {result["share_url"]}')
    return parsed
def main():
    """Demonstrate structured output extraction."""
    print('📋 Browser Use Cloud - Structured JSON Output')
    print('=' * 50)
    print('🎯 Features:')
    print('• Type-safe Pydantic schemas')
    print('• Automatic JSON validation')
    print('• Structured data extraction')
    print('• Multiple output formats')
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser(description='Structured output extraction demo')
        parser.add_argument('--demo', choices=['news', 'product', 'company', 'all'], default='news', help='Which demo to run')
        args = parser.parse_args()
        print(f'\n🔍 Running {args.demo} demo(s)...')
        # Dispatch table: each choice maps to the demos it should run, in order.
        demo_plan = {
            'news': (demo_news_extraction,),
            'product': (demo_product_extraction,),
            'company': (demo_company_extraction,),
            'all': (demo_news_extraction, demo_product_extraction, demo_company_extraction),
        }
        for run_demo in demo_plan[args.demo]:
            run_demo()
    except requests.exceptions.RequestException as e:
        print(f'❌ API Error: {e}')
    except Exception as e:
        print(f'❌ Error: {e}')
if __name__ == '__main__':
main()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/playwright_integration.py | examples/browser/playwright_integration.py | """
Key features:
1. Browser-Use and Playwright sharing the same Chrome instance via CDP
2. Take actions with Playwright and continue with Browser-Use actions
3. Let the agent call Playwright functions like screenshot or click on selectors
"""
import asyncio
import os
import subprocess
import sys
import tempfile
from pydantic import BaseModel, Field
# Check for required dependencies first - before other imports
try:
import aiohttp # type: ignore
from playwright.async_api import Browser, Page, async_playwright # type: ignore
except ImportError as e:
print(f'❌ Missing dependencies for this example: {e}')
print('This example requires: playwright aiohttp')
print('Install with: uv add playwright aiohttp')
print('Also run: playwright install chromium')
sys.exit(1)
from browser_use import Agent, BrowserSession, ChatOpenAI, Tools
from browser_use.agent.views import ActionResult
# Global Playwright browser instance - shared between custom actions
playwright_browser: Browser | None = None
playwright_page: Page | None = None
# Custom action parameter models
class PlaywrightFillFormAction(BaseModel):
    """Parameters for Playwright form filling action.

    Registered as the param_model of the `playwright_fill_form` tool below.
    """

    customer_name: str = Field(..., description='Customer name to fill')
    phone_number: str = Field(..., description='Phone number to fill')
    email: str = Field(..., description='Email address to fill')
    size_option: str = Field(..., description='Size option (small/medium/large)')
class PlaywrightScreenshotAction(BaseModel):
    """Parameters for Playwright screenshot action."""

    filename: str = Field(default='playwright_screenshot.png', description='Filename for screenshot')
    # Quality is only honored for JPEG output; ignored for PNG filenames.
    quality: int | None = Field(default=None, description='JPEG quality (1-100), only for .jpg/.jpeg files')
class PlaywrightGetTextAction(BaseModel):
    """Parameters for getting text using Playwright selectors."""

    selector: str = Field(..., description='CSS selector to get text from. Use "title" for page title.')
async def start_chrome_with_debug_port(port: int = 9222):
    """
    Start Chrome with remote debugging enabled.

    Args:
        port: TCP port for the Chrome DevTools Protocol endpoint.

    Returns:
        The launched Chrome asyncio subprocess.

    Raises:
        RuntimeError: if no Chrome/Chromium binary is found, or the CDP
            endpoint does not respond within ~20 seconds.
    """
    # Create temporary directory for Chrome user data
    # NOTE(review): this directory is never removed — consider cleaning it up
    # when the Chrome process is terminated.
    user_data_dir = tempfile.mkdtemp(prefix='chrome_cdp_')
    # Chrome launch command — candidate executable locations
    chrome_paths = [
        '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',  # macOS
        '/usr/bin/google-chrome',  # Linux
        '/usr/bin/chromium-browser',  # Linux Chromium
        'chrome',  # Windows/PATH
        'chromium',  # Generic
    ]
    chrome_exe = None
    for path in chrome_paths:
        # Bare command names ('chrome'/'chromium') resolve via PATH, so they
        # bypass the existence check and are probed directly.
        if os.path.exists(path) or path in ['chrome', 'chromium']:
            try:
                # Test if executable works by asking for its version
                test_proc = await asyncio.create_subprocess_exec(
                    path, '--version', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
                )
                await test_proc.wait()
                chrome_exe = path
                break
            except Exception:
                continue
    if not chrome_exe:
        raise RuntimeError('❌ Chrome not found. Please install Chrome or Chromium.')
    # Chrome command arguments
    cmd = [
        chrome_exe,
        f'--remote-debugging-port={port}',
        f'--user-data-dir={user_data_dir}',
        '--no-first-run',
        '--no-default-browser-check',
        '--disable-extensions',
        'about:blank',  # Start with blank page
    ]
    # Start Chrome process (output discarded)
    process = await asyncio.create_subprocess_exec(*cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    # Wait for Chrome to start and CDP to be ready: poll /json/version once
    # per second, up to 20 times.
    cdp_ready = False
    for _ in range(20):  # 20 second timeout
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f'http://localhost:{port}/json/version', timeout=aiohttp.ClientTimeout(total=1)
                ) as response:
                    if response.status == 200:
                        cdp_ready = True
                        break
        except Exception:
            pass
        await asyncio.sleep(1)
    if not cdp_ready:
        process.terminate()
        raise RuntimeError('❌ Chrome failed to start with CDP')
    return process
async def connect_playwright_to_cdp(cdp_url: str):
    """
    Connect Playwright to the same Chrome instance Browser-Use is using.
    This enables custom actions to use Playwright functions.

    Side effects: sets the module globals `playwright_browser` and
    `playwright_page` that the custom tool actions below rely on.
    """
    global playwright_browser, playwright_page
    # NOTE(review): the started playwright driver handle is not retained, so
    # playwright.stop() is never called — acceptable for a demo script.
    playwright = await async_playwright().start()
    playwright_browser = await playwright.chromium.connect_over_cdp(cdp_url)
    # Get or create a page: reuse the first page of the first context if one
    # already exists, otherwise open a fresh context and page.
    if playwright_browser and playwright_browser.contexts and playwright_browser.contexts[0].pages:
        playwright_page = playwright_browser.contexts[0].pages[0]
    elif playwright_browser:
        context = await playwright_browser.new_context()
        playwright_page = await context.new_page()
# Create custom tools that use Playwright functions
tools = Tools()
@tools.registry.action(
    "Fill out a form using Playwright's precise form filling capabilities. This uses Playwright selectors for reliable form interaction.",
    param_model=PlaywrightFillFormAction,
)
async def playwright_fill_form(params: PlaywrightFillFormAction, browser_session: BrowserSession):
    """
    Custom action that uses Playwright to fill forms with high precision.
    This demonstrates how to create Browser-Use actions that leverage Playwright's capabilities.

    Requires the module-global `playwright_page` to have been set by
    connect_playwright_to_cdp() before the agent invokes this action.
    The selectors target the httpbin.org/forms/post demo form.
    """
    try:
        if not playwright_page:
            return ActionResult(error='Playwright not connected. Run setup first.')
        # Filling form with Playwright's precise selectors
        # Wait for form to be ready and fill basic fields
        await playwright_page.wait_for_selector('input[name="custname"]', timeout=10000)
        await playwright_page.fill('input[name="custname"]', params.customer_name)
        await playwright_page.fill('input[name="custtel"]', params.phone_number)
        await playwright_page.fill('input[name="custemail"]', params.email)
        # Handle size selection - check if it's a select dropdown or radio buttons
        size_select = playwright_page.locator('select[name="size"]')
        size_radio = playwright_page.locator(f'input[name="size"][value="{params.size_option}"]')
        if await size_select.count() > 0:
            # It's a select dropdown
            await playwright_page.select_option('select[name="size"]', params.size_option)
        elif await size_radio.count() > 0:
            # It's radio buttons
            await playwright_page.check(f'input[name="size"][value="{params.size_option}"]')
        else:
            raise ValueError(f'Could not find size input field for value: {params.size_option}')
        # Get form data back to verify it was filled
        form_data = {}
        form_data['name'] = await playwright_page.input_value('input[name="custname"]')
        form_data['phone'] = await playwright_page.input_value('input[name="custtel"]')
        form_data['email'] = await playwright_page.input_value('input[name="custemail"]')
        # Get size value based on input type
        if await size_select.count() > 0:
            form_data['size'] = await playwright_page.input_value('select[name="size"]')
        else:
            # For radio buttons, find the checked one
            checked_radio = playwright_page.locator('input[name="size"]:checked')
            if await checked_radio.count() > 0:
                form_data['size'] = await checked_radio.get_attribute('value')
            else:
                form_data['size'] = 'none selected'
        success_msg = f'✅ Form filled successfully with Playwright: {form_data}'
        return ActionResult(
            extracted_content=success_msg, include_in_memory=True, long_term_memory=f'Filled form with: {form_data}'
        )
    except Exception as e:
        # The ValueError raised above is also funneled into an error result here.
        error_msg = f'❌ Playwright form filling failed: {str(e)}'
        return ActionResult(error=error_msg)
@tools.registry.action(
    "Take a screenshot using Playwright's screenshot capabilities with high quality and precision.",
    param_model=PlaywrightScreenshotAction,
)
async def playwright_screenshot(params: PlaywrightScreenshotAction, browser_session: BrowserSession):
    """
    Custom action that uses Playwright's advanced screenshot features.

    Captures the full page (not just the viewport) and writes it to
    params.filename. Requires `playwright_page` to be connected first.
    """
    try:
        if not playwright_page:
            return ActionResult(error='Playwright not connected. Run setup first.')
        # Taking screenshot with Playwright
        # Use Playwright's screenshot with full page capture
        screenshot_kwargs = {'path': params.filename, 'full_page': True}
        # Add quality parameter only for JPEG files (Playwright rejects it for PNG)
        if params.quality is not None and params.filename.lower().endswith(('.jpg', '.jpeg')):
            screenshot_kwargs['quality'] = params.quality
        await playwright_page.screenshot(**screenshot_kwargs)
        success_msg = f'✅ Screenshot saved as {params.filename} using Playwright'
        return ActionResult(
            extracted_content=success_msg, include_in_memory=True, long_term_memory=f'Screenshot saved: {params.filename}'
        )
    except Exception as e:
        error_msg = f'❌ Playwright screenshot failed: {str(e)}'
        return ActionResult(error=error_msg)
@tools.registry.action(
    "Extract text from elements using Playwright's powerful CSS selectors and XPath support.", param_model=PlaywrightGetTextAction
)
async def playwright_get_text(params: PlaywrightGetTextAction, browser_session: BrowserSession):
    """
    Custom action that uses Playwright's advanced text extraction with CSS selectors and XPath.

    The literal selector "title" is special-cased to return the document
    title via page.title(); all other selectors match the first element only.
    Requires `playwright_page` to be connected first.
    """
    try:
        if not playwright_page:
            return ActionResult(error='Playwright not connected. Run setup first.')
        # Extracting text with Playwright selectors
        # Handle special selectors
        if params.selector.lower() == 'title':
            # Use page.title() for title element
            text_content = await playwright_page.title()
            result_data = {
                'selector': 'title',
                'text_content': text_content,
                'inner_text': text_content,
                'tag_name': 'TITLE',
                'is_visible': True,
            }
        else:
            # Use Playwright's robust element selection and text extraction
            element = playwright_page.locator(params.selector).first
            if await element.count() == 0:
                error_msg = f'❌ No element found with selector: {params.selector}'
                return ActionResult(error=error_msg)
            text_content = await element.text_content()
            inner_text = await element.inner_text()
            # Get additional element info for the result payload
            tag_name = await element.evaluate('el => el.tagName')
            is_visible = await element.is_visible()
            result_data = {
                'selector': params.selector,
                'text_content': text_content,
                'inner_text': inner_text,
                'tag_name': tag_name,
                'is_visible': is_visible,
            }
        success_msg = f'✅ Extracted text using Playwright: {result_data}'
        return ActionResult(
            extracted_content=str(result_data),
            include_in_memory=True,
            long_term_memory=f'Extracted from {params.selector}: {result_data["text_content"]}',
        )
    except Exception as e:
        error_msg = f'❌ Playwright text extraction failed: {str(e)}'
        return ActionResult(error=error_msg)
async def main():
    """
    Main function demonstrating Browser-Use + Playwright integration with custom actions.

    Flow: launch Chrome with CDP → attach Playwright → attach a Browser-Use
    session to the same CDP endpoint → run an agent that mixes built-in and
    custom Playwright-backed actions → clean up browser + Chrome process.
    """
    print('🚀 Advanced Playwright + Browser-Use Integration with Custom Actions')
    chrome_process = None
    try:
        # Step 1: Start Chrome with CDP debugging
        chrome_process = await start_chrome_with_debug_port()
        cdp_url = 'http://localhost:9222'
        # Step 2: Connect Playwright to the same Chrome instance
        await connect_playwright_to_cdp(cdp_url)
        # Step 3: Create Browser-Use session connected to same Chrome
        browser_session = BrowserSession(cdp_url=cdp_url)
        # Step 4: Create AI agent with our custom Playwright-powered tools
        agent = Agent(
            task="""
        Please help me demonstrate the integration between Browser-Use and Playwright:
        1. First, navigate to https://httpbin.org/forms/post
        2. Use the 'playwright_fill_form' action to fill the form with these details:
        - Customer name: "Alice Johnson"
        - Phone: "555-9876"
        - Email: "alice@demo.com"
        - Size: "large"
        3. Take a screenshot using the 'playwright_screenshot' action and save it as "form_demo.png"
        4. Extract the title of the page using 'playwright_get_text' action with selector "title"
        5. Finally, submit the form and tell me what happened
        This demonstrates how Browser-Use AI can orchestrate tasks while using Playwright's precise capabilities for specific operations.
        """,
            llm=ChatOpenAI(model='gpt-4.1-mini'),
            tools=tools,  # Our custom tools with Playwright actions
            browser_session=browser_session,
        )
        print('🎯 Starting AI agent with custom Playwright actions...')
        # Step 5: Run the agent - it will use both Browser-Use actions and our custom Playwright actions
        result = await agent.run()
        # Keep browser open briefly to see results
        print(f'✅ Integration demo completed! Result: {result}')
        await asyncio.sleep(2)  # Brief pause to see results
    except Exception as e:
        print(f'❌ Error: {e}')
        raise
    finally:
        # Clean up resources: close the Playwright connection first, then
        # terminate Chrome (escalating to kill if it ignores SIGTERM for 5s).
        if playwright_browser:
            await playwright_browser.close()
        if chrome_process:
            chrome_process.terminate()
            try:
                await asyncio.wait_for(chrome_process.wait(), 5)
            except TimeoutError:
                chrome_process.kill()
        print('✅ Cleanup complete')
if __name__ == '__main__':
# Run the advanced integration demo
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/parallel_browser.py | examples/browser/parallel_browser.py | import asyncio
from browser_use import Agent, Browser, ChatOpenAI
# NOTE: This is still experimental, and agents might conflict each other.
async def main():
    """Run three independent agents concurrently, each in its own browser."""
    # One isolated browser per agent; separate user-data dirs keep the
    # profiles from colliding.
    browsers = []
    for idx in range(3):
        browsers.append(
            Browser(
                user_data_dir=f'./temp-profile-{idx}',
                headless=False,
            )
        )

    search_tasks = (
        'Search for "browser automation" on Google',
        'Search for "AI agents" on DuckDuckGo',
        'Visit Wikipedia and search for "web scraping"',
    )
    agents = [
        Agent(task=task, browser=browser, llm=ChatOpenAI(model='gpt-4.1-mini'))
        for task, browser in zip(search_tasks, browsers)
    ]

    # Launch all runs concurrently; exceptions are collected rather than raised.
    results = await asyncio.gather(*(agent.run() for agent in agents), return_exceptions=True)
    print('🎉 All agents completed!')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/using_cdp.py | examples/browser/using_cdp.py | """
Simple demonstration of the CDP feature.
To test this locally, follow these steps:
1. Find the chrome executable file.
2. On mac by default, the chrome is in `/Applications/Google Chrome.app/Contents/MacOS/Google Chrome`
3. Add the following argument to the shortcut:
`--remote-debugging-port=9222`
4. Open a web browser and navigate to `http://localhost:9222/json/version` to verify that the Remote Debugging Protocol (CDP) is running.
5. Launch this example.
Full command Mac:
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" --remote-debugging-port=9222
@dev You need to set the `OPENAI_API_KEY` environment variable before proceeding.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Tools
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.llm import ChatOpenAI
browser_session = BrowserSession(browser_profile=BrowserProfile(cdp_url='http://localhost:9222', is_local=True))
tools = Tools()
async def main():
    """Run one agent against the already-running Chrome (CDP on :9222), then kill the session."""
    agent = Agent(
        task='Visit https://duckduckgo.com and search for "browser-use founders"',
        llm=ChatOpenAI(model='gpt-4.1-mini'),
        tools=tools,
        # Reuses the module-level session attached to the external Chrome.
        browser_session=browser_session,
    )
    await agent.run()
    await browser_session.kill()
input('Press Enter to close...')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/save_cookies.py | examples/browser/save_cookies.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Browser
# Connect to your existing Chrome browser
browser = Browser(
executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
user_data_dir='~/Library/Application Support/Google/Chrome',
profile_directory='Default',
)
async def main():
    """Start the configured Chrome profile and export its storage state to JSON."""
    await browser.start()
    # Dumps cookies/site storage for later reuse as a session snapshot.
    await browser.export_storage_state('storage_state3.json')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/cloud_browser.py | examples/browser/cloud_browser.py | """
Examples of using Browser-Use cloud browser service.
Prerequisites:
1. Set BROWSER_USE_API_KEY environment variable
2. Active subscription at https://cloud.browser-use.com
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent, Browser, ChatBrowserUse
load_dotenv()
async def basic():
    """Simplest usage - just pass cloud params directly.

    Requires BROWSER_USE_API_KEY to be set (see module docstring).
    """
    browser = Browser(use_cloud=True)
    agent = Agent(
        task='Go to github.com/browser-use/browser-use and tell me the star count',
        llm=ChatBrowserUse(),
        browser=browser,
    )
    result = await agent.run()
    print(f'Result: {result}')
async def full_config():
    """Full cloud configuration with specific profile.

    Routes the cloud browser through a Japanese proxy; the task verifies the
    egress IP/location. Presumably passing any cloud_* parameter implies a
    cloud browser — confirm against the Browser API docs.
    """
    browser = Browser(
        # cloud_profile_id='21182245-590f-4712-8888-9611651a024c',
        cloud_proxy_country_code='jp',
        cloud_timeout=60,
    )
    agent = Agent(
        task='go and check my ip address and the location',
        llm=ChatBrowserUse(),
        browser=browser,
    )
    result = await agent.run()
    print(f'Result: {result}')
async def main():
    """Entry point: run the full-config demo (basic() left disabled by choice)."""
    try:
        # await basic()
        await full_config()
    except Exception as e:
        print(f'Error: {e}')
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/browser/real_browser.py | examples/browser/real_browser.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Browser, ChatGoogle
# Connect to your existing Chrome browser
browser = Browser(
executable_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
user_data_dir='~/Library/Application Support/Google/Chrome',
profile_directory='Default',
)
# NOTE: You have to close all Chrome browsers before running this example so that we can launch chrome in debug mode.
async def main():
    """Run one shopping-search task against the user's real Chrome profile."""
    # save storage state
    agent = Agent(
        llm=ChatGoogle(model='gemini-flash-latest'),
        # Google blocks this approach, so we use a different search engine
        task='go to amazon.com and search for pens to draw on whiteboards',
        # Module-level Browser configured with the local Chrome binary/profile.
        browser=browser,
    )
    await agent.run()
if __name__ == '__main__':
asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/ui/streamlit_demo.py | examples/ui/streamlit_demo.py | """
To use it, you'll need to install streamlit, and run with:
python -m streamlit run streamlit_demo.py
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
import streamlit as st # type: ignore
from browser_use import Agent
from browser_use.browser import BrowserSession
from browser_use.tools.service import Tools
if os.name == 'nt':
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
# Function to get the LLM based on provider
def get_llm(provider: str):
    """Build the LLM client for the chosen provider ('anthropic' or 'openai').

    On a missing API key or unknown provider, shows a Streamlit error and
    halts the script run via st.stop() instead of raising.
    """
    if provider == 'anthropic':
        from browser_use.llm import ChatAnthropic

        api_key = os.getenv('ANTHROPIC_API_KEY')
        if not api_key:
            st.error('Error: ANTHROPIC_API_KEY is not set. Please provide a valid API key.')
            st.stop()
        return ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.0)
    elif provider == 'openai':
        from browser_use import ChatOpenAI

        api_key = os.getenv('OPENAI_API_KEY')
        if not api_key:
            st.error('Error: OPENAI_API_KEY is not set. Please provide a valid API key.')
            st.stop()
        return ChatOpenAI(model='gpt-4.1', temperature=0.0)
    else:
        st.error(f'Unsupported provider: {provider}')
        st.stop()
    return None  # Never reached, but helps with type checking
# Function to initialize the agent
def initialize_agent(query: str, provider: str):
    """Build an Agent for the query plus the BrowserSession it runs in.

    Returns both so the UI can later close the session independently.
    """
    llm = get_llm(provider)
    tools = Tools()
    browser_session = BrowserSession()
    return Agent(
        task=query,
        llm=llm,  # type: ignore
        tools=tools,
        browser_session=browser_session,
        use_vision=True,
        max_actions_per_step=1,
    ), browser_session
# Streamlit UI: one text input, a provider picker, and a run button.
st.title('Automated Browser Agent with LLMs 🤖')
query = st.text_input('Enter your query:', 'go to reddit and search for posts about browser-use')
provider = st.radio('Select LLM Provider:', ['openai', 'anthropic'], index=0)
if st.button('Run Agent'):
    st.write('Initializing agent...')
    agent, browser_session = initialize_agent(query, provider)

    async def run_agent():
        # Drive the agent to completion inside a single asyncio.run() call.
        with st.spinner('Running automation...'):
            await agent.run(max_steps=25)
        st.success('Task completed! 🎉')

    asyncio.run(run_agent())
    # Offer a button that tears down the browser session on click.
    st.button('Close Browser', on_click=lambda: asyncio.run(browser_session.kill()))
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/ui/command_line.py | examples/ui/command_line.py | """
To Use It:
Example 1: Using OpenAI (default), with default task: 'go to reddit and search for posts about browser-use'
python command_line.py
Example 2: Using OpenAI with a Custom Query
python command_line.py --query "go to google and search for browser-use"
Example 3: Using Anthropic's Claude Model with a Custom Query
python command_line.py --query "find latest Python tutorials on Medium" --provider anthropic
"""
import argparse
import asyncio
import os
import sys
# Ensure local repository (browser_use) is accessible
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.browser import BrowserSession
from browser_use.tools.service import Tools
def get_llm(provider: str):
	"""Instantiate the chat model for *provider*.

	Raises:
		ValueError: if the provider is unsupported or its API key is missing.
	"""
	if provider == 'openai':
		from browser_use import ChatOpenAI

		if not os.getenv('OPENAI_API_KEY'):
			raise ValueError('Error: OPENAI_API_KEY is not set. Please provide a valid API key.')
		return ChatOpenAI(model='gpt-4.1', temperature=0.0)

	if provider == 'anthropic':
		from browser_use.llm import ChatAnthropic

		if not os.getenv('ANTHROPIC_API_KEY'):
			raise ValueError('Error: ANTHROPIC_API_KEY is not set. Please provide a valid API key.')
		return ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.0)

	raise ValueError(f'Unsupported provider: {provider}')
def parse_arguments():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description='Automate browser tasks using an LLM agent.')
parser.add_argument(
'--query', type=str, help='The query to process', default='go to reddit and search for posts about browser-use'
)
parser.add_argument(
'--provider',
type=str,
choices=['openai', 'anthropic'],
default='openai',
help='The model provider to use (default: openai)',
)
return parser.parse_args()
def initialize_agent(query: str, provider: str):
	"""Initialize the browser agent with the given query and provider.

	Returns:
		Tuple of (Agent, BrowserSession) so the caller can tear the session down.
	"""
	session = BrowserSession()
	agent = Agent(
		task=query,
		llm=get_llm(provider),
		tools=Tools(),
		browser_session=session,
		use_vision=True,
		max_actions_per_step=1,
	)
	return agent, session
async def main():
	"""Parse CLI args, run the agent, then keep the browser open until Enter is pressed."""
	args = parse_arguments()
	agent, browser_session = initialize_agent(args.query, args.provider)

	await agent.run(max_steps=25)

	# Let the user inspect the final browser state before tearing it down.
	input('Press Enter to close the browser...')
	await browser_session.kill()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/ui/gradio_demo.py | examples/ui/gradio_demo.py | # pyright: reportMissingImports=false
import asyncio
import os
import sys
from dataclasses import dataclass
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
# Third-party imports
import gradio as gr # type: ignore
from rich.console import Console
from rich.panel import Panel
from rich.text import Text
# Local module imports
from browser_use import Agent, ChatOpenAI
@dataclass
class ActionResult:
is_done: bool
extracted_content: str | None
error: str | None
include_in_memory: bool
@dataclass
class AgentHistoryList:
all_results: list[ActionResult]
all_model_outputs: list[dict]
def parse_agent_history(history_str: str) -> None:
	"""Pretty-print each step's extracted content from a stringified agent history."""
	console = Console()

	# Each 'ActionResult(' occurrence marks one step in the stringified history;
	# the text before the first occurrence carries no step data and is dropped.
	chunks = history_str.split('ActionResult(')[1:]

	for step, chunk in enumerate(chunks, 1):
		if 'extracted_content=' not in chunk:
			continue
		# Grab the value between 'extracted_content=' and the following comma.
		text = chunk.split('extracted_content=')[1].split(',')[0].strip("'")
		if text:
			header = Text(f'Step {step}', style='bold blue')
			console.print(Panel(text, title=header, border_style='blue'))
			console.print()

	return None
async def run_browser_task(
	task: str,
	api_key: str,
	model: str = 'gpt-4.1',
	headless: bool = True,
) -> str:
	"""Run a browser-use agent on *task* and return its result as a string.

	Args:
		task: Natural-language task description for the agent.
		api_key: OpenAI API key; exported to the environment for the client.
		model: OpenAI model name passed to ChatOpenAI.
		headless: Currently not wired into the agent — TODO confirm whether it
			should configure the browser session (kept for UI compatibility).

	Returns:
		The stringified agent history, or an error/validation message.
	"""
	if not api_key.strip():
		return 'Please provide an API key'

	os.environ['OPENAI_API_KEY'] = api_key

	try:
		# Bug fix: the 'model' argument was previously ignored and
		# 'gpt-4.1-mini' was hard-coded; honor the caller's selection.
		agent = Agent(
			task=task,
			llm=ChatOpenAI(model=model),
		)
		result = await agent.run()
		# TODO: The result could be parsed better
		return str(result)
	except Exception as e:
		return f'Error: {str(e)}'
def create_ui():
	"""Assemble and return the Gradio Blocks interface for running browser tasks."""
	with gr.Blocks(title='Browser Use GUI') as interface:
		gr.Markdown('# Browser Use Task Automation')
		with gr.Row():
			with gr.Column():
				api_key = gr.Textbox(label='OpenAI API Key', placeholder='sk-...', type='password')
				task = gr.Textbox(
					label='Task Description',
					placeholder='E.g., Find flights from New York to London for next week',
					lines=3,
				)
				model = gr.Dropdown(choices=['gpt-4.1-mini', 'gpt-5', 'o3', 'gpt-5-mini'], label='Model', value='gpt-4.1-mini')
				headless = gr.Checkbox(label='Run Headless', value=False)
				submit_btn = gr.Button('Run Task')
			with gr.Column():
				output = gr.Textbox(label='Output', lines=10, interactive=False)

		def _run_sync(*args):
			# Bridge Gradio's synchronous callback to the async agent runner.
			return asyncio.run(run_browser_task(*args))

		submit_btn.click(
			fn=_run_sync,
			inputs=[task, api_key, model, headless],
			outputs=output,
		)

	return interface
if __name__ == '__main__':
	# Build the interface and serve it locally.
	create_ui().launch()
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/getting_started/01_basic_search.py | examples/getting_started/01_basic_search.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Run a simple Google-search task with the hosted browser-use model."""
	agent = Agent(
		task="Search Google for 'what is browser automation' and tell me the top 3 results",
		llm=ChatBrowserUse(),
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/getting_started/04_multi_step_task.py | examples/getting_started/04_multi_step_task.py | """
Getting Started Example 4: Multi-Step Task
This example demonstrates how to:
- Perform a complex workflow with multiple steps
- Navigate between different pages
- Combine search, form filling, and data extraction
- Handle a realistic end-to-end scenario
This is the most advanced getting started example, combining all previous concepts.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Drive a multi-step research workflow: search, extract, visit, compare."""
	llm = ChatBrowserUse()

	# One task string can describe an entire multi-page workflow; the agent
	# plans and executes the individual steps itself.
	task = """
	I want you to research Python web scraping libraries. Here's what I need:
	1. First, search Google for "best Python web scraping libraries 2024"
	2. Find a reputable article or blog post about this topic
	3. From that article, extract the top 3 recommended libraries
	4. For each library, visit its official website or GitHub page
	5. Extract key information about each library:
	- Name
	- Brief description
	- Main features or advantages
	- GitHub stars (if available)
	Present your findings in a summary format comparing the three libraries.
	"""

	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/getting_started/02_form_filling.py | examples/getting_started/02_form_filling.py | """
Getting Started Example 2: Form Filling
This example demonstrates how to:
- Navigate to a website with forms
- Fill out input fields
- Submit forms
- Handle basic form interactions
This builds on the basic search example by showing more complex interactions.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Fill and submit the httpbin demo form, then report the server response."""
	llm = ChatBrowserUse()

	task = """
	Go to https://httpbin.org/forms/post and fill out the contact form with:
	- Customer name: John Doe
	- Telephone: 555-123-4567
	- Email: john.doe@example.com
	- Size: Medium
	- Topping: cheese
	- Delivery time: now
	- Comments: This is a test form submission
	Then submit the form and tell me what response you get.
	"""

	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/getting_started/05_fast_agent.py | examples/getting_started/05_fast_agent.py | import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, BrowserProfile
# Extra system-prompt text appended via extend_system_message below;
# nudges the model toward fewer, denser steps for lower latency.
SPEED_OPTIMIZATION_PROMPT = """
Speed optimization instructions:
- Be extremely concise and direct in your responses
- Get to the goal as quickly as possible
- Use multi-action sequences whenever possible to reduce steps
"""
async def main():
	"""Demonstrate a latency-optimized agent: fast LLM, minimal waits, flash mode."""
	# 1. Use fast LLM - Llama 4 on Groq for ultra-fast inference
	from browser_use import ChatGroq

	llm = ChatGroq(
		model='meta-llama/llama-4-maverick-17b-128e-instruct',
		temperature=0.0,
	)
	# Alternative fast model:
	# from browser_use import ChatGoogle
	# llm = ChatGoogle(model='gemini-flash-lite-latest')

	# 2. Create speed-optimized browser profile
	profile = BrowserProfile(
		minimum_wait_page_load_time=0.1,
		wait_between_actions=0.1,
		headless=False,
	)

	# 3. Define a speed-focused task
	task = """
	1. Go to reddit https://www.reddit.com/search/?q=browser+agent&type=communities
	2. Click directly on the first 5 communities to open each in new tabs
	3. Find out what the latest post is about, and switch directly to the next tab
	4. Return the latest post summary for each page
	"""

	# 4. Create agent with all speed optimizations
	agent = Agent(
		task=task,
		llm=llm,
		flash_mode=True,  # Disables thinking in the LLM output for maximum speed
		browser_profile=profile,
		extend_system_message=SPEED_OPTIMIZATION_PROMPT,
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/getting_started/03_data_extraction.py | examples/getting_started/03_data_extraction.py | """
Getting Started Example 3: Data Extraction
This example demonstrates how to:
- Navigate to a website with structured data
- Extract specific information from the page
- Process and organize the extracted data
- Return structured results
This builds on previous examples by showing how to get valuable data from websites.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import browser_use
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
	"""Extract the first five quotes (text, author, tags) from quotes.toscrape.com."""
	llm = ChatBrowserUse()

	task = """
	Go to https://quotes.toscrape.com/ and extract the following information:
	- The first 5 quotes on the page
	- The author of each quote
	- The tags associated with each quote
	Present the information in a clear, structured format like:
	Quote 1: "[quote text]" - Author: [author name] - Tags: [tag1, tag2, ...]
	Quote 2: "[quote text]" - Author: [author name] - Tags: [tag1, tag2, ...]
	etc.
	"""

	agent = Agent(task=task, llm=llm)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
browser-use/browser-use | https://github.com/browser-use/browser-use/blob/630f85dd05127c9d42810a5db235a14f5bac9043/examples/custom-functions/onepassword_2fa.py | examples/custom-functions/onepassword_2fa.py | import asyncio
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from onepassword.client import Client # type: ignore # pip install onepassword-sdk
from browser_use import ActionResult, Agent, ChatOpenAI, Tools
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 1Password service-account credentials, supplied via environment variables.
OP_SERVICE_ACCOUNT_TOKEN = os.getenv('OP_SERVICE_ACCOUNT_TOKEN')
OP_ITEM_ID = os.getenv('OP_ITEM_ID')  # Go to 1Password, right click on the item, click "Copy Secret Reference"

# Shared tools registry that the custom action below attaches to.
tools = Tools()
@tools.registry.action('Get 2FA code from 1Password for Google Account', domains=['*.google.com', 'google.com'])
async def get_1password_2fa() -> ActionResult:
	"""Fetch the Google account's one-time passcode from 1Password via its SDK."""
	# setup instructions: https://github.com/1Password/onepassword-sdk-python/#-get-started
	op_client = await Client.authenticate(
		auth=OP_SERVICE_ACCOUNT_TOKEN,
		integration_name='Browser-Use',
		integration_version='v1.0.0',
	)
	code = await op_client.secrets.resolve(f'op://Private/{OP_ITEM_ID}/One-time passcode')
	return ActionResult(extracted_content=code)
async def main():
	"""Run an agent that signs in to Google, using the 1Password action for 2FA."""
	# Example task using the 1Password 2FA action
	task = 'Go to account.google.com, enter username and password, then if prompted for 2FA code, get 2FA code from 1Password for and enter it'

	agent = Agent(task=task, llm=ChatOpenAI(model='gpt-4.1-mini'), tools=tools)
	result = await agent.run()
	print(f'Task completed with result: {result}')


if __name__ == '__main__':
	asyncio.run(main())
| python | MIT | 630f85dd05127c9d42810a5db235a14f5bac9043 | 2026-01-04T14:38:16.467592Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.