| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/arun_vs_arun_many.py | docs/examples/arun_vs_arun_many.py | import asyncio
import time
from crawl4ai.async_webcrawler import AsyncWebCrawler, CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter
VERBOSE = False
async def crawl_sequential(urls):
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
results = []
start_time = time.perf_counter()
async with AsyncWebCrawler() as crawler:
for url in urls:
result_container = await crawler.arun(url=url, config=config)
results.append(result_container[0])
total_time = time.perf_counter() - start_time
return total_time, results
async def crawl_parallel_dispatcher(urls):
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
# Dispatcher with rate limiter enabled (default behavior)
dispatcher = MemoryAdaptiveDispatcher(
rate_limiter=RateLimiter(base_delay=(1.0, 3.0), max_delay=60.0, max_retries=3),
max_session_permit=50,
)
start_time = time.perf_counter()
async with AsyncWebCrawler() as crawler:
result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
results = []
if isinstance(result_container, list):
results = result_container
else:
async for res in result_container:
results.append(res)
total_time = time.perf_counter() - start_time
return total_time, results
async def crawl_parallel_no_rate_limit(urls):
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
# Dispatcher with no rate limiter and a high session permit to avoid queuing
dispatcher = MemoryAdaptiveDispatcher(
rate_limiter=None,
max_session_permit=len(urls) # allow all URLs concurrently
)
start_time = time.perf_counter()
async with AsyncWebCrawler() as crawler:
result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
results = []
if isinstance(result_container, list):
results = result_container
else:
async for res in result_container:
results.append(res)
total_time = time.perf_counter() - start_time
return total_time, results
async def main():
urls = ["https://example.com"] * 100
print(f"Crawling {len(urls)} URLs sequentially...")
seq_time, seq_results = await crawl_sequential(urls)
print(f"Sequential crawling took: {seq_time:.2f} seconds\n")
print(f"Crawling {len(urls)} URLs in parallel using arun_many with dispatcher (with rate limit)...")
disp_time, disp_results = await crawl_parallel_dispatcher(urls)
print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds\n")
print(f"Crawling {len(urls)} URLs in parallel using dispatcher with no rate limiter...")
no_rl_time, no_rl_results = await crawl_parallel_no_rate_limit(urls)
print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds\n")
print("Crawl4ai - Crawling Comparison")
print("--------------------------------------------------------")
print(f"Sequential crawling took: {seq_time:.2f} seconds")
print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds")
print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds")
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/stealth_mode_example.py | docs/examples/stealth_mode_example.py | """
Stealth Mode Example with Crawl4AI
This example demonstrates how to use the stealth mode feature to bypass basic bot detection.
The stealth mode uses playwright-stealth to modify browser fingerprints and behaviors
that are commonly used to detect automated browsers.
Key features demonstrated:
1. Comparing crawling with and without stealth mode
2. Testing against bot detection sites
3. Accessing sites that block automated browsers
4. Best practices for stealth crawling
"""
import asyncio
import json
from typing import Dict, Any
from colorama import Fore, Style, init
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
from crawl4ai.async_logger import AsyncLogger
# Initialize colorama for colored output
init()
# Create a logger for better output
logger = AsyncLogger(verbose=True)
async def test_bot_detection(use_stealth: bool = False) -> Dict[str, Any]:
"""Test against a bot detection service"""
logger.info(
f"Testing bot detection with stealth={'ON' if use_stealth else 'OFF'}",
tag="STEALTH"
)
# Configure browser with or without stealth
browser_config = BrowserConfig(
headless=False, # Use False to see the browser in action
enable_stealth=use_stealth,
viewport_width=1280,
viewport_height=800
)
async with AsyncWebCrawler(config=browser_config) as crawler:
# JavaScript to extract bot detection results
detection_script = """
// Comprehensive bot detection checks
(() => {
const detectionResults = {
// Basic WebDriver detection
webdriver: navigator.webdriver,
// Chrome specific
chrome: !!window.chrome,
chromeRuntime: !!window.chrome?.runtime,
// Automation indicators
automationControlled: navigator.webdriver,
// Permissions API
permissionsPresent: !!navigator.permissions?.query,
// Plugins
pluginsLength: navigator.plugins.length,
pluginsArray: Array.from(navigator.plugins).map(p => p.name),
// Languages
languages: navigator.languages,
language: navigator.language,
// User agent
userAgent: navigator.userAgent,
// Screen and window properties
screen: {
width: screen.width,
height: screen.height,
availWidth: screen.availWidth,
availHeight: screen.availHeight,
colorDepth: screen.colorDepth,
pixelDepth: screen.pixelDepth
},
// WebGL vendor
webglVendor: (() => {
try {
const canvas = document.createElement('canvas');
const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
const ext = gl.getExtension('WEBGL_debug_renderer_info');
return gl.getParameter(ext.UNMASKED_VENDOR_WEBGL);
} catch (e) {
return 'Error';
}
})(),
// Platform
platform: navigator.platform,
// Hardware concurrency
hardwareConcurrency: navigator.hardwareConcurrency,
// Device memory
deviceMemory: navigator.deviceMemory,
// Connection
connection: navigator.connection?.effectiveType
};
// Log results for console capture
console.log('DETECTION_RESULTS:', JSON.stringify(detectionResults, null, 2));
// Return results
return detectionResults;
})();
"""
# Crawl bot detection test page
config = CrawlerRunConfig(
js_code=detection_script,
capture_console_messages=True,
wait_until="networkidle",
delay_before_return_html=2.0 # Give time for all checks to complete
)
result = await crawler.arun(
url="https://bot.sannysoft.com",
config=config
)
if result.success:
# Extract detection results from console
detection_data = None
for msg in result.console_messages or []:
if "DETECTION_RESULTS:" in msg.get("text", ""):
try:
json_str = msg["text"].replace("DETECTION_RESULTS:", "").strip()
detection_data = json.loads(json_str)
except:
pass
# Also try to get from JavaScript execution result
if not detection_data and result.js_execution_result:
detection_data = result.js_execution_result
return {
"success": True,
"url": result.url,
"detection_data": detection_data,
"page_title": result.metadata.get("title", ""),
"stealth_enabled": use_stealth
}
else:
return {
"success": False,
"error": result.error_message,
"stealth_enabled": use_stealth
}
async def test_cloudflare_site(use_stealth: bool = False) -> Dict[str, Any]:
"""Test accessing a Cloudflare-protected site"""
logger.info(
f"Testing Cloudflare site with stealth={'ON' if use_stealth else 'OFF'}",
tag="STEALTH"
)
browser_config = BrowserConfig(
headless=True, # Cloudflare detection works better in headless mode with stealth
enable_stealth=use_stealth,
viewport_width=1920,
viewport_height=1080
)
async with AsyncWebCrawler(config=browser_config) as crawler:
config = CrawlerRunConfig(
wait_until="networkidle",
page_timeout=30000, # 30 seconds
delay_before_return_html=3.0
)
# Test on a site that often shows Cloudflare challenges
result = await crawler.arun(
url="https://nowsecure.nl",
config=config
)
# Check if we hit Cloudflare challenge
cloudflare_detected = False
if result.html:
cloudflare_indicators = [
"Checking your browser",
"Just a moment",
"cf-browser-verification",
"cf-challenge",
"ray ID"
]
cloudflare_detected = any(indicator in result.html for indicator in cloudflare_indicators)
return {
"success": result.success,
"url": result.url,
"cloudflare_challenge": cloudflare_detected,
"status_code": result.status_code,
"page_title": result.metadata.get("title", "") if result.metadata else "",
"stealth_enabled": use_stealth,
"html_snippet": result.html[:500] if result.html else ""
}
async def test_anti_bot_site(use_stealth: bool = False) -> Dict[str, Any]:
"""Test against sites with anti-bot measures"""
logger.info(
f"Testing anti-bot site with stealth={'ON' if use_stealth else 'OFF'}",
tag="STEALTH"
)
browser_config = BrowserConfig(
headless=False,
enable_stealth=use_stealth,
# Additional browser arguments that help with stealth
extra_args=[
"--disable-blink-features=AutomationControlled",
"--disable-features=site-per-process"
] if not use_stealth else [] # These are automatically applied with stealth
)
async with AsyncWebCrawler(config=browser_config) as crawler:
# Some sites check for specific behaviors
behavior_script = """
(async () => {
// Simulate human-like behavior
const sleep = ms => new Promise(resolve => setTimeout(resolve, ms));
// Random mouse movement
const moveX = Math.random() * 100;
const moveY = Math.random() * 100;
// Simulate reading time
await sleep(1000 + Math.random() * 2000);
// Scroll slightly
window.scrollBy(0, 100 + Math.random() * 200);
console.log('Human behavior simulation complete');
return true;
})()
"""
config = CrawlerRunConfig(
js_code=behavior_script,
wait_until="networkidle",
delay_before_return_html=5.0, # Longer delay to appear more human
capture_console_messages=True
)
# Test on a site that implements anti-bot measures
result = await crawler.arun(
url="https://www.g2.com/",
config=config
)
# Check for common anti-bot blocks
blocked_indicators = [
"Access Denied",
"403 Forbidden",
"Security Check",
"Verify you are human",
"captcha",
"challenge"
]
blocked = False
if result.html:
blocked = any(indicator.lower() in result.html.lower() for indicator in blocked_indicators)
return {
"success": result.success and not blocked,
"url": result.url,
"blocked": blocked,
"status_code": result.status_code,
"page_title": result.metadata.get("title", "") if result.metadata else "",
"stealth_enabled": use_stealth
}
async def compare_results():
"""Run all tests with and without stealth mode and compare results"""
print(f"\n{Fore.CYAN}{'='*60}{Style.RESET_ALL}")
print(f"{Fore.CYAN}Crawl4AI Stealth Mode Comparison{Style.RESET_ALL}")
print(f"{Fore.CYAN}{'='*60}{Style.RESET_ALL}\n")
# Test 1: Bot Detection
print(f"{Fore.YELLOW}1. Bot Detection Test (bot.sannysoft.com){Style.RESET_ALL}")
print("-" * 40)
# Without stealth
regular_detection = await test_bot_detection(use_stealth=False)
if regular_detection["success"] and regular_detection["detection_data"]:
print(f"{Fore.RED}Without Stealth:{Style.RESET_ALL}")
data = regular_detection["detection_data"]
print(f" • WebDriver detected: {data.get('webdriver', 'Unknown')}")
print(f" • Chrome: {data.get('chrome', 'Unknown')}")
print(f" • Languages: {data.get('languages', 'Unknown')}")
print(f" • Plugins: {data.get('pluginsLength', 'Unknown')}")
print(f" • User Agent: {data.get('userAgent', 'Unknown')[:60]}...")
# With stealth
stealth_detection = await test_bot_detection(use_stealth=True)
if stealth_detection["success"] and stealth_detection["detection_data"]:
print(f"\n{Fore.GREEN}With Stealth:{Style.RESET_ALL}")
data = stealth_detection["detection_data"]
print(f" • WebDriver detected: {data.get('webdriver', 'Unknown')}")
print(f" • Chrome: {data.get('chrome', 'Unknown')}")
print(f" • Languages: {data.get('languages', 'Unknown')}")
print(f" • Plugins: {data.get('pluginsLength', 'Unknown')}")
print(f" • User Agent: {data.get('userAgent', 'Unknown')[:60]}...")
# Test 2: Cloudflare Site
print(f"\n\n{Fore.YELLOW}2. Cloudflare Protected Site Test{Style.RESET_ALL}")
print("-" * 40)
# Without stealth
regular_cf = await test_cloudflare_site(use_stealth=False)
print(f"{Fore.RED}Without Stealth:{Style.RESET_ALL}")
print(f" • Success: {regular_cf['success']}")
print(f" • Cloudflare Challenge: {regular_cf['cloudflare_challenge']}")
print(f" • Status Code: {regular_cf['status_code']}")
print(f" • Page Title: {regular_cf['page_title']}")
# With stealth
stealth_cf = await test_cloudflare_site(use_stealth=True)
print(f"\n{Fore.GREEN}With Stealth:{Style.RESET_ALL}")
print(f" • Success: {stealth_cf['success']}")
print(f" • Cloudflare Challenge: {stealth_cf['cloudflare_challenge']}")
print(f" • Status Code: {stealth_cf['status_code']}")
print(f" • Page Title: {stealth_cf['page_title']}")
# Test 3: Anti-bot Site
print(f"\n\n{Fore.YELLOW}3. Anti-Bot Site Test{Style.RESET_ALL}")
print("-" * 40)
# Without stealth
regular_antibot = await test_anti_bot_site(use_stealth=False)
print(f"{Fore.RED}Without Stealth:{Style.RESET_ALL}")
print(f" • Success: {regular_antibot['success']}")
print(f" • Blocked: {regular_antibot['blocked']}")
print(f" • Status Code: {regular_antibot['status_code']}")
print(f" • Page Title: {regular_antibot['page_title']}")
# With stealth
stealth_antibot = await test_anti_bot_site(use_stealth=True)
print(f"\n{Fore.GREEN}With Stealth:{Style.RESET_ALL}")
print(f" • Success: {stealth_antibot['success']}")
print(f" • Blocked: {stealth_antibot['blocked']}")
print(f" • Status Code: {stealth_antibot['status_code']}")
print(f" • Page Title: {stealth_antibot['page_title']}")
# Summary
print(f"\n{Fore.CYAN}{'='*60}{Style.RESET_ALL}")
print(f"{Fore.CYAN}Summary:{Style.RESET_ALL}")
print(f"{Fore.CYAN}{'='*60}{Style.RESET_ALL}")
print(f"\nStealth mode helps bypass basic bot detection by:")
print(f" • Hiding webdriver property")
print(f" • Modifying browser fingerprints")
print(f" • Adjusting navigator properties")
print(f" • Emulating real browser plugin behavior")
print(f"\n{Fore.YELLOW}Note:{Style.RESET_ALL} Stealth mode is not a silver bullet.")
print(f"Advanced anti-bot systems may still detect automation.")
print(f"Always respect robots.txt and website terms of service.")
async def stealth_best_practices():
"""Demonstrate best practices for using stealth mode"""
print(f"\n\n{Fore.CYAN}{'='*60}{Style.RESET_ALL}")
print(f"{Fore.CYAN}Stealth Mode Best Practices{Style.RESET_ALL}")
print(f"{Fore.CYAN}{'='*60}{Style.RESET_ALL}\n")
# Best Practice 1: Combine with realistic behavior
print(f"{Fore.YELLOW}1. Combine with Realistic Behavior:{Style.RESET_ALL}")
browser_config = BrowserConfig(
headless=False,
enable_stealth=True,
viewport_width=1920,
viewport_height=1080
)
async with AsyncWebCrawler(config=browser_config) as crawler:
# Simulate human-like behavior
human_behavior_script = """
(async () => {
// Wait random time between actions
const randomWait = () => Math.random() * 2000 + 1000;
// Simulate reading
await new Promise(resolve => setTimeout(resolve, randomWait()));
// Smooth scroll
const smoothScroll = async () => {
const totalHeight = document.body.scrollHeight;
const viewHeight = window.innerHeight;
let currentPosition = 0;
while (currentPosition < totalHeight - viewHeight) {
const scrollAmount = Math.random() * 300 + 100;
window.scrollBy({
top: scrollAmount,
behavior: 'smooth'
});
currentPosition += scrollAmount;
await new Promise(resolve => setTimeout(resolve, randomWait()));
}
};
await smoothScroll();
console.log('Human-like behavior simulation completed');
return true;
})()
"""
config = CrawlerRunConfig(
js_code=human_behavior_script,
wait_until="networkidle",
delay_before_return_html=3.0,
capture_console_messages=True
)
result = await crawler.arun(
url="https://example.com",
config=config
)
print(f" ✓ Simulated human-like scrolling and reading patterns")
print(f" ✓ Added random delays between actions")
print(f" ✓ Result: {result.success}")
# Best Practice 2: Use appropriate viewport and user agent
print(f"\n{Fore.YELLOW}2. Use Realistic Viewport and User Agent:{Style.RESET_ALL}")
# Get a realistic user agent
from crawl4ai.user_agent_generator import UserAgentGenerator
ua_generator = UserAgentGenerator()
browser_config = BrowserConfig(
headless=True,
enable_stealth=True,
viewport_width=1920,
viewport_height=1080,
user_agent=ua_generator.generate(device_type="desktop", browser_type="chrome")
)
print(f" ✓ Using realistic viewport: 1920x1080")
print(f" ✓ Using current Chrome user agent")
print(f" ✓ Stealth mode will ensure consistency")
# Best Practice 3: Manage request rate
print(f"\n{Fore.YELLOW}3. Manage Request Rate:{Style.RESET_ALL}")
print(f" ✓ Add delays between requests")
print(f" ✓ Randomize timing patterns")
print(f" ✓ Respect robots.txt")
# Best Practice 4: Session management
print(f"\n{Fore.YELLOW}4. Use Session Management:{Style.RESET_ALL}")
browser_config = BrowserConfig(
headless=False,
enable_stealth=True
)
async with AsyncWebCrawler(config=browser_config) as crawler:
# Create a session for multiple requests
session_id = "stealth_session_1"
config = CrawlerRunConfig(
session_id=session_id,
wait_until="domcontentloaded"
)
# First request
result1 = await crawler.arun(
url="https://example.com",
config=config
)
# Subsequent request reuses the same browser context
result2 = await crawler.arun(
url="https://example.com/about",
config=config
)
print(f" ✓ Reused browser session for multiple requests")
print(f" ✓ Maintains cookies and state between requests")
print(f" ✓ More efficient and realistic browsing pattern")
print(f"\n{Fore.CYAN}{'='*60}{Style.RESET_ALL}")
async def main():
"""Run all examples"""
# Run comparison tests
await compare_results()
# Show best practices
await stealth_best_practices()
print(f"\n{Fore.GREEN}Examples completed!{Style.RESET_ALL}")
print(f"\n{Fore.YELLOW}Remember:{Style.RESET_ALL}")
print(f"• Stealth mode helps with basic bot detection")
print(f"• Always respect website terms of service")
print(f"• Consider rate limiting and ethical scraping practices")
print(f"• For advanced protection, consider additional measures")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/crawlai_vs_firecrawl.py | docs/examples/crawlai_vs_firecrawl.py | import os, time
# append the path to the root of the project
import sys
import asyncio
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from firecrawl import FirecrawlApp
from crawl4ai import AsyncWebCrawler
__data__ = os.path.join(os.path.dirname(__file__), "..", "..") + "/.data"
async def compare():
app = FirecrawlApp(api_key=os.environ["FIRECRAWL_API_KEY"])
    # Test Firecrawl with a simple crawl
start = time.time()
scrape_status = app.scrape_url(
"https://www.nbcnews.com/business", params={"formats": ["markdown", "html"]}
)
end = time.time()
print(f"Time taken: {end - start} seconds")
print(len(scrape_status["markdown"]))
# save the markdown content with provider name
with open(f"{__data__}/firecrawl_simple.md", "w") as f:
f.write(scrape_status["markdown"])
# Count how many "cldnry.s-nbcnews.com" are in the markdown
print(scrape_status["markdown"].count("cldnry.s-nbcnews.com"))
async with AsyncWebCrawler() as crawler:
start = time.time()
result = await crawler.arun(
url="https://www.nbcnews.com/business",
# js_code=["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"],
word_count_threshold=0,
bypass_cache=True,
verbose=False,
)
end = time.time()
print(f"Time taken: {end - start} seconds")
print(len(result.markdown))
# save the markdown content with provider name
with open(f"{__data__}/crawl4ai_simple.md", "w") as f:
f.write(result.markdown)
# count how many "cldnry.s-nbcnews.com" are in the markdown
print(result.markdown.count("cldnry.s-nbcnews.com"))
start = time.time()
result = await crawler.arun(
url="https://www.nbcnews.com/business",
js_code=[
"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
],
word_count_threshold=0,
bypass_cache=True,
verbose=False,
)
end = time.time()
print(f"Time taken: {end - start} seconds")
print(len(result.markdown))
# save the markdown content with provider name
with open(f"{__data__}/crawl4ai_js.md", "w") as f:
f.write(result.markdown)
# count how many "cldnry.s-nbcnews.com" are in the markdown
print(result.markdown.count("cldnry.s-nbcnews.com"))
if __name__ == "__main__":
asyncio.run(compare())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/hooks_example.py | docs/examples/hooks_example.py | from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from playwright.async_api import Page, BrowserContext
async def main():
print("🔗 Hooks Example: Demonstrating different hook use cases")
# Configure browser settings
browser_config = BrowserConfig(headless=True)
# Configure crawler settings
crawler_run_config = CrawlerRunConfig(
js_code="window.scrollTo(0, document.body.scrollHeight);",
wait_for="body",
cache_mode=CacheMode.BYPASS,
)
# Create crawler instance
crawler = AsyncWebCrawler(config=browser_config)
# Define and set hook functions
async def on_browser_created(browser, context: BrowserContext, **kwargs):
"""Hook called after the browser is created"""
print("[HOOK] on_browser_created - Browser is ready!")
# Example: Set a cookie that will be used for all requests
return browser
async def on_page_context_created(page: Page, context: BrowserContext, **kwargs):
"""Hook called after a new page and context are created"""
print("[HOOK] on_page_context_created - New page created!")
# Example: Set default viewport size
await context.add_cookies(
[
{
"name": "session_id",
"value": "example_session",
"domain": ".example.com",
"path": "/",
}
]
)
await page.set_viewport_size({"width": 1080, "height": 800})
return page
async def on_user_agent_updated(
page: Page, context: BrowserContext, user_agent: str, **kwargs
):
"""Hook called when the user agent is updated"""
print(f"[HOOK] on_user_agent_updated - New user agent: {user_agent}")
return page
async def on_execution_started(page: Page, context: BrowserContext, **kwargs):
"""Hook called after custom JavaScript execution"""
print("[HOOK] on_execution_started - Custom JS executed!")
return page
async def before_goto(page: Page, context: BrowserContext, url: str, **kwargs):
"""Hook called before navigating to each URL"""
print(f"[HOOK] before_goto - About to visit: {url}")
# Example: Add custom headers for the request
await page.set_extra_http_headers({"Custom-Header": "my-value"})
return page
async def after_goto(
page: Page, context: BrowserContext, url: str, response: dict, **kwargs
):
"""Hook called after navigating to each URL"""
print(f"[HOOK] after_goto - Successfully loaded: {url}")
# Example: Wait for a specific element to be loaded
try:
await page.wait_for_selector(".content", timeout=1000)
print("Content element found!")
except:
print("Content element not found, continuing anyway")
return page
async def before_retrieve_html(page: Page, context: BrowserContext, **kwargs):
"""Hook called before retrieving the HTML content"""
print("[HOOK] before_retrieve_html - About to get HTML content")
# Example: Scroll to bottom to trigger lazy loading
await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
return page
async def before_return_html(
page: Page, context: BrowserContext, html: str, **kwargs
):
"""Hook called before returning the HTML content"""
print(f"[HOOK] before_return_html - Got HTML content (length: {len(html)})")
# Example: You could modify the HTML content here if needed
return page
# Set all the hooks
crawler.crawler_strategy.set_hook("on_browser_created", on_browser_created)
crawler.crawler_strategy.set_hook(
"on_page_context_created", on_page_context_created
)
crawler.crawler_strategy.set_hook("on_user_agent_updated", on_user_agent_updated)
crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)
crawler.crawler_strategy.set_hook("before_goto", before_goto)
crawler.crawler_strategy.set_hook("after_goto", after_goto)
crawler.crawler_strategy.set_hook("before_retrieve_html", before_retrieve_html)
crawler.crawler_strategy.set_hook("before_return_html", before_return_html)
await crawler.start()
# Example usage: crawl a simple website
url = "https://example.com"
result = await crawler.arun(url, config=crawler_run_config)
print(f"\nCrawled URL: {result.url}")
print(f"HTML length: {len(result.html)}")
await crawler.close()
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/crypto_analysis_example.py | docs/examples/crypto_analysis_example.py | """
Crawl4AI Crypto Trading Analysis Demo
Author: Unclecode
Date: 2024-03-15
This script demonstrates advanced crypto market analysis using:
1. Web scraping of real-time CoinMarketCap data
2. Smart table extraction with layout detection
3. Hedge fund-grade financial metrics
4. Interactive visualizations for trading signals
Key Features:
- Volume Anomaly Detection: Finds unusual trading activity
- Liquidity Power Score: Identifies easily tradable assets
- Volatility-Weighted Momentum: Surface sustainable trends
- Smart Money Signals: Algorithmic buy/hold recommendations
"""
import asyncio
import pandas as pd
import numpy as np
import re
import plotly.express as px
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
CacheMode,
LXMLWebScrapingStrategy,
)
from crawl4ai import CrawlResult
from typing import List
__current_dir__ = __file__.rsplit("/", 1)[0]
class CryptoAlphaGenerator:
"""
Advanced crypto analysis engine that transforms raw web data into:
- Volume anomaly flags
- Liquidity scores
- Momentum-risk ratios
- Machine learning-inspired trading signals
    Methods:
        clean_data(): Normalize raw table values into numeric columns
        calculate_metrics(): Process cleaned data into trading metrics and flags
        create_visuals(): Generate institutional-grade visualizations
        generate_insights(): Create plain-English trading recommendations
"""
def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Convert crypto market data to machine-readable format.
Handles currency symbols, units (B=Billions), and percentage values.
"""
# Make a copy to avoid SettingWithCopyWarning
df = df.copy()
# Clean Price column (handle currency symbols)
df["Price"] = df["Price"].astype(str).str.replace("[^\d.]", "", regex=True).astype(float)
# Handle Market Cap and Volume, considering both Billions and Trillions
def convert_large_numbers(value):
if pd.isna(value):
return float('nan')
value = str(value)
multiplier = 1
if 'B' in value:
multiplier = 1e9
elif 'T' in value:
multiplier = 1e12
# Handle cases where the value might already be numeric
cleaned_value = re.sub(r"[^\d.]", "", value)
return float(cleaned_value) * multiplier if cleaned_value else float('nan')
df["Market Cap"] = df["Market Cap"].apply(convert_large_numbers)
df["Volume(24h)"] = df["Volume(24h)"].apply(convert_large_numbers)
# Convert percentages to decimal values
for col in ["1h %", "24h %", "7d %"]:
if col in df.columns:
# First ensure it's string, then clean
df[col] = (
df[col].astype(str)
.str.replace("%", "")
.str.replace(",", ".")
.replace("nan", np.nan)
)
df[col] = pd.to_numeric(df[col], errors='coerce') / 100
return df
def calculate_metrics(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Compute advanced trading metrics used by quantitative funds:
1. Volume/Market Cap Ratio - Measures liquidity efficiency
(High ratio = Underestimated attention, and small-cap = higher growth potential)
        2. Volatility Score - Risk-adjusted momentum potential; shows how stable the trend is
           (standard deviation of the 1h/24h/7d returns)
        3. Momentum Score - Weighted average of returns; shows how strong the trend is
           (1h: 30% + 24h: 50% + 7d: 20%)
        4. Volume Anomaly - flags coins whose 24h volume exceeds 3x the median volume
           (unusual trading activity that can signal insider buying or breaking news)
"""
# Liquidity Metrics
df["Volume/Market Cap Ratio"] = df["Volume(24h)"] / df["Market Cap"]
# Risk Metrics
df["Volatility Score"] = df[["1h %", "24h %", "7d %"]].std(axis=1)
# Momentum Metrics
df["Momentum Score"] = df["1h %"] * 0.3 + df["24h %"] * 0.5 + df["7d %"] * 0.2
# Anomaly Detection
median_vol = df["Volume(24h)"].median()
df["Volume Anomaly"] = df["Volume(24h)"] > 3 * median_vol
# Value Flags
# Undervalued Flag - Low market cap and high momentum
# (High growth potential and low attention)
df["Undervalued Flag"] = (df["Market Cap"] < 1e9) & (
df["Momentum Score"] > 0.05
)
# Liquid Giant Flag - High volume/market cap ratio and large market cap
# (High liquidity and large market cap = institutional interest)
df["Liquid Giant"] = (df["Volume/Market Cap Ratio"] > 0.15) & (
df["Market Cap"] > 1e9
)
return df
def generate_insights_simple(self, df: pd.DataFrame) -> str:
"""
Generates an ultra-actionable crypto trading report with:
- Risk-tiered opportunities (High/Medium/Low)
- Concrete examples for each trade type
- Entry/exit strategies spelled out
- Visual cues for quick scanning
"""
report = [
"🚀 **CRYPTO TRADING CHEAT SHEET** 🚀",
"*Based on quantitative signals + hedge fund tactics*",
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
]
# 1. HIGH-RISK: Undervalued Small-Caps (Momentum Plays)
high_risk = df[df["Undervalued Flag"]].sort_values("Momentum Score", ascending=False)
if not high_risk.empty:
example_coin = high_risk.iloc[0]
report.extend([
"\n🔥 **HIGH-RISK: Rocket Fuel Small-Caps**",
f"*Example Trade:* {example_coin['Name']} (Price: ${example_coin['Price']:.6f})",
"📊 *Why?* Tiny market cap (<$1B) but STRONG momentum (+{:.0f}% last week)".format(example_coin['7d %']*100),
"🎯 *Strategy:*",
"1. Wait for 5-10% dip from recent high (${:.6f} → Buy under ${:.6f})".format(
example_coin['Price'] / (1 - example_coin['24h %']), # Approx recent high
example_coin['Price'] * 0.95
),
"2. Set stop-loss at -10% (${:.6f})".format(example_coin['Price'] * 0.90),
"3. Take profit at +20% (${:.6f})".format(example_coin['Price'] * 1.20),
"⚠️ *Risk Warning:* These can drop 30% fast! Never bet more than 5% of your portfolio.",
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
])
# 2. MEDIUM-RISK: Liquid Giants (Swing Trades)
medium_risk = df[df["Liquid Giant"]].sort_values("Volume/Market Cap Ratio", ascending=False)
if not medium_risk.empty:
example_coin = medium_risk.iloc[0]
report.extend([
"\n💎 **MEDIUM-RISK: Liquid Giants (Safe Swing Trades)**",
f"*Example Trade:* {example_coin['Name']} (Market Cap: ${example_coin['Market Cap']/1e9:.1f}B)",
"📊 *Why?* Huge volume (${:.1f}M/day) makes it easy to enter/exit".format(example_coin['Volume(24h)']/1e6),
"🎯 *Strategy:*",
"1. Buy when 24h volume > 15% of market cap (Current: {:.0f}%)".format(example_coin['Volume/Market Cap Ratio']*100),
"2. Hold 1-4 weeks (Big coins trend longer)",
"3. Exit when momentum drops below 5% (Current: {:.0f}%)".format(example_coin['Momentum Score']*100),
"📉 *Pro Tip:* Watch Bitcoin's trend - if BTC drops 5%, these usually follow.",
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
])
# 3. LOW-RISK: Stable Momentum (DCA Targets)
low_risk = df[
(df["Momentum Score"] > 0.05) &
(df["Volatility Score"] < 0.03)
].sort_values("Market Cap", ascending=False)
if not low_risk.empty:
example_coin = low_risk.iloc[0]
report.extend([
"\n🛡️ **LOW-RISK: Steady Climbers (DCA & Forget)**",
f"*Example Trade:* {example_coin['Name']} (Volatility: {example_coin['Volatility Score']:.2f}/5)",
"📊 *Why?* Rises steadily (+{:.0f}%/week) with LOW drama".format(example_coin['7d %']*100),
"🎯 *Strategy:*",
"1. Buy small amounts every Tuesday/Friday (DCA)",
"2. Hold for 3+ months (Compound gains work best here)",
"3. Sell 10% at every +25% milestone",
"💰 *Best For:* Long-term investors who hate sleepless nights",
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
])
# Volume Spike Alerts
anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False)
if not anomalies.empty:
example_coin = anomalies.iloc[0]
report.extend([
"\n🚨 **Volume Spike Alert (Possible News/Whale Action)**",
f"*Coin:* {example_coin['Name']} (Volume: ${example_coin['Volume(24h)']/1e6:.1f}M, usual: ${example_coin['Volume(24h)']/3/1e6:.1f}M)",
"🔍 *Check:* Twitter/CoinGecko for news before trading",
"⚡ *If no news:* Could be insider buying - watch price action:",
"- Break above today's high → Buy with tight stop-loss",
"- Fade back down → Avoid (may be a fakeout)"
])
# Pro Tip Footer
report.append("\n✨ *Pro Tip:* Bookmark this report & check back in 24h to see if signals held up.")
return "\n".join(report)
def generate_insights(self, df: pd.DataFrame) -> str:
"""
Generates a tactical trading report with:
- Top 3 trades per risk level (High/Medium/Low)
- Auto-calculated entry/exit prices
- BTC chart toggle tip
"""
# Filter top candidates for each risk level
high_risk = (
df[df["Undervalued Flag"]]
.sort_values("Momentum Score", ascending=False)
.head(3)
)
medium_risk = (
df[df["Liquid Giant"]]
.sort_values("Volume/Market Cap Ratio", ascending=False)
.head(3)
)
low_risk = (
df[(df["Momentum Score"] > 0.05) & (df["Volatility Score"] < 0.03)]
.sort_values("Momentum Score", ascending=False)
.head(3)
)
report = ["# 🎯 Crypto Trading Tactical Report (Top 3 Per Risk Tier)"]
# 1. High-Risk Trades (Small-Cap Momentum)
if not high_risk.empty:
report.append("\n## 🔥 HIGH RISK: Small-Cap Rockets (5-50% Potential)")
for i, coin in high_risk.iterrows():
current_price = coin["Price"]
entry = current_price * 0.95 # -5% dip
stop_loss = current_price * 0.90 # -10%
take_profit = current_price * 1.20 # +20%
report.append(
f"\n### {coin['Name']} (Momentum: {coin['Momentum Score']:.1%})"
f"\n- **Current Price:** ${current_price:.4f}"
f"\n- **Entry:** < ${entry:.4f} (Wait for pullback)"
f"\n- **Stop-Loss:** ${stop_loss:.4f} (-10%)"
f"\n- **Target:** ${take_profit:.4f} (+20%)"
f"\n- **Risk/Reward:** 1:2"
f"\n- **Watch:** Volume spikes above {coin['Volume(24h)']/1e6:.1f}M"
)
# 2. Medium-Risk Trades (Liquid Giants)
if not medium_risk.empty:
report.append("\n## 💎 MEDIUM RISK: Liquid Swing Trades (10-30% Potential)")
for i, coin in medium_risk.iterrows():
current_price = coin["Price"]
entry = current_price * 0.98 # -2% dip
stop_loss = current_price * 0.94 # -6%
take_profit = current_price * 1.15 # +15%
report.append(
f"\n### {coin['Name']} (Liquidity Score: {coin['Volume/Market Cap Ratio']:.1%})"
f"\n- **Current Price:** ${current_price:.2f}"
f"\n- **Entry:** < ${entry:.2f} (Buy slight dips)"
f"\n- **Stop-Loss:** ${stop_loss:.2f} (-6%)"
f"\n- **Target:** ${take_profit:.2f} (+15%)"
f"\n- **Hold Time:** 1-3 weeks"
f"\n- **Key Metric:** Volume/Cap > 15%"
)
# 3. Low-Risk Trades (Stable Momentum)
if not low_risk.empty:
report.append("\n## 🛡️ LOW RISK: Steady Gainers (5-15% Potential)")
for i, coin in low_risk.iterrows():
current_price = coin["Price"]
entry = current_price * 0.99 # -1% dip
stop_loss = current_price * 0.97 # -3%
take_profit = current_price * 1.10 # +10%
report.append(
f"\n### {coin['Name']} (Stability Score: {1/coin['Volatility Score']:.1f}x)"
f"\n- **Current Price:** ${current_price:.2f}"
f"\n- **Entry:** < ${entry:.2f} (Safe zone)"
f"\n- **Stop-Loss:** ${stop_loss:.2f} (-3%)"
f"\n- **Target:** ${take_profit:.2f} (+10%)"
f"\n- **DCA Suggestion:** 3 buys over 72 hours"
)
# Volume Anomaly Alert
anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False).head(2)
if not anomalies.empty:
report.append("\n⚠️ **Volume Spike Alerts**")
for i, coin in anomalies.iterrows():
report.append(
f"- {coin['Name']}: Volume {coin['Volume(24h)']/1e6:.1f}M "
f"(3x normal) | Price moved: {coin['24h %']:.1%}"
)
# Pro Tip
report.append(
"\n📊 **Chart Hack:** Hide BTC in visuals:\n"
"```python\n"
"# For 3D Map:\n"
"fig.update_traces(visible=False, selector={'name':'Bitcoin'})\n"
"# For Treemap:\n"
"df = df[df['Name'] != 'Bitcoin']\n"
"```"
)
return "\n".join(report)
def create_visuals(self, df: pd.DataFrame) -> dict:
"""Enhanced visuals with BTC toggle support"""
# 3D Market Map (with BTC toggle hint)
fig1 = px.scatter_3d(
df,
x="Market Cap",
y="Volume/Market Cap Ratio",
z="Momentum Score",
color="Name", # Color by name to allow toggling
hover_name="Name",
title="Market Map (Toggle BTC in legend to focus on alts)",
log_x=True
)
fig1.update_traces(
marker=dict(size=df["Volatility Score"]*100 + 5) # Dynamic sizing
)
# Liquidity Tree (exclude BTC if too dominant)
if df[df["Name"] == "BitcoinBTC"]["Market Cap"].values[0] > df["Market Cap"].median() * 10:
df = df[df["Name"] != "BitcoinBTC"]
fig2 = px.treemap(
df,
path=["Name"],
values="Market Cap",
color="Volume/Market Cap Ratio",
title="Liquidity Tree (BTC auto-removed if dominant)"
)
return {"market_map": fig1, "liquidity_tree": fig2}
async def main():
"""
Main execution flow:
1. Configure headless browser for scraping
2. Extract live crypto market data
3. Clean and analyze using hedge fund models
4. Generate visualizations and insights
5. Output professional trading report
"""
# Configure browser with anti-detection features
browser_config = BrowserConfig(
headless=False,
)
# Initialize crawler with smart table detection
crawler = AsyncWebCrawler(config=browser_config)
await crawler.start()
try:
# Set up scraping parameters
crawl_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
table_score_threshold=8, # Strict table detection
keep_data_attributes=True,
scraping_strategy=LXMLWebScrapingStrategy(),
scan_full_page=True,
scroll_delay=0.2,
)
# Execute market data extraction
results: List[CrawlResult] = await crawler.arun(
url="https://coinmarketcap.com/?page=1", config=crawl_config
)
# Process results
raw_df = pd.DataFrame()
for result in results:
# Use the new tables field, falling back to media["tables"] for backward compatibility
tables = result.tables if hasattr(result, "tables") and result.tables else result.media.get("tables", [])
if result.success and tables:
# Extract primary market table
# DataFrame
raw_df = pd.DataFrame(
tables[0]["rows"],
columns=tables[0]["headers"],
)
break
# This is for debugging only
# ////// Remove this in production from here..
# Save raw data for debugging
raw_df.to_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv", index=False)
print("🔍 Raw data saved to 'raw_crypto_data.csv'")
# Read from file for debugging
raw_df = pd.read_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv")
# ////// ..to here
        # Keep only the top 50 rows
raw_df = raw_df.head(50)
# Remove "Buy" from name
raw_df["Name"] = raw_df["Name"].str.replace("Buy", "")
# Initialize analysis engine
analyzer = CryptoAlphaGenerator()
clean_df = analyzer.clean_data(raw_df)
analyzed_df = analyzer.calculate_metrics(clean_df)
# Generate outputs
visuals = analyzer.create_visuals(analyzed_df)
insights = analyzer.generate_insights(analyzed_df)
# Save visualizations
visuals["market_map"].write_html(f"{__current_dir__}/tmp/market_map.html")
visuals["liquidity_tree"].write_html(f"{__current_dir__}/tmp/liquidity_tree.html")
# Display results
print("🔑 Key Trading Insights:")
print(insights)
print("\n📊 Open 'market_map.html' for interactive analysis")
print("\n📊 Open 'liquidity_tree.html' for interactive analysis")
finally:
await crawler.close()
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/language_support_example.py | docs/examples/language_support_example.py | import asyncio
from crawl4ai import AsyncWebCrawler, AsyncPlaywrightCrawlerStrategy
async def main():
# Example 1: Setting language when creating the crawler
crawler1 = AsyncWebCrawler(
crawler_strategy=AsyncPlaywrightCrawlerStrategy(
headers={"Accept-Language": "fr-FR,fr;q=0.9,en-US;q=0.8,en;q=0.7"}
)
)
result1 = await crawler1.arun("https://www.example.com")
print(
"Example 1 result:", result1.extracted_content[:100]
) # Print first 100 characters
# Example 2: Setting language before crawling
crawler2 = AsyncWebCrawler()
crawler2.crawler_strategy.headers[
"Accept-Language"
] = "es-ES,es;q=0.9,en-US;q=0.8,en;q=0.7"
result2 = await crawler2.arun("https://www.example.com")
print("Example 2 result:", result2.extracted_content[:100])
# Example 3: Setting language when calling arun method
crawler3 = AsyncWebCrawler()
result3 = await crawler3.arun(
"https://www.example.com",
headers={"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7"},
)
print("Example 3 result:", result3.extracted_content[:100])
# Example 4: Crawling multiple pages with different languages
urls = [
("https://www.example.com", "fr-FR,fr;q=0.9"),
("https://www.example.org", "es-ES,es;q=0.9"),
("https://www.example.net", "de-DE,de;q=0.9"),
]
crawler4 = AsyncWebCrawler()
results = await asyncio.gather(
*[crawler4.arun(url, headers={"Accept-Language": lang}) for url, lang in urls]
)
for url, result in zip([u for u, _ in urls], results):
print(f"Result for {url}:", result.extracted_content[:100])
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/use_geo_location.py | docs/examples/use_geo_location.py | # use_geo_location.py
"""
Example: override locale, timezone, and geolocation using Crawl4ai patterns.
This demo uses `AsyncWebCrawler.arun()` to fetch a page with
browser context primed for specific locale, timezone, and GPS,
and saves a screenshot for visual verification.
"""
import asyncio
import base64
from pathlib import Path
from typing import List
from crawl4ai import (
AsyncWebCrawler,
CrawlerRunConfig,
BrowserConfig,
GeolocationConfig,
CrawlResult,
)
async def demo_geo_override():
"""Demo: Crawl a geolocation-test page with overrides and screenshot."""
print("\n=== Geo-Override Crawl ===")
# 1) Browser setup: use Playwright-managed contexts
browser_cfg = BrowserConfig(
headless=False,
viewport_width=1280,
viewport_height=720,
use_managed_browser=False,
)
# 2) Run config: include locale, timezone_id, geolocation, and screenshot
run_cfg = CrawlerRunConfig(
url="https://browserleaks.com/geo", # test page that shows your location
locale="en-US", # Accept-Language & UI locale
timezone_id="America/Los_Angeles", # JS Date()/Intl timezone
geolocation=GeolocationConfig( # override GPS coords
latitude=34.0522,
longitude=-118.2437,
accuracy=10.0,
),
screenshot=True, # capture screenshot after load
session_id="geo_test", # reuse context if rerunning
delay_before_return_html=5
)
async with AsyncWebCrawler(config=browser_cfg) as crawler:
# 3) Run crawl (returns list even for single URL)
results: List[CrawlResult] = await crawler.arun(
url=run_cfg.url,
config=run_cfg,
)
result = results[0]
# 4) Save screenshot and report path
if result.screenshot:
__current_dir = Path(__file__).parent
out_dir = __current_dir / "tmp"
out_dir.mkdir(exist_ok=True)
shot_path = out_dir / "geo_test.png"
with open(shot_path, "wb") as f:
f.write(base64.b64decode(result.screenshot))
print(f"Saved screenshot to {shot_path}")
else:
print("No screenshot captured, check configuration.")
if __name__ == "__main__":
asyncio.run(demo_geo_override())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/amazon_product_extraction_using_use_javascript.py | docs/examples/amazon_product_extraction_using_use_javascript.py | """
This example demonstrates how to use JSON CSS extraction to scrape product information
from Amazon search results. It shows how to extract structured data like product titles,
prices, ratings, and other details using CSS selectors.
"""
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
import json
async def extract_amazon_products():
# Initialize browser config
browser_config = BrowserConfig(
# browser_type="chromium",
headless=True
)
js_code_to_search = """
const task = async () => {
document.querySelector('#twotabsearchtextbox').value = 'Samsung Galaxy Tab';
document.querySelector('#nav-search-submit-button').click();
}
await task();
"""
js_code_to_search_sync = """
document.querySelector('#twotabsearchtextbox').value = 'Samsung Galaxy Tab';
document.querySelector('#nav-search-submit-button').click();
"""
crawler_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
js_code=js_code_to_search,
wait_for='css:[data-component-type="s-search-result"]',
extraction_strategy=JsonCssExtractionStrategy(
schema={
"name": "Amazon Product Search Results",
"baseSelector": "[data-component-type='s-search-result']",
"fields": [
{
"name": "asin",
"selector": "",
"type": "attribute",
"attribute": "data-asin",
},
{"name": "title", "selector": "h2 a span", "type": "text"},
{
"name": "url",
"selector": "h2 a",
"type": "attribute",
"attribute": "href",
},
{
"name": "image",
"selector": ".s-image",
"type": "attribute",
"attribute": "src",
},
{
"name": "rating",
"selector": ".a-icon-star-small .a-icon-alt",
"type": "text",
},
{
"name": "reviews_count",
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
"type": "text",
},
{
"name": "price",
"selector": ".a-price .a-offscreen",
"type": "text",
},
{
"name": "original_price",
"selector": ".a-price.a-text-price .a-offscreen",
"type": "text",
},
{
"name": "sponsored",
"selector": ".puis-sponsored-label-text",
"type": "exists",
},
{
"name": "delivery_info",
"selector": "[data-cy='delivery-recipe'] .a-color-base",
"type": "text",
"multiple": True,
},
],
}
),
)
# Example search URL (you should replace with your actual Amazon URL)
url = "https://www.amazon.com/"
# Use context manager for proper resource handling
async with AsyncWebCrawler(config=browser_config) as crawler:
# Extract the data
result = await crawler.arun(url=url, config=crawler_config)
# Process and print the results
if result and result.extracted_content:
# Parse the JSON string into a list of products
products = json.loads(result.extracted_content)
# Process each product in the list
for product in products:
print("\nProduct Details:")
print(f"ASIN: {product.get('asin')}")
print(f"Title: {product.get('title')}")
print(f"Price: {product.get('price')}")
print(f"Original Price: {product.get('original_price')}")
print(f"Rating: {product.get('rating')}")
print(f"Reviews: {product.get('reviews_count')}")
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
if product.get("delivery_info"):
print(f"Delivery: {' '.join(product['delivery_info'])}")
print("-" * 80)
if __name__ == "__main__":
import asyncio
asyncio.run(extract_amazon_products())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/undetectability/undetected_cloudflare_test.py | docs/examples/undetectability/undetected_cloudflare_test.py | """
Undetected Browser Test - Cloudflare Protected Site
Tests the difference between regular and undetected modes on a Cloudflare-protected site
"""
import asyncio
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
UndetectedAdapter
)
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
# Test URL with Cloudflare protection
TEST_URL = "https://nowsecure.nl"
async def test_regular_browser():
"""Test with regular browser - likely to be blocked"""
print("=" * 60)
print("Testing with Regular Browser")
print("=" * 60)
browser_config = BrowserConfig(
headless=False,
verbose=True,
viewport_width=1920,
viewport_height=1080,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
config = CrawlerRunConfig(
delay_before_return_html=2.0,
simulate_user=True,
magic=True, # Try with magic mode too
)
result = await crawler.arun(url=TEST_URL, config=config)
print(f"\n✓ Success: {result.success}")
print(f"✓ Status Code: {result.status_code}")
print(f"✓ HTML Length: {len(result.html)}")
# Check for Cloudflare challenge
if result.html:
cf_indicators = [
"Checking your browser",
"Please stand by",
"cloudflare",
"cf-browser-verification",
"Access denied",
"Ray ID"
]
detected = False
for indicator in cf_indicators:
if indicator.lower() in result.html.lower():
print(f"⚠️ Cloudflare Challenge Detected: '{indicator}' found")
detected = True
break
if not detected and len(result.markdown.raw_markdown) > 100:
print("✅ Successfully bypassed Cloudflare!")
print(f"Content preview: {result.markdown.raw_markdown[:200]}...")
elif not detected:
print("⚠️ Page loaded but content seems minimal")
return result
async def test_undetected_browser():
"""Test with undetected browser - should bypass Cloudflare"""
print("\n" + "=" * 60)
print("Testing with Undetected Browser")
print("=" * 60)
browser_config = BrowserConfig(
headless=False, # Headless is easier to detect
verbose=True,
viewport_width=1920,
viewport_height=1080,
)
# Create undetected adapter
undetected_adapter = UndetectedAdapter()
# Create strategy with undetected adapter
crawler_strategy = AsyncPlaywrightCrawlerStrategy(
browser_config=browser_config,
browser_adapter=undetected_adapter
)
async with AsyncWebCrawler(
crawler_strategy=crawler_strategy,
config=browser_config
) as crawler:
config = CrawlerRunConfig(
delay_before_return_html=2.0,
simulate_user=True,
)
result = await crawler.arun(url=TEST_URL, config=config)
print(f"\n✓ Success: {result.success}")
print(f"✓ Status Code: {result.status_code}")
print(f"✓ HTML Length: {len(result.html)}")
# Check for Cloudflare challenge
if result.html:
cf_indicators = [
"Checking your browser",
"Please stand by",
"cloudflare",
"cf-browser-verification",
"Access denied",
"Ray ID"
]
detected = False
for indicator in cf_indicators:
if indicator.lower() in result.html.lower():
print(f"⚠️ Cloudflare Challenge Detected: '{indicator}' found")
detected = True
break
if not detected and len(result.markdown.raw_markdown) > 100:
print("✅ Successfully bypassed Cloudflare!")
print(f"Content preview: {result.markdown.raw_markdown[:200]}...")
elif not detected:
print("⚠️ Page loaded but content seems minimal")
return result
async def main():
"""Compare regular vs undetected browser"""
print("🤖 Crawl4AI - Cloudflare Bypass Test")
print(f"Testing URL: {TEST_URL}\n")
# Test regular browser
regular_result = await test_regular_browser()
# Small delay
await asyncio.sleep(2)
# Test undetected browser
undetected_result = await test_undetected_browser()
# Summary
print("\n" + "=" * 60)
print("SUMMARY")
print("=" * 60)
print(f"Regular Browser:")
print(f" - Success: {regular_result.success}")
print(f" - Content Length: {len(regular_result.markdown.raw_markdown) if regular_result.markdown else 0}")
print(f"\nUndetected Browser:")
print(f" - Success: {undetected_result.success}")
print(f" - Content Length: {len(undetected_result.markdown.raw_markdown) if undetected_result.markdown else 0}")
if undetected_result.success and len(undetected_result.markdown.raw_markdown) > len(regular_result.markdown.raw_markdown):
print("\n✅ Undetected browser successfully bypassed protection!")
print("=" * 60)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/undetectability/undetected_bot_test.py | docs/examples/undetectability/undetected_bot_test.py | """
Bot Detection Test - Compare Regular vs Undetected
Tests browser fingerprinting differences at bot.sannysoft.com
"""
import asyncio
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
UndetectedAdapter,
CrawlResult
)
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
# Bot detection test site
TEST_URL = "https://bot.sannysoft.com"
def analyze_bot_detection(result: CrawlResult) -> dict:
"""Analyze bot detection results from the page"""
detections = {
"webdriver": False,
"headless": False,
"automation": False,
"user_agent": False,
"total_tests": 0,
"failed_tests": 0
}
if not result.success or not result.html:
return detections
# Look for specific test results in the HTML
html_lower = result.html.lower()
# Check for common bot indicators
if "webdriver" in html_lower and ("fail" in html_lower or "true" in html_lower):
detections["webdriver"] = True
detections["failed_tests"] += 1
if "headless" in html_lower and ("fail" in html_lower or "true" in html_lower):
detections["headless"] = True
detections["failed_tests"] += 1
if "automation" in html_lower and "detected" in html_lower:
detections["automation"] = True
detections["failed_tests"] += 1
# Count total tests (approximate)
detections["total_tests"] = html_lower.count("test") + html_lower.count("check")
return detections
async def test_browser_mode(adapter_name: str, adapter=None):
"""Test a browser mode and return results"""
print(f"\n{'='*60}")
print(f"Testing: {adapter_name}")
print(f"{'='*60}")
browser_config = BrowserConfig(
headless=False, # Run in headed mode for better results
verbose=True,
viewport_width=1920,
viewport_height=1080,
)
if adapter:
# Use undetected mode
crawler_strategy = AsyncPlaywrightCrawlerStrategy(
browser_config=browser_config,
browser_adapter=adapter
)
crawler = AsyncWebCrawler(
crawler_strategy=crawler_strategy,
config=browser_config
)
else:
# Use regular mode
crawler = AsyncWebCrawler(config=browser_config)
async with crawler:
config = CrawlerRunConfig(
delay_before_return_html=3.0, # Let detection scripts run
wait_for_images=True,
screenshot=True,
simulate_user=False, # Don't simulate for accurate detection
)
result = await crawler.arun(url=TEST_URL, config=config)
print(f"\n✓ Success: {result.success}")
print(f"✓ Status Code: {result.status_code}")
if result.success:
# Analyze detection results
detections = analyze_bot_detection(result)
print(f"\n🔍 Bot Detection Analysis:")
print(f" - WebDriver Detected: {'❌ Yes' if detections['webdriver'] else '✅ No'}")
print(f" - Headless Detected: {'❌ Yes' if detections['headless'] else '✅ No'}")
print(f" - Automation Detected: {'❌ Yes' if detections['automation'] else '✅ No'}")
print(f" - Failed Tests: {detections['failed_tests']}")
# Show some content
if result.markdown.raw_markdown:
print(f"\nContent preview:")
lines = result.markdown.raw_markdown.split('\n')
for line in lines[:20]: # Show first 20 lines
if any(keyword in line.lower() for keyword in ['test', 'pass', 'fail', 'yes', 'no']):
print(f" {line.strip()}")
return result, detections if result.success else {}
async def main():
"""Run the comparison"""
print("🤖 Crawl4AI - Bot Detection Test")
print(f"Testing at: {TEST_URL}")
print("This site runs various browser fingerprinting tests\n")
# Test regular browser
regular_result, regular_detections = await test_browser_mode("Regular Browser")
# Small delay
await asyncio.sleep(2)
# Test undetected browser
undetected_adapter = UndetectedAdapter()
undetected_result, undetected_detections = await test_browser_mode(
"Undetected Browser",
undetected_adapter
)
# Summary comparison
print(f"\n{'='*60}")
print("COMPARISON SUMMARY")
print(f"{'='*60}")
print(f"\n{'Test':<25} {'Regular':<15} {'Undetected':<15}")
print(f"{'-'*55}")
if regular_detections and undetected_detections:
print(f"{'WebDriver Detection':<25} {'❌ Detected' if regular_detections['webdriver'] else '✅ Passed':<15} {'❌ Detected' if undetected_detections['webdriver'] else '✅ Passed':<15}")
print(f"{'Headless Detection':<25} {'❌ Detected' if regular_detections['headless'] else '✅ Passed':<15} {'❌ Detected' if undetected_detections['headless'] else '✅ Passed':<15}")
print(f"{'Automation Detection':<25} {'❌ Detected' if regular_detections['automation'] else '✅ Passed':<15} {'❌ Detected' if undetected_detections['automation'] else '✅ Passed':<15}")
print(f"{'Failed Tests':<25} {regular_detections['failed_tests']:<15} {undetected_detections['failed_tests']:<15}")
print(f"\n{'='*60}")
if undetected_detections.get('failed_tests', 0) < regular_detections.get('failed_tests', 1):
print("✅ Undetected browser performed better at evading detection!")
else:
print("ℹ️ Both browsers had similar detection results")
if __name__ == "__main__":
    asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/undetectability/undetected_basic_test.py | docs/examples/undetectability/undetected_basic_test.py
"""
Basic Undetected Browser Test
Simple example to test if undetected mode works
"""
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig
async def test_regular_mode():
"""Test with regular browser"""
print("Testing Regular Browser Mode...")
browser_config = BrowserConfig(
headless=False,
verbose=True
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://www.example.com")
print(f"Regular Mode - Success: {result.success}")
print(f"Regular Mode - Status: {result.status_code}")
print(f"Regular Mode - Content length: {len(result.markdown.raw_markdown)}")
print(f"Regular Mode - First 100 chars: {result.markdown.raw_markdown[:100]}...")
return result.success
async def test_undetected_mode():
"""Test with undetected browser"""
print("\nTesting Undetected Browser Mode...")
from crawl4ai import UndetectedAdapter
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
browser_config = BrowserConfig(
headless=False,
verbose=True
)
# Create undetected adapter
undetected_adapter = UndetectedAdapter()
# Create strategy with undetected adapter
crawler_strategy = AsyncPlaywrightCrawlerStrategy(
browser_config=browser_config,
browser_adapter=undetected_adapter
)
async with AsyncWebCrawler(
crawler_strategy=crawler_strategy,
config=browser_config
) as crawler:
result = await crawler.arun(url="https://www.example.com")
print(f"Undetected Mode - Success: {result.success}")
print(f"Undetected Mode - Status: {result.status_code}")
print(f"Undetected Mode - Content length: {len(result.markdown.raw_markdown)}")
print(f"Undetected Mode - First 100 chars: {result.markdown.raw_markdown[:100]}...")
return result.success
async def main():
"""Run both tests"""
print("🤖 Crawl4AI Basic Adapter Test\n")
# Test regular mode
regular_success = await test_regular_mode()
# Test undetected mode
undetected_success = await test_undetected_mode()
# Summary
print("\n" + "="*50)
print("Summary:")
print(f"Regular Mode: {'✅ Success' if regular_success else '❌ Failed'}")
print(f"Undetected Mode: {'✅ Success' if undetected_success else '❌ Failed'}")
print("="*50)
if __name__ == "__main__":
    asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/undetectability/undetected_vs_regular_comparison.py | docs/examples/undetectability/undetected_vs_regular_comparison.py
"""
Undetected vs Regular Browser Comparison
This example demonstrates the difference between regular and undetected browser modes
when accessing sites with bot detection services.
Based on tested anti-bot services:
- Cloudflare
- Kasada
- Akamai
- DataDome
- Bet365
- And others
"""
import asyncio
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
PlaywrightAdapter,
UndetectedAdapter,
CrawlResult
)
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
# Test URLs for various bot detection services
TEST_SITES = {
"Cloudflare Protected": "https://nowsecure.nl",
# "Bot Detection Test": "https://bot.sannysoft.com",
# "Fingerprint Test": "https://fingerprint.com/products/bot-detection",
# "Browser Scan": "https://browserscan.net",
# "CreepJS": "https://abrahamjuliot.github.io/creepjs",
}
async def test_with_adapter(url: str, adapter_name: str, adapter):
"""Test a URL with a specific adapter"""
browser_config = BrowserConfig(
headless=False, # Better for avoiding detection
viewport_width=1920,
viewport_height=1080,
verbose=True,
)
# Create the crawler strategy with the adapter
crawler_strategy = AsyncPlaywrightCrawlerStrategy(
browser_config=browser_config,
browser_adapter=adapter
)
print(f"\n{'='*60}")
print(f"Testing with {adapter_name} adapter")
print(f"URL: {url}")
print(f"{'='*60}")
try:
async with AsyncWebCrawler(
crawler_strategy=crawler_strategy,
config=browser_config
) as crawler:
crawler_config = CrawlerRunConfig(
delay_before_return_html=3.0, # Give page time to load
wait_for_images=True,
screenshot=True,
simulate_user=True, # Add user simulation
)
result: CrawlResult = await crawler.arun(
url=url,
config=crawler_config
)
# Check results
print(f"✓ Status Code: {result.status_code}")
print(f"✓ Success: {result.success}")
print(f"✓ HTML Length: {len(result.html)}")
print(f"✓ Markdown Length: {len(result.markdown.raw_markdown)}")
# Check for common bot detection indicators
detection_indicators = [
"Access denied",
"Please verify you are human",
"Checking your browser",
"Enable JavaScript",
"captcha",
"403 Forbidden",
"Bot detection",
"Security check"
]
content_lower = result.markdown.raw_markdown.lower()
detected = False
for indicator in detection_indicators:
if indicator.lower() in content_lower:
print(f"⚠️ Possible detection: Found '{indicator}'")
detected = True
break
if not detected:
print("✅ No obvious bot detection triggered!")
# Show first 200 chars of content
print(f"Content preview: {result.markdown.raw_markdown[:200]}...")
return result.success and not detected
except Exception as e:
print(f"❌ Error: {str(e)}")
return False
async def compare_adapters(url: str, site_name: str):
"""Compare regular and undetected adapters on the same URL"""
print(f"\n{'#'*60}")
print(f"# Testing: {site_name}")
print(f"{'#'*60}")
# Test with regular adapter
regular_adapter = PlaywrightAdapter()
regular_success = await test_with_adapter(url, "Regular", regular_adapter)
# Small delay between tests
await asyncio.sleep(2)
# Test with undetected adapter
undetected_adapter = UndetectedAdapter()
undetected_success = await test_with_adapter(url, "Undetected", undetected_adapter)
# Summary
print(f"\n{'='*60}")
print(f"Summary for {site_name}:")
print(f"Regular Adapter: {'✅ Passed' if regular_success else '❌ Blocked/Detected'}")
print(f"Undetected Adapter: {'✅ Passed' if undetected_success else '❌ Blocked/Detected'}")
print(f"{'='*60}")
return regular_success, undetected_success
async def main():
"""Run comparison tests on multiple sites"""
print("🤖 Crawl4AI Browser Adapter Comparison")
print("Testing regular vs undetected browser modes\n")
results = {}
# Test each site
for site_name, url in TEST_SITES.items():
regular, undetected = await compare_adapters(url, site_name)
results[site_name] = {
"regular": regular,
"undetected": undetected
}
# Delay between different sites
await asyncio.sleep(3)
# Final summary
print(f"\n{'#'*60}")
print("# FINAL RESULTS")
print(f"{'#'*60}")
print(f"{'Site':<30} {'Regular':<15} {'Undetected':<15}")
print(f"{'-'*60}")
for site, result in results.items():
regular_status = "✅ Passed" if result["regular"] else "❌ Blocked"
undetected_status = "✅ Passed" if result["undetected"] else "❌ Blocked"
print(f"{site:<30} {regular_status:<15} {undetected_status:<15}")
# Calculate success rates
regular_success = sum(1 for r in results.values() if r["regular"])
undetected_success = sum(1 for r in results.values() if r["undetected"])
total = len(results)
print(f"\n{'='*60}")
print(f"Success Rates:")
print(f"Regular Adapter: {regular_success}/{total} ({regular_success/total*100:.1f}%)")
print(f"Undetected Adapter: {undetected_success}/{total} ({undetected_success/total*100:.1f}%)")
print(f"{'='*60}")
if __name__ == "__main__":
# Note: This example may take a while to run as it tests multiple sites
# You can comment out sites in TEST_SITES to run faster tests
    asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/url_seeder/url_seeder_demo.py | docs/examples/url_seeder/url_seeder_demo.py
"""
URL Seeder Demo - Interactive showcase of Crawl4AI's URL discovery capabilities
This demo shows:
1. Basic URL discovery from sitemaps and Common Crawl
2. Cache management and forced refresh
3. Live URL validation and metadata extraction
4. BM25 relevance scoring for intelligent filtering
5. Integration with AsyncWebCrawler for the complete pipeline
6. Multi-domain discovery across multiple sites
Note: The AsyncUrlSeeder now supports context manager protocol for automatic cleanup.
"""
import asyncio
import time
from datetime import datetime
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, BarColumn, TimeElapsedColumn
from rich.prompt import Prompt, Confirm
from crawl4ai import (
AsyncWebCrawler,
CrawlerRunConfig,
AsyncUrlSeeder,
SeedingConfig
)
console = Console()
console.rule("[bold green]🌐 Crawl4AI URL Seeder: Interactive Demo")
DOMAIN = "crawl4ai.com"
# Utils
def print_head_info(head_data):
table = Table(title="<head> Metadata", expand=True)
table.add_column("Key", style="cyan", no_wrap=True)
table.add_column("Value", style="magenta")
if not head_data:
console.print("[yellow]No head data found.")
return
if head_data.get("title"):
table.add_row("title", head_data["title"])
if head_data.get("charset"):
table.add_row("charset", head_data["charset"])
for k, v in head_data.get("meta", {}).items():
table.add_row(f"meta:{k}", v)
for rel, items in head_data.get("link", {}).items():
for item in items:
table.add_row(f"link:{rel}", item.get("href", ""))
console.print(table)
async def section_1_basic_exploration(seed: AsyncUrlSeeder):
console.rule("[bold cyan]1. Basic Seeding")
cfg = SeedingConfig(source="cc+sitemap", pattern="*", verbose=True)
start_time = time.time()
with Progress(SpinnerColumn(), "[progress.description]{task.description}") as p:
p.add_task(description="Fetching from Common Crawl + Sitemap...", total=None)
urls = await seed.urls(DOMAIN, cfg)
elapsed = time.time() - start_time
console.print(f"[green]✓ Fetched {len(urls)} URLs in {elapsed:.2f} seconds")
console.print(f"[dim] Speed: {len(urls)/elapsed:.0f} URLs/second[/dim]\n")
console.print("[bold]Sample URLs:[/bold]")
for u in urls[:5]:
console.print(f" • {u['url']}")
async def section_2_cache_demo(seed: AsyncUrlSeeder):
console.rule("[bold cyan]2. Caching Demonstration")
console.print("[yellow]Using `force=True` to bypass cache and fetch fresh data.[/yellow]")
    cfg = SeedingConfig(source="cc", pattern="*crawl4ai.com/core/*", verbose=False, force=True)
await seed.urls(DOMAIN, cfg)
async def section_3_live_head(seed: AsyncUrlSeeder):
console.rule("[bold cyan]3. Live Check + Head Extraction")
cfg = SeedingConfig(
extract_head=True,
concurrency=10,
hits_per_sec=5,
pattern="*crawl4ai.com/*",
max_urls=10,
verbose=False,
)
urls = await seed.urls(DOMAIN, cfg)
valid = [u for u in urls if u["status"] == "valid"]
console.print(f"[green]Valid: {len(valid)} / {len(urls)}")
if valid:
print_head_info(valid[0]["head_data"])
async def section_4_bm25_scoring(seed: AsyncUrlSeeder):
console.rule("[bold cyan]4. BM25 Relevance Scoring")
console.print("[yellow]Using AI-powered relevance scoring to find the most relevant content[/yellow]")
query = "markdown generation extraction strategies"
cfg = SeedingConfig(
source="sitemap",
extract_head=True,
query=query,
scoring_method="bm25",
score_threshold=0.3, # Only URLs with >30% relevance
max_urls=20,
verbose=False
)
with Progress(SpinnerColumn(), "[progress.description]{task.description}") as p:
p.add_task(description=f"Searching for: '{query}'", total=None)
urls = await seed.urls(DOMAIN, cfg)
console.print(f"[green]Found {len(urls)} relevant URLs (score > 0.3)")
# Show top results with scores
table = Table(title="Top 5 Most Relevant Pages", expand=True)
table.add_column("Score", style="cyan", width=8)
table.add_column("Title", style="magenta")
table.add_column("URL", style="blue", overflow="fold")
for url in urls[:5]:
score = f"{url['relevance_score']:.2f}"
        title = url['head_data'].get('title') or 'No title'
        if len(title) > 60:
            title = title[:60] + "..."
table.add_row(score, title, url['url'])
console.print(table)
async def section_5_keyword_filter_to_agent(seed: AsyncUrlSeeder):
console.rule("[bold cyan]5. Complete Pipeline: Discover → Filter → Crawl")
cfg = SeedingConfig(
extract_head=True,
concurrency=20,
hits_per_sec=10,
max_urls=10,
pattern="*crawl4ai.com/*",
force=True,
)
urls = await seed.urls(DOMAIN, cfg)
keywords = ["deep crawling", "markdown", "llm"]
selected = [u for u in urls if any(k in str(u["head_data"]).lower() for k in keywords)]
console.print(f"[cyan]Selected {len(selected)} URLs with relevant keywords:")
for u in selected[:10]:
console.print("•", u["url"])
console.print("\n[yellow]Passing above URLs to arun_many() LLM agent for crawling...")
async with AsyncWebCrawler(verbose=True) as crawler:
crawl_run_config = CrawlerRunConfig(
# Example crawl settings for these URLs:
only_text=True, # Just get text content
screenshot=False,
pdf=False,
word_count_threshold=50, # Only process pages with at least 50 words
stream=True,
verbose=False # Keep logs clean for arun_many in this demo
)
# Extract just the URLs from the selected results
urls_to_crawl = [u["url"] for u in selected]
# We'll stream results for large lists, but collect them here for demonstration
crawled_results_stream = await crawler.arun_many(urls_to_crawl, config=crawl_run_config)
final_crawled_data = []
async for result in crawled_results_stream:
final_crawled_data.append(result)
if len(final_crawled_data) % 5 == 0:
print(f" Processed {len(final_crawled_data)}/{len(urls_to_crawl)} URLs...")
print(f"\n Successfully crawled {len(final_crawled_data)} URLs.")
if final_crawled_data:
print("\n Example of a crawled result's URL and Markdown (first successful one):")
for result in final_crawled_data:
if result.success and result.markdown.raw_markdown:
print(f" URL: {result.url}")
print(f" Markdown snippet: {result.markdown.raw_markdown[:200]}...")
break
else:
print(" No successful crawls with markdown found.")
else:
print(" No successful crawls found.")
async def section_6_multi_domain(seed: AsyncUrlSeeder):
console.rule("[bold cyan]6. Multi-Domain Discovery")
console.print("[yellow]Discovering Python tutorials across multiple educational sites[/yellow]\n")
domains = ["docs.python.org", "realpython.com", "docs.crawl4ai.com"]
cfg = SeedingConfig(
source="sitemap",
extract_head=True,
query="python tutorial guide",
scoring_method="bm25",
score_threshold=0.2,
max_urls=5 # Per domain
)
start_time = time.time()
with Progress(SpinnerColumn(), "[progress.description]{task.description}") as p:
task = p.add_task(description="Discovering across domains...", total=None)
results = await seed.many_urls(domains, cfg)
elapsed = time.time() - start_time
total_urls = sum(len(urls) for urls in results.values())
console.print(f"[green]✓ Found {total_urls} relevant URLs across {len(domains)} domains in {elapsed:.2f}s\n")
# Show results per domain
for domain, urls in results.items():
console.print(f"[bold]{domain}:[/bold] {len(urls)} relevant pages")
if urls:
top = urls[0]
console.print(f" Top result: [{top['relevance_score']:.2f}] {top['head_data'].get('title', 'No title')}")
async def main():
async with AsyncUrlSeeder() as seed:
# Interactive menu
sections = {
"1": ("Basic URL Discovery", section_1_basic_exploration),
"2": ("Cache Management Demo", section_2_cache_demo),
"3": ("Live Check & Metadata Extraction", section_3_live_head),
"4": ("BM25 Relevance Scoring", section_4_bm25_scoring),
"5": ("Complete Pipeline (Discover → Filter → Crawl)", section_5_keyword_filter_to_agent),
"6": ("Multi-Domain Discovery", section_6_multi_domain),
"7": ("Run All Demos", None)
}
console.print("\n[bold]Available Demos:[/bold]")
for key, (title, _) in sections.items():
console.print(f" {key}. {title}")
choice = Prompt.ask("\n[cyan]Which demo would you like to run?[/cyan]",
choices=list(sections.keys()),
default="7")
console.print()
if choice == "7":
# Run all demos
for key, (title, func) in sections.items():
if key != "7" and func:
await func(seed)
if key != "6": # Don't pause after the last demo
if not Confirm.ask("\n[yellow]Continue to next demo?[/yellow]", default=True):
break
console.print()
else:
# Run selected demo
_, func = sections[choice]
await func(seed)
console.rule("[bold green]Demo Complete ✔︎")
if __name__ == "__main__":
asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/url_seeder/url_seeder_quick_demo.py | docs/examples/url_seeder/url_seeder_quick_demo.py
"""
🚀 URL Seeder + AsyncWebCrawler = Magic!
Quick demo showing discovery → filter → crawl pipeline
Note: Uses context manager for automatic cleanup of resources.
"""
import asyncio, os
from crawl4ai import AsyncUrlSeeder, AsyncWebCrawler, SeedingConfig, CrawlerRunConfig, AsyncLogger, DefaultMarkdownGenerator
from crawl4ai.content_filter_strategy import PruningContentFilter
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# 🔍 Example 1: Discover ALL → Filter → Crawl
async def discover_and_crawl():
"""Find Python module tutorials & extract them all!"""
async with AsyncUrlSeeder(logger=AsyncLogger()) as seeder:
# Step 1: See how many URLs exist (spoiler: A LOT!)
print("📊 Let's see what RealPython has...")
all_urls = await seeder.urls("realpython.com",
SeedingConfig(source="sitemap"))
print(f"😱 Found {len(all_urls)} total URLs!")
# Step 2: Filter for Python modules (perfect size ~13)
print("\n🎯 Filtering for 'python-modules' tutorials...")
module_urls = await seeder.urls("realpython.com",
SeedingConfig(
source="sitemap",
pattern="*python-modules*",
live_check=True # Make sure they're alive!
))
print(f"✨ Found {len(module_urls)} module tutorials")
for url in module_urls[:3]: # Show first 3
status = "✅" if url["status"] == "valid" else "❌"
print(f"{status} {url['url']}")
# Step 3: Crawl them all with pruning (keep it lean!)
print("\n🕷️ Crawling all module tutorials...")
async with AsyncWebCrawler() as crawler:
config = CrawlerRunConfig(
markdown_generator=DefaultMarkdownGenerator(
content_filter=PruningContentFilter( # Smart filtering!
threshold=0.48, # Remove fluff
threshold_type="fixed",
),
),
only_text=True,
stream=True,
)
# Extract just the URLs from the seeder results
urls_to_crawl = [u["url"] for u in module_urls[:5]]
results = await crawler.arun_many(urls_to_crawl, config=config)
# Process & save
saved = 0
async for result in results:
if result.success:
# Save each tutorial (name from URL)
name = result.url.split("/")[-2] + ".md"
name = os.path.join(CURRENT_DIR, name)
with open(name, "w") as f:
f.write(result.markdown.fit_markdown)
saved += 1
print(f"💾 Saved: {name}")
print(f"\n🎉 Successfully saved {saved} tutorials!")
# 🔍 Example 2: Beautiful Soup articles with metadata peek
async def explore_beautifulsoup():
"""Discover BeautifulSoup content & peek at metadata"""
async with AsyncUrlSeeder(logger=AsyncLogger()) as seeder:
print("🍲 Looking for Beautiful Soup articles...")
soup_urls = await seeder.urls("realpython.com",
SeedingConfig(
source="sitemap",
pattern="*beautiful-soup*",
extract_head=True # Get the metadata!
))
print(f"\n📚 Found {len(soup_urls)} Beautiful Soup articles:\n")
# Show what we discovered
for i, url in enumerate(soup_urls, 1):
meta = url["head_data"]["meta"]
print(f"{i}. {url['head_data']['title']}")
print(f" 📝 {meta.get('description', 'No description')[:60]}...")
print(f" 👤 By: {meta.get('author', 'Unknown')}")
print(f" 🔗 {url['url']}\n")
# 🔍 Example 3: Smart search with BM25 relevance scoring
async def smart_search_with_bm25():
"""Use AI-powered relevance scoring to find the best content"""
async with AsyncUrlSeeder(logger=AsyncLogger()) as seeder:
print("🧠 Smart search: 'web scraping tutorial quiz'")
# Search with BM25 scoring - AI finds the best matches!
results = await seeder.urls("realpython.com",
SeedingConfig(
source="sitemap",
pattern="*beautiful-soup*",
extract_head=True,
query="web scraping tutorial quiz", # Our search
scoring_method="bm25",
score_threshold=0.2 # Quality filter
))
print(f"\n🎯 Top {len(results)} most relevant results:\n")
# Show ranked results with relevance scores
for i, result in enumerate(results[:3], 1):
print(f"{i}. [{result['relevance_score']:.2f}] {result['head_data']['title']}")
print(f" 🔗 {result['url'][:60]}...")
print("\n✨ BM25 automatically ranked by relevance!")
# 🎬 Run the show!
async def main():
print("=" * 60)
await discover_and_crawl()
print("\n" + "=" * 60 + "\n")
await explore_beautifulsoup()
print("\n" + "=" * 60 + "\n")
await smart_search_with_bm25()
if __name__ == "__main__":
    asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/url_seeder/bbc_sport_research_assistant.py | docs/examples/url_seeder/bbc_sport_research_assistant.py
"""
BBC Sport Research Assistant Pipeline
=====================================
This example demonstrates how URLSeeder helps create an efficient research pipeline:
1. Discover all available URLs without crawling
2. Filter and rank them based on relevance
3. Crawl only the most relevant content
4. Generate comprehensive research insights
Pipeline Steps:
1. Get user query
2. Optionally enhance query using LLM
3. Use URLSeeder to discover and rank URLs
4. Crawl top K URLs with BM25 filtering
5. Generate detailed response with citations
Requirements:
- pip install crawl4ai
- pip install litellm
- export GEMINI_API_KEY="your-api-key"
Usage:
- Run normally: python bbc_sport_research_assistant.py
- Run test mode: python bbc_sport_research_assistant.py test
Note: AsyncUrlSeeder now uses context manager for automatic cleanup.
"""
import asyncio
import json
import os
import hashlib
import pickle
from typing import Any, Dict, List, Optional, Tuple
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
# Rich for colored output
from rich.console import Console
from rich.text import Text
from rich.panel import Panel
from rich.table import Table
from rich.progress import Progress, SpinnerColumn, TextColumn
# Crawl4AI imports
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
AsyncUrlSeeder,
SeedingConfig,
AsyncLogger
)
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
# LiteLLM for AI communication
import litellm
# Initialize Rich console
console = Console()
# Get the current directory where this script is located
SCRIPT_DIR = Path(__file__).parent.resolve()
# Cache configuration - relative to script directory
CACHE_DIR = SCRIPT_DIR / "temp_cache"
CACHE_DIR.mkdir(parents=True, exist_ok=True)
# Testing limits
TESTING_MODE = True
MAX_URLS_DISCOVERY = 100 if TESTING_MODE else 1000
MAX_URLS_TO_CRAWL = 5 if TESTING_MODE else 10
def get_cache_key(prefix: str, *args) -> str:
"""Generate cache key from prefix and arguments"""
content = f"{prefix}:{'|'.join(str(arg) for arg in args)}"
return hashlib.md5(content.encode()).hexdigest()
def load_from_cache(cache_key: str) -> Optional[Any]:
"""Load data from cache if exists"""
cache_path = CACHE_DIR / f"{cache_key}.pkl"
if cache_path.exists():
with open(cache_path, 'rb') as f:
return pickle.load(f)
return None
def save_to_cache(cache_key: str, data: Any) -> None:
"""Save data to cache"""
cache_path = CACHE_DIR / f"{cache_key}.pkl"
with open(cache_path, 'wb') as f:
pickle.dump(data, f)
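# Illustrative use of the cache helpers above (key and values are assumptions): the same
# (prefix, args) pair always hashes to the same key, so repeated runs reuse earlier results.
#   key = get_cache_key("crawled_content", "https://www.bbc.com/sport/some-article", "transfer news")
#   save_to_cache(key, {"title": "..."})
#   load_from_cache(key)   # -> {"title": "..."} on a later run, or None if never cached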
@dataclass
class ResearchConfig:
"""Configuration for research pipeline"""
# Core settings
domain: str = "www.bbc.com/sport"
max_urls_discovery: int = 100
max_urls_to_crawl: int = 10
top_k_urls: int = 10
# Scoring and filtering
score_threshold: float = 0.1
scoring_method: str = "bm25"
# Processing options
use_llm_enhancement: bool = True
extract_head_metadata: bool = True
live_check: bool = True
force_refresh: bool = False
# Crawler settings
max_concurrent_crawls: int = 5
timeout: int = 30000
headless: bool = True
# Output settings
save_json: bool = True
save_markdown: bool = True
output_dir: str = None # Will be set in __post_init__
# Development settings
test_mode: bool = False
interactive_mode: bool = False
verbose: bool = True
def __post_init__(self):
"""Adjust settings based on test mode"""
if self.test_mode:
self.max_urls_discovery = 50
self.max_urls_to_crawl = 3
self.top_k_urls = 5
# Set default output directory relative to script location
if self.output_dir is None:
self.output_dir = str(SCRIPT_DIR / "research_results")
@dataclass
class ResearchQuery:
"""Container for research query and metadata"""
original_query: str
enhanced_query: Optional[str] = None
search_patterns: List[str] = None
timestamp: str = None
@dataclass
class ResearchResult:
"""Container for research results"""
query: ResearchQuery
discovered_urls: List[Dict]
crawled_content: List[Dict]
synthesis: str
citations: List[Dict]
metadata: Dict
async def get_user_query() -> str:
"""
Get research query from user input
"""
query = input("\n🔍 Enter your research query: ")
return query.strip()
async def enhance_query_with_llm(query: str) -> ResearchQuery:
"""
Use LLM to enhance the research query:
- Extract key terms
- Generate search patterns
- Identify related topics
"""
# Check cache
cache_key = get_cache_key("enhanced_query", query)
cached_result = load_from_cache(cache_key)
if cached_result:
console.print("[dim cyan]📦 Using cached enhanced query[/dim cyan]")
return cached_result
try:
response = await litellm.acompletion(
model="gemini/gemini-2.5-flash-preview-04-17",
messages=[{
"role": "user",
"content": f"""Given this research query: "{query}"
Extract:
1. Key terms and concepts (as a list)
2. Related search terms
3. A more specific/enhanced version of the query
Return as JSON:
{{
"key_terms": ["term1", "term2"],
"related_terms": ["related1", "related2"],
"enhanced_query": "enhanced version of query"
}}"""
}],
# reasoning_effort="low",
temperature=0.3,
response_format={"type": "json_object"}
)
data = json.loads(response.choices[0].message.content)
# Create search patterns
all_terms = data["key_terms"] + data["related_terms"]
patterns = [f"*{term.lower()}*" for term in all_terms]
result = ResearchQuery(
original_query=query,
enhanced_query=data["enhanced_query"],
search_patterns=patterns[:10], # Limit patterns
timestamp=datetime.now().isoformat()
)
# Cache the result
save_to_cache(cache_key, result)
return result
except Exception as e:
console.print(f"[yellow]⚠️ LLM enhancement failed: {e}[/yellow]")
# Fallback to simple tokenization
return ResearchQuery(
original_query=query,
enhanced_query=query,
search_patterns=tokenize_query_to_patterns(query),
timestamp=datetime.now().isoformat()
)
def tokenize_query_to_patterns(query: str) -> List[str]:
"""
Convert query into URL patterns for URLSeeder
Example: "AI startups funding" -> ["*ai*", "*startup*", "*funding*"]
"""
# Simple tokenization - split and create patterns
words = query.lower().split()
# Filter out common words
stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'that'}
keywords = [w for w in words if w not in stop_words and len(w) > 2]
# Create patterns
patterns = [f"*{keyword}*" for keyword in keywords]
return patterns[:8] # Limit to 8 patterns
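# Illustrative output of the tokenizer above (stop words and short words are dropped):
#   tokenize_query_to_patterns("Premier League transfer news and rumors")
#   -> ['*premier*', '*league*', '*transfer*', '*news*', '*rumors*']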
async def discover_urls(domain: str, query: str, config: ResearchConfig) -> List[Dict]:
"""
Use URLSeeder to discover and rank URLs:
1. Fetch all URLs from domain
2. Filter by patterns
3. Extract metadata (titles, descriptions)
4. Rank by BM25 relevance score
5. Return top K URLs
"""
# Check cache
cache_key = get_cache_key("discovered_urls", domain, query, config.top_k_urls)
cached_result = load_from_cache(cache_key)
if cached_result and not config.force_refresh:
console.print("[dim cyan]📦 Using cached URL discovery[/dim cyan]")
return cached_result
console.print(f"\n[cyan]🔍 Discovering URLs from {domain}...[/cyan]")
# Initialize URL seeder with context manager
async with AsyncUrlSeeder(logger=AsyncLogger(verbose=config.verbose)) as seeder:
# Configure seeding
seeding_config = SeedingConfig(
source="sitemap+cc", # Use both sitemap and Common Crawl
extract_head=config.extract_head_metadata,
query=query,
scoring_method=config.scoring_method,
score_threshold=config.score_threshold,
max_urls=config.max_urls_discovery,
live_check=config.live_check,
force=config.force_refresh
)
try:
# Discover URLs
urls = await seeder.urls(domain, seeding_config)
# Sort by relevance score (descending)
sorted_urls = sorted(
urls,
key=lambda x: x.get('relevance_score', 0),
reverse=True
)
# Take top K
top_urls = sorted_urls[:config.top_k_urls]
console.print(f"[green]✅ Discovered {len(urls)} URLs, selected top {len(top_urls)}[/green]")
# Cache the result
save_to_cache(cache_key, top_urls)
return top_urls
except Exception as e:
console.print(f"[red]❌ URL discovery failed: {e}[/red]")
return []
async def crawl_selected_urls(urls: List[str], query: str, config: ResearchConfig) -> List[Dict]:
"""
Crawl selected URLs with content filtering:
- Use AsyncWebCrawler.arun_many()
- Apply content filter
- Generate clean markdown
"""
# Extract just URLs from the discovery results
url_list = [u['url'] for u in urls if 'url' in u][:config.max_urls_to_crawl]
if not url_list:
console.print("[red]❌ No URLs to crawl[/red]")
return []
console.print(f"\n[cyan]🕷️ Crawling {len(url_list)} URLs...[/cyan]")
# Check cache for each URL
crawled_results = []
urls_to_crawl = []
for url in url_list:
cache_key = get_cache_key("crawled_content", url, query)
cached_content = load_from_cache(cache_key)
if cached_content and not config.force_refresh:
crawled_results.append(cached_content)
else:
urls_to_crawl.append(url)
if urls_to_crawl:
console.print(f"[cyan]📥 Crawling {len(urls_to_crawl)} new URLs (cached: {len(crawled_results)})[/cyan]")
# Configure markdown generator with content filter
md_generator = DefaultMarkdownGenerator(
content_filter=PruningContentFilter(
threshold=0.48,
threshold_type="dynamic",
min_word_threshold=10
),
)
# Configure crawler
crawler_config = CrawlerRunConfig(
markdown_generator=md_generator,
exclude_external_links=True,
excluded_tags=['nav', 'header', 'footer', 'aside'],
)
# Create crawler with browser config
async with AsyncWebCrawler(
config=BrowserConfig(
headless=config.headless,
verbose=config.verbose
)
) as crawler:
# Crawl URLs
results = await crawler.arun_many(
urls_to_crawl,
config=crawler_config,
max_concurrent=config.max_concurrent_crawls
)
# Process results
for url, result in zip(urls_to_crawl, results):
if result.success:
content_data = {
'url': url,
'title': result.metadata.get('title', ''),
'markdown': result.markdown.fit_markdown or result.markdown.raw_markdown,
'raw_length': len(result.markdown.raw_markdown),
'fit_length': len(result.markdown.fit_markdown) if result.markdown.fit_markdown else len(result.markdown.raw_markdown),
'metadata': result.metadata
}
crawled_results.append(content_data)
# Cache the result
cache_key = get_cache_key("crawled_content", url, query)
save_to_cache(cache_key, content_data)
else:
console.print(f" [red]❌ Failed: {url[:50]}... - {result.error}[/red]")
console.print(f"[green]✅ Successfully crawled {len(crawled_results)} URLs[/green]")
return crawled_results
async def generate_research_synthesis(
query: str,
crawled_content: List[Dict]
) -> Tuple[str, List[Dict]]:
"""
Use LLM to synthesize research findings:
- Analyze all crawled content
- Generate comprehensive answer
- Extract citations and references
"""
if not crawled_content:
return "No content available for synthesis.", []
console.print("\n[cyan]🤖 Generating research synthesis...[/cyan]")
# Prepare content for LLM
content_sections = []
for i, content in enumerate(crawled_content, 1):
section = f"""
SOURCE {i}:
Title: {content['title']}
URL: {content['url']}
Content Preview:
{content['markdown'][:1500]}...
"""
content_sections.append(section)
combined_content = "\n---\n".join(content_sections)
try:
response = await litellm.acompletion(
model="gemini/gemini-2.5-flash-preview-04-17",
messages=[{
"role": "user",
"content": f"""Research Query: "{query}"
Based on the following sources, provide a comprehensive research synthesis.
{combined_content}
Please provide:
1. An executive summary (2-3 sentences)
2. Key findings (3-5 bullet points)
3. Detailed analysis (2-3 paragraphs)
4. Future implications or trends
Format your response with clear sections and cite sources using [Source N] notation.
Keep the total response under 800 words."""
}],
# reasoning_effort="medium",
temperature=0.7
)
synthesis = response.choices[0].message.content
# Extract citations from the synthesis
citations = []
for i, content in enumerate(crawled_content, 1):
if f"[Source {i}]" in synthesis or f"Source {i}" in synthesis:
citations.append({
'source_id': i,
'title': content['title'],
'url': content['url']
})
return synthesis, citations
except Exception as e:
console.print(f"[red]❌ Synthesis generation failed: {e}[/red]")
# Fallback to simple summary
summary = f"Research on '{query}' found {len(crawled_content)} relevant articles:\n\n"
for content in crawled_content[:3]:
summary += f"- {content['title']}\n {content['url']}\n\n"
return summary, []
def format_research_output(result: ResearchResult) -> str:
"""
Format the final research output with:
- Executive summary
- Key findings
- Detailed analysis
- Citations and sources
"""
output = []
output.append("\n" + "=" * 60)
output.append("🔬 RESEARCH RESULTS")
output.append("=" * 60)
# Query info
output.append(f"\n📋 Query: {result.query.original_query}")
if result.query.enhanced_query != result.query.original_query:
output.append(f" Enhanced: {result.query.enhanced_query}")
# Discovery stats
output.append(f"\n📊 Statistics:")
output.append(f" - URLs discovered: {len(result.discovered_urls)}")
output.append(f" - URLs crawled: {len(result.crawled_content)}")
output.append(f" - Processing time: {result.metadata.get('duration', 'N/A')}")
# Synthesis
output.append(f"\n📝 SYNTHESIS")
output.append("-" * 60)
output.append(result.synthesis)
# Citations
if result.citations:
output.append(f"\n📚 SOURCES")
output.append("-" * 60)
for citation in result.citations:
output.append(f"[{citation['source_id']}] {citation['title']}")
output.append(f" {citation['url']}")
return "\n".join(output)
async def save_research_results(result: ResearchResult, config: ResearchConfig) -> Tuple[str, str]:
"""
Save research results in JSON and Markdown formats
Returns:
Tuple of (json_path, markdown_path)
"""
# Create output directory
output_dir = Path(config.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# Generate filename based on query and timestamp
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
query_slug = result.query.original_query[:50].replace(" ", "_").replace("/", "_")
base_filename = f"{timestamp}_{query_slug}"
json_path = None
md_path = None
# Save JSON
if config.save_json:
json_path = output_dir / f"{base_filename}.json"
with open(json_path, 'w') as f:
json.dump(asdict(result), f, indent=2, default=str)
console.print(f"\n[green]💾 JSON saved: {json_path}[/green]")
# Save Markdown
if config.save_markdown:
md_path = output_dir / f"{base_filename}.md"
# Create formatted markdown
md_content = [
f"# Research Report: {result.query.original_query}",
f"\n**Generated on:** {result.metadata.get('timestamp', 'N/A')}",
f"\n**Domain:** {result.metadata.get('domain', 'N/A')}",
f"\n**Processing time:** {result.metadata.get('duration', 'N/A')}",
"\n---\n",
"## Query Information",
f"- **Original Query:** {result.query.original_query}",
f"- **Enhanced Query:** {result.query.enhanced_query or 'N/A'}",
f"- **Search Patterns:** {', '.join(result.query.search_patterns or [])}",
"\n## Statistics",
f"- **URLs Discovered:** {len(result.discovered_urls)}",
f"- **URLs Crawled:** {len(result.crawled_content)}",
f"- **Sources Cited:** {len(result.citations)}",
"\n## Research Synthesis\n",
result.synthesis,
"\n## Sources\n"
]
# Add citations
for citation in result.citations:
md_content.append(f"### [{citation['source_id']}] {citation['title']}")
md_content.append(f"- **URL:** [{citation['url']}]({citation['url']})")
md_content.append("")
# Add discovered URLs summary
md_content.extend([
"\n## Discovered URLs (Top 10)\n",
"| Score | URL | Title |",
"|-------|-----|-------|"
])
for url_data in result.discovered_urls[:10]:
score = url_data.get('relevance_score', 0)
url = url_data.get('url', '')
title = 'N/A'
if 'head_data' in url_data and url_data['head_data']:
title = url_data['head_data'].get('title', 'N/A')[:60] + '...'
md_content.append(f"| {score:.3f} | {url[:50]}... | {title} |")
# Write markdown
with open(md_path, 'w') as f:
f.write('\n'.join(md_content))
console.print(f"[green]📄 Markdown saved: {md_path}[/green]")
return str(json_path) if json_path else None, str(md_path) if md_path else None
async def wait_for_user(message: str = "\nPress Enter to continue..."):
"""Wait for user input in interactive mode"""
input(message)
async def research_pipeline(
query: str,
config: ResearchConfig
) -> ResearchResult:
"""
Main research pipeline orchestrator with configurable settings
"""
start_time = datetime.now()
# Display pipeline header
header = Panel(
f"[bold cyan]Research Pipeline[/bold cyan]\n\n"
f"[dim]Domain:[/dim] {config.domain}\n"
f"[dim]Mode:[/dim] {'Test' if config.test_mode else 'Production'}\n"
f"[dim]Interactive:[/dim] {'Yes' if config.interactive_mode else 'No'}",
title="🚀 Starting",
border_style="cyan"
)
console.print(header)
# Step 1: Enhance query (optional)
console.print(f"\n[bold cyan]📝 Step 1: Query Processing[/bold cyan]")
if config.interactive_mode:
await wait_for_user()
if config.use_llm_enhancement:
research_query = await enhance_query_with_llm(query)
else:
research_query = ResearchQuery(
original_query=query,
enhanced_query=query,
search_patterns=tokenize_query_to_patterns(query),
timestamp=datetime.now().isoformat()
)
console.print(f" [green]✅ Query ready:[/green] {research_query.enhanced_query or query}")
# Step 2: Discover URLs
console.print(f"\n[bold cyan]🔍 Step 2: URL Discovery[/bold cyan]")
if config.interactive_mode:
await wait_for_user()
discovered_urls = await discover_urls(
domain=config.domain,
query=research_query.enhanced_query or query,
config=config
)
if not discovered_urls:
return ResearchResult(
query=research_query,
discovered_urls=[],
crawled_content=[],
synthesis="No relevant URLs found for the given query.",
citations=[],
metadata={'duration': str(datetime.now() - start_time)}
)
console.print(f" [green]✅ Found {len(discovered_urls)} relevant URLs[/green]")
# Step 3: Crawl selected URLs
console.print(f"\n[bold cyan]🕷️ Step 3: Content Crawling[/bold cyan]")
if config.interactive_mode:
await wait_for_user()
crawled_content = await crawl_selected_urls(
urls=discovered_urls,
query=research_query.enhanced_query or query,
config=config
)
console.print(f" [green]✅ Successfully crawled {len(crawled_content)} pages[/green]")
# Step 4: Generate synthesis
console.print(f"\n[bold cyan]🤖 Step 4: Synthesis Generation[/bold cyan]")
if config.interactive_mode:
await wait_for_user()
synthesis, citations = await generate_research_synthesis(
query=research_query.enhanced_query or query,
crawled_content=crawled_content
)
console.print(f" [green]✅ Generated synthesis with {len(citations)} citations[/green]")
# Step 5: Create result
result = ResearchResult(
query=research_query,
discovered_urls=discovered_urls,
crawled_content=crawled_content,
synthesis=synthesis,
citations=citations,
metadata={
'duration': str(datetime.now() - start_time),
'domain': config.domain,
'timestamp': datetime.now().isoformat(),
'config': asdict(config)
}
)
duration = datetime.now() - start_time
console.print(f"\n[bold green]✅ Research completed in {duration}[/bold green]")
return result
async def main():
"""
Main entry point for the BBC Sport Research Assistant
"""
# Example queries
example_queries = [
"Premier League transfer news and rumors",
"Champions League match results and analysis",
"World Cup qualifying updates",
"Football injury reports and return dates",
"Tennis grand slam tournament results"
]
# Display header
console.print(Panel.fit(
"[bold cyan]BBC Sport Research Assistant[/bold cyan]\n\n"
"This tool demonstrates efficient research using URLSeeder:\n"
"[dim]• Discover all URLs without crawling\n"
"• Filter and rank by relevance\n"
"• Crawl only the most relevant content\n"
"• Generate AI-powered insights with citations[/dim]\n\n"
f"[dim]📁 Working directory: {SCRIPT_DIR}[/dim]",
title="🔬 Welcome",
border_style="cyan"
))
# Configuration options table
config_table = Table(title="\n⚙️ Configuration Options", show_header=False, box=None)
config_table.add_column(style="bold cyan", width=3)
config_table.add_column()
config_table.add_row("1", "Quick Test Mode (3 URLs, fast)")
config_table.add_row("2", "Standard Mode (10 URLs, balanced)")
config_table.add_row("3", "Comprehensive Mode (20 URLs, thorough)")
config_table.add_row("4", "Custom Configuration")
console.print(config_table)
config_choice = input("\nSelect configuration (1-4): ").strip()
# Create config based on choice
if config_choice == "1":
config = ResearchConfig(test_mode=True, interactive_mode=False)
elif config_choice == "2":
config = ResearchConfig(max_urls_to_crawl=10, top_k_urls=10)
elif config_choice == "3":
config = ResearchConfig(max_urls_to_crawl=20, top_k_urls=20, max_urls_discovery=200)
else:
# Custom configuration
config = ResearchConfig()
config.test_mode = input("\nTest mode? (y/n): ").lower() == 'y'
config.interactive_mode = input("Interactive mode (pause between steps)? (y/n): ").lower() == 'y'
config.use_llm_enhancement = input("Use AI to enhance queries? (y/n): ").lower() == 'y'
if not config.test_mode:
try:
config.max_urls_to_crawl = int(input("Max URLs to crawl (default 10): ") or "10")
config.top_k_urls = int(input("Top K URLs to select (default 10): ") or "10")
except ValueError:
console.print("[yellow]Using default values[/yellow]")
# Display example queries
query_table = Table(title="\n📋 Example Queries", show_header=False, box=None)
query_table.add_column(style="bold cyan", width=3)
query_table.add_column()
for i, q in enumerate(example_queries, 1):
query_table.add_row(str(i), q)
console.print(query_table)
query_input = input("\nSelect a query (1-5) or enter your own: ").strip()
if query_input.isdigit() and 1 <= int(query_input) <= len(example_queries):
query = example_queries[int(query_input) - 1]
else:
query = query_input if query_input else example_queries[0]
console.print(f"\n[bold cyan]📝 Selected Query:[/bold cyan] {query}")
# Run the research pipeline
result = await research_pipeline(query=query, config=config)
# Display results
formatted_output = format_research_output(result)
# print(formatted_output)
console.print(Panel.fit(
formatted_output,
title="🔬 Research Results",
border_style="green"
))
# Save results
if config.save_json or config.save_markdown:
json_path, md_path = await save_research_results(result, config)
# print(f"\n✅ Results saved successfully!")
if json_path:
console.print(f"[green]JSON saved at:[/green] {json_path}")
if md_path:
console.print(f"[green]Markdown saved at:[/green] {md_path}")
if __name__ == "__main__":
    asyncio.run(main())
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/url_seeder/convert_tutorial_to_colab.py | docs/examples/url_seeder/convert_tutorial_to_colab.py
#!/usr/bin/env python3
"""
Convert Crawl4AI URL Seeder tutorial markdown to Colab notebook format
"""
import json
import re
from pathlib import Path
def parse_markdown_to_cells(markdown_content):
"""Parse markdown content and convert to notebook cells"""
cells = []
# Split content by cell markers
lines = markdown_content.split('\n')
# Extract the header content before first cell marker
header_lines = []
i = 0
while i < len(lines) and not lines[i].startswith('# cell'):
header_lines.append(lines[i])
i += 1
# Add header as markdown cell if it exists
if header_lines:
header_content = '\n'.join(header_lines).strip()
if header_content:
cells.append({
"cell_type": "markdown",
"metadata": {},
"source": header_content.split('\n')
})
# Process cells marked with # cell X type:Y
current_cell_content = []
current_cell_type = None
while i < len(lines):
line = lines[i]
# Check for cell marker
cell_match = re.match(r'^# cell (\d+) type:(markdown|code)$', line)
if cell_match:
# Save previous cell if exists
if current_cell_content and current_cell_type:
content = '\n'.join(current_cell_content).strip()
if content:
if current_cell_type == 'code':
cells.append({
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": content.split('\n')
})
else:
cells.append({
"cell_type": "markdown",
"metadata": {},
"source": content.split('\n')
})
# Start new cell
current_cell_type = cell_match.group(2)
current_cell_content = []
else:
# Add line to current cell
current_cell_content.append(line)
i += 1
# Add last cell if exists
if current_cell_content and current_cell_type:
content = '\n'.join(current_cell_content).strip()
if content:
if current_cell_type == 'code':
cells.append({
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": content.split('\n')
})
else:
cells.append({
"cell_type": "markdown",
"metadata": {},
"source": content.split('\n')
})
return cells
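# Illustrative input (assumed) for the parser above and the cells it produces:
#
#   Some intro text...            -> header markdown cell
#   # cell 1 type:markdown
#   ## Setup                      -> markdown cell
#   # cell 2 type:code
#   print("hello")                -> code cell
#
# parse_markdown_to_cells() returns three cells for the snippet above.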
def create_colab_notebook(cells):
"""Create a Colab notebook structure"""
notebook = {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Crawl4AI_URL_Seeder_Tutorial.ipynb",
"provenance": [],
"collapsed_sections": [],
"toc_visible": True
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": cells
}
return notebook
def main():
# Read the markdown file
md_path = Path("tutorial_url_seeder.md")
if not md_path.exists():
print(f"Error: {md_path} not found!")
return
print(f"Reading {md_path}...")
with open(md_path, 'r', encoding='utf-8') as f:
markdown_content = f.read()
# Parse markdown to cells
print("Parsing markdown content...")
cells = parse_markdown_to_cells(markdown_content)
print(f"Created {len(cells)} cells")
# Create notebook
print("Creating Colab notebook...")
notebook = create_colab_notebook(cells)
# Save notebook
output_path = Path("Crawl4AI_URL_Seeder_Tutorial.ipynb")
with open(output_path, 'w', encoding='utf-8') as f:
json.dump(notebook, f, indent=2, ensure_ascii=False)
print(f"✅ Successfully created {output_path}")
print(f" - Total cells: {len(cells)}")
print(f" - Markdown cells: {sum(1 for c in cells if c['cell_type'] == 'markdown')}")
print(f" - Code cells: {sum(1 for c in cells if c['cell_type'] == 'code')}")
if __name__ == "__main__":
    main()
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker/demo_docker_api.py | docs/examples/docker/demo_docker_api.py
import asyncio
import httpx
import json
import os
import time
from typing import List, Dict, Any, AsyncGenerator, Optional
import textwrap # ← new: for pretty code literals
import urllib.parse # ← needed for URL-safe /llm calls
from dotenv import load_dotenv
from rich.console import Console
from rich.syntax import Syntax
from rich.panel import Panel
from rich.table import Table
# --- Setup & Configuration ---
load_dotenv() # Load environment variables from .env file
console = Console()
# --- Configuration ---
# Default to the standard container port; override via CRAWL4AI_TEST_URL (e.g. http://localhost:8020)
BASE_URL = os.getenv("CRAWL4AI_TEST_URL", "http://localhost:11235")
# Target URLs
SIMPLE_URL = "https://httpbin.org/html"  # simple page for demo purposes (https://example.com also works)
LINKS_URL = "https://httpbin.org/links/10/0"
FORMS_URL = "https://httpbin.org/forms/post" # For JS demo
BOOKS_URL = "http://books.toscrape.com/" # For CSS extraction
PYTHON_URL = "https://python.org" # For deeper crawl
# Use the same sample site as deep crawl tests for consistency
DEEP_CRAWL_BASE_URL = os.getenv(
"DEEP_CRAWL_TEST_SITE", "https://docs.crawl4ai.com/samples/deepcrawl/")
DEEP_CRAWL_DOMAIN = "docs.crawl4ai.com"
# --- Helper Functions ---
async def check_server_health(client: httpx.AsyncClient):
"""Check if the server is healthy before running tests."""
console.print("[bold cyan]Checking server health...[/]", end="")
try:
response = await client.get("/health", timeout=10.0)
response.raise_for_status()
health_data = response.json()
console.print(
f"[bold green] Server OK! Version: {health_data.get('version', 'N/A')}[/]")
return True
except (httpx.RequestError, httpx.HTTPStatusError) as e:
console.print(f"\n[bold red]Server health check FAILED:[/]")
console.print(f"Error: {e}")
console.print(f"Is the server running at {BASE_URL}?")
return False
except Exception as e:
console.print(
f"\n[bold red]An unexpected error occurred during health check:[/]")
console.print(e)
return False
def print_payload(payload: Dict[str, Any]):
"""Prints the JSON payload nicely with a dark theme."""
syntax = Syntax(
json.dumps(payload, indent=2),
"json",
theme="monokai", # <--- Changed theme here
line_numbers=False,
word_wrap=True # Added word wrap for potentially long payloads
)
console.print(Panel(syntax, title="Request Payload",
border_style="blue", expand=False))
def print_result_summary(results: List[Dict[str, Any]], title: str = "Crawl Results Summary", max_items: int = 3):
"""Prints a concise summary of crawl results."""
if not results:
console.print(f"[yellow]{title}: No results received.[/]")
return
console.print(Panel(f"[bold]{title}[/]",
border_style="green", expand=False))
count = 0
for result in results:
if count >= max_items:
console.print(
f"... (showing first {max_items} of {len(results)} results)")
break
count += 1
success_icon = "[green]✔[/]" if result.get('success') else "[red]✘[/]"
url = result.get('url', 'N/A')
status = result.get('status_code', 'N/A')
content_info = ""
if result.get('extracted_content'):
content_str = json.dumps(result['extracted_content'])
snippet = (
content_str[:70] + '...') if len(content_str) > 70 else content_str
content_info = f" | Extracted: [cyan]{snippet}[/]"
elif result.get('markdown'):
content_info = f" | Markdown: [cyan]Present[/]"
elif result.get('html'):
content_info = f" | HTML Size: [cyan]{len(result['html'])}[/]"
console.print(
f"{success_icon} URL: [link={url}]{url}[/link] (Status: {status}){content_info}")
if "metadata" in result and "depth" in result["metadata"]:
console.print(f" Depth: {result['metadata']['depth']}")
if not result.get('success') and result.get('error_message'):
console.print(f" [red]Error: {result['error_message']}[/]")
async def make_request(client: httpx.AsyncClient, endpoint: str, payload: Dict[str, Any], title: str) -> Optional[List[Dict[str, Any]]]:
"""Handles non-streaming POST requests."""
console.rule(f"[bold blue]{title}[/]", style="blue")
print_payload(payload)
console.print(f"Sending POST request to {client.base_url}{endpoint}...")
try:
start_time = time.time()
response = await client.post(endpoint, json=payload)
duration = time.time() - start_time
console.print(
f"Response Status: [bold {'green' if response.is_success else 'red'}]{response.status_code}[/] (took {duration:.2f}s)")
response.raise_for_status()
data = response.json()
if data.get("success"):
results = data.get("results", [])
print_result_summary(results, title=f"{title} Results")
return results
else:
console.print("[bold red]Request reported failure:[/]")
console.print(data)
return None
except httpx.HTTPStatusError as e:
console.print(f"[bold red]HTTP Error:[/]")
console.print(f"Status: {e.response.status_code}")
try:
console.print(Panel(Syntax(json.dumps(
e.response.json(), indent=2), "json", theme="default"), title="Error Response"))
except json.JSONDecodeError:
console.print(f"Response Body: {e.response.text}")
except httpx.RequestError as e:
console.print(f"[bold red]Request Error: {e}[/]")
except Exception as e:
console.print(f"[bold red]Unexpected Error: {e}[/]")
return None
async def stream_request(client: httpx.AsyncClient, endpoint: str, payload: Dict[str, Any], title: str):
"""Handles streaming POST requests."""
console.rule(f"[bold magenta]{title}[/]", style="magenta")
print_payload(payload)
console.print(
f"Sending POST stream request to {client.base_url}{endpoint}...")
all_results = []
initial_status_code = None # Store initial status code
try:
start_time = time.time()
async with client.stream("POST", endpoint, json=payload) as response:
initial_status_code = response.status_code # Capture initial status
duration = time.time() - start_time # Time to first byte potentially
console.print(
f"Initial Response Status: [bold {'green' if response.is_success else 'red'}]{initial_status_code}[/] (first byte ~{duration:.2f}s)")
response.raise_for_status() # Raise exception for bad *initial* status codes
console.print("[magenta]--- Streaming Results ---[/]")
completed = False
async for line in response.aiter_lines():
if line:
try:
data = json.loads(line)
if data.get("status") == "completed":
completed = True
console.print(
"[bold green]--- Stream Completed ---[/]")
break
elif data.get("url"): # Looks like a result dictionary
all_results.append(data)
# Display summary info as it arrives
success_icon = "[green]✔[/]" if data.get(
'success') else "[red]✘[/]"
url = data.get('url', 'N/A')
# Display status code FROM THE RESULT DATA if available
result_status = data.get('status_code', 'N/A')
console.print(
f" {success_icon} Received: [link={url}]{url}[/link] (Status: {result_status})")
if not data.get('success') and data.get('error_message'):
console.print(
f" [red]Error: {data['error_message']}[/]")
else:
console.print(
f" [yellow]Stream meta-data:[/yellow] {data}")
except json.JSONDecodeError:
console.print(
f" [red]Stream decode error for line:[/red] {line}")
if not completed:
console.print(
"[bold yellow]Warning: Stream ended without 'completed' marker.[/]")
except httpx.HTTPStatusError as e:
# Use the captured initial status code if available, otherwise from the exception
status = initial_status_code if initial_status_code is not None else e.response.status_code
console.print(f"[bold red]HTTP Error (Initial Request):[/]")
console.print(f"Status: {status}")
try:
console.print(Panel(Syntax(json.dumps(
e.response.json(), indent=2), "json", theme="default"), title="Error Response"))
except json.JSONDecodeError:
console.print(f"Response Body: {e.response.text}")
except httpx.RequestError as e:
console.print(f"[bold red]Request Error: {e}[/]")
except Exception as e:
console.print(f"[bold red]Unexpected Error during streaming: {e}[/]")
# Print stack trace for unexpected errors
console.print_exception(show_locals=False)
# Call print_result_summary with the *collected* results AFTER the stream is done
print_result_summary(all_results, title=f"{title} Collected Results")
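# The stream handler above expects newline-delimited JSON: one object per line, either a
# per-URL result dict or a terminal status marker. Illustrative (assumed) lines:
#   {"url": "https://example.com/page1", "success": true, "status_code": 200, ...}
#   {"status": "completed"}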
def load_proxies_from_env() -> List[Dict]:
"""
Load proxies from the PROXIES environment variable.
Expected format: IP:PORT:USER:PASS,IP:PORT,IP2:PORT2:USER2:PASS2,...
Returns a list of dictionaries suitable for the 'params' of ProxyConfig.
"""
proxies_params_list = []
proxies_str = os.getenv("PROXIES", "")
if not proxies_str:
# console.print("[yellow]PROXIES environment variable not set or empty.[/]")
return proxies_params_list # Return empty list if not set
try:
proxy_entries = proxies_str.split(",")
for entry in proxy_entries:
entry = entry.strip()
if not entry:
continue
parts = entry.split(":")
proxy_dict = {}
if len(parts) == 4: # Format: IP:PORT:USER:PASS
ip, port, username, password = parts
proxy_dict = {
"server": f"http://{ip}:{port}", # Assuming http protocol
"username": username,
"password": password,
# "ip": ip # 'ip' is not a standard ProxyConfig param, 'server' contains it
}
elif len(parts) == 2: # Format: IP:PORT
ip, port = parts
proxy_dict = {
"server": f"http://{ip}:{port}",
# "ip": ip
}
else:
console.print(
f"[yellow]Skipping invalid proxy string format:[/yellow] {entry}")
continue
proxies_params_list.append(proxy_dict)
except Exception as e:
console.print(
f"[red]Error loading proxies from environment:[/red] {e}")
if proxies_params_list:
console.print(
f"[cyan]Loaded {len(proxies_params_list)} proxies from environment.[/]")
# else:
# console.print("[yellow]No valid proxies loaded from environment.[/]")
return proxies_params_list
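# Hypothetical example: with PROXIES="1.2.3.4:8080:alice:secret,5.6.7.8:3128",
# load_proxies_from_env() returns:
#   [{"server": "http://1.2.3.4:8080", "username": "alice", "password": "secret"},
#    {"server": "http://5.6.7.8:3128"}]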
# --- Demo Functions ---
# 1. Basic Crawling
async def demo_basic_single_url(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS"
}
}
}
result = await make_request(client, "/crawl", payload, "Demo 1a: Basic Single URL Crawl")
return result
async def demo_basic_multi_url(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL, LINKS_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {"type": "CrawlerRunConfig", "params": {"cache_mode": "BYPASS"}}
}
result = await make_request(client, "/crawl", payload, "Demo 1b: Basic Multi URL Crawl")
return result
async def demo_streaming_multi_url(client: httpx.AsyncClient):
payload = {
# "urls": [SIMPLE_URL, LINKS_URL, FORMS_URL, SIMPLE_URL, LINKS_URL, FORMS_URL], # Add another URL
"urls": [
"https://example.com/page1",
"https://example.com/page2",
"https://example.com/page3",
"https://example.com/page4",
"https://example.com/page5"
        ], # five placeholder URLs to exercise the streaming endpoint
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"stream": True,
}
}
}
result = await stream_request(client, "/crawl/stream", payload, "Demo 1c: Streaming Multi URL Crawl")
return result
# 2. Markdown Generation & Content Filtering
async def demo_markdown_default(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"markdown_generator": {
"type": "DefaultMarkdownGenerator",
"params": {
"content_source": "fit_html",
"options": {
"type": "dict",
"value": {
"ignore_links": True
}
}
}
} # Explicitly default
}
}
}
result = await make_request(client, "/crawl", payload, "Demo 2a: Default Markdown Generation")
return result
async def demo_markdown_pruning(client: httpx.AsyncClient):
payload = {
"urls": [PYTHON_URL], # Use a more complex page
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"markdown_generator": {
"type": "DefaultMarkdownGenerator",
"params": {
"content_filter": {
"type": "PruningContentFilter",
"params": {
"threshold": 0.6,
"threshold_type": "relative"
}
}
}
}
}
}
}
result = await make_request(client, "/crawl", payload, "Demo 2b: Markdown with Pruning Filter")
return result
async def demo_markdown_bm25(client: httpx.AsyncClient):
payload = {
"urls": [PYTHON_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"markdown_generator": {
"type": "DefaultMarkdownGenerator",
"params": {
"content_filter": {
"type": "BM25ContentFilter",
"params": {
"user_query": "Python documentation language reference"
}
}
}
}
}
}
}
result = await make_request(client, "/crawl", payload, "Demo 2c: Markdown with BM25 Filter")
return result
# 3. Specific Parameters
# Corrected Demo Function: demo_param_css_selector
async def demo_param_css_selector(client: httpx.AsyncClient):
css_selector = ".main-content" # Using the suggested correct selector
payload = {
"urls": [PYTHON_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"css_selector": css_selector # Target specific div
# No extraction strategy is needed to demo this parameter's effect on input HTML
}
}
}
results = await make_request(client, "/crawl", payload, f"Demo 3a: Using css_selector ('{css_selector}')")
if results:
result = results[0]
if result['success'] and result.get('html'):
# Check if the returned HTML is likely constrained
# A simple check: does it contain expected content from within the selector,
# and does it LACK content known to be outside (like footer links)?
html_content = result['html']
# Text likely within .main-content somewhere
content_present = 'Python Software Foundation' in html_content
# Text likely in the footer, outside .main-content
footer_absent = 'Legal Statements' not in html_content
console.print(
f" Content Check: Text inside '{css_selector}' likely present? {'[green]Yes[/]' if content_present else '[red]No[/]'}")
console.print(
f" Content Check: Text outside '{css_selector}' (footer) likely absent? {'[green]Yes[/]' if footer_absent else '[red]No[/]'}")
if not content_present or not footer_absent:
console.print(
f" [yellow]Note:[/yellow] HTML filtering might not be precise or page structure changed. Result HTML length: {len(html_content)}")
else:
console.print(
f" [green]Verified:[/green] Returned HTML appears limited by css_selector. Result HTML length: {len(html_content)}")
elif result['success']:
console.print(
"[yellow]HTML content was empty in the successful result.[/]")
# Error message is handled by print_result_summary called by make_request
async def demo_param_js_execution(client: httpx.AsyncClient):
payload = {
"urls": ["https://example.com"], # Use a page with a form
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
                # Simple JS that rewrites the page heading and returns the new text
"js_code": """
(() => {
document.querySelector('h1').innerText = 'Crawl4AI Demo';
return { filled_name: document.querySelector('h1').innerText };
})();
""",
"delay_before_return_html": 0.5 # Give JS time to potentially run
}
}
}
results = await make_request(client, "/crawl", payload, "Demo 3b: Using js_code Parameter")
if results and results[0].get("js_execution_result"):
console.print("[cyan]JS Execution Result:[/]",
results[0]["js_execution_result"])
elif results:
console.print("[yellow]JS Execution Result not found in response.[/]")
async def demo_param_screenshot(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {"cache_mode": "BYPASS", "screenshot": True}
}
}
results = await make_request(client, "/crawl", payload, "Demo 3c: Taking a Screenshot")
if results and results[0].get("screenshot"):
console.print(
f"[cyan]Screenshot data received (length):[/] {len(results[0]['screenshot'])}")
elif results:
console.print("[yellow]Screenshot data not found in response.[/]")
async def demo_param_ssl_fetch(client: httpx.AsyncClient):
payload = {
"urls": [PYTHON_URL], # Needs HTTPS
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {"cache_mode": "BYPASS", "fetch_ssl_certificate": True}
}
}
results = await make_request(client, "/crawl", payload, "Demo 3d: Fetching SSL Certificate")
if results and results[0].get("ssl_certificate"):
console.print("[cyan]SSL Certificate Info:[/]")
console.print(results[0]["ssl_certificate"])
elif results:
console.print("[yellow]SSL Certificate data not found in response.[/]")
async def demo_param_proxy(client: httpx.AsyncClient):
proxy_params_list = load_proxies_from_env() # Get the list of parameter dicts
if not proxy_params_list:
console.rule(
"[bold yellow]Demo 3e: Using Proxies (SKIPPED)[/]", style="yellow")
console.print("Set the PROXIES environment variable to run this demo.")
console.print("Format: IP:PORT:USR:PWD,IP:PORT,...")
return
payload = {
"urls": ["https://httpbin.org/ip"], # URL that shows originating IP
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"proxy_rotation_strategy": {
"type": "RoundRobinProxyStrategy",
"params": {
"proxies": [
# [
# {
# "type": "ProxyConfig",
# "params": {
# server:"...",
# "username": "...",
# "password": "..."
# }
# },
# ...
# ]
# Filter out the 'ip' key when sending to server, as it's not part of ProxyConfig
{"type": "ProxyConfig", "params": {
k: v for k, v in p.items() if k != 'ip'}}
for p in proxy_params_list
]
}
}
}
}
}
results = await make_request(client, "/crawl", payload, "Demo 3e: Using Proxies")
# --- Verification Logic ---
if results and results[0].get("success"):
result = results[0]
try:
# httpbin.org/ip returns JSON within the HTML body's <pre> tag
html_content = result.get('html', '')
# Basic extraction - find JSON within <pre> tags or just the JSON itself
json_str = None
if '<pre' in html_content:
start = html_content.find('{')
end = html_content.rfind('}')
if start != -1 and end != -1:
json_str = html_content[start:end+1]
elif html_content.strip().startswith('{'): # Maybe it's just JSON
json_str = html_content.strip()
if json_str:
ip_data = json.loads(json_str)
origin_ip = ip_data.get("origin")
console.print(
f" Origin IP reported by httpbin: [bold yellow]{origin_ip}[/]")
# Extract the IPs from the proxy list for comparison
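                # ("server" looks like "http://IP:PORT"; splitting on ":" yields
                #  ["http", "//IP", "PORT"], so [1][2:] strips the "//" and keeps the bare IP)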
proxy_ips = {p.get("server").split(
":")[1][2:] for p in proxy_params_list}
if origin_ip and origin_ip in proxy_ips:
console.print(
"[bold green] Verification SUCCESS: Origin IP matches one of the provided proxies![/]")
elif origin_ip:
console.print(
"[bold red] Verification FAILED: Origin IP does not match any provided proxy IPs.[/]")
console.print(f" Provided Proxy IPs: {proxy_ips}")
else:
console.print(
"[yellow] Verification SKIPPED: Could not extract origin IP from response.[/]")
else:
console.print(
"[yellow] Verification SKIPPED: Could not find JSON in httpbin response HTML.[/]")
# console.print(f"HTML Received:\n{html_content[:500]}...") # Uncomment for debugging
except json.JSONDecodeError:
console.print(
"[red] Verification FAILED: Could not parse JSON from httpbin response HTML.[/]")
except Exception as e:
console.print(
f"[red] Verification Error: An unexpected error occurred during IP check: {e}[/]")
elif results:
console.print(
"[yellow] Verification SKIPPED: Crawl for IP check was not successful.[/]")
# 4. Extraction Strategies
async def demo_extract_css(client: httpx.AsyncClient):
# Schema to extract book titles and prices
book_schema = {
"name": "BookList",
"baseSelector": "ol.row li.col-xs-6",
"fields": [
{"name": "title", "selector": "article.product_pod h3 a",
"type": "attribute", "attribute": "title"},
{"name": "price", "selector": "article.product_pod .price_color", "type": "text"},
]
}
payload = {
"urls": [BOOKS_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"extraction_strategy": {
"type": "JsonCssExtractionStrategy",
"params": {
"schema": {
"type": "dict",
"value": book_schema
}
}
}
}
}
}
results = await make_request(client, "/crawl", payload, "Demo 4a: JSON/CSS Extraction")
if results and results[0].get("success") and results[0].get("extracted_content"):
try:
extracted_data = json.loads(results[0]["extracted_content"])
if isinstance(extracted_data, list) and extracted_data:
console.print("[cyan]Sample Extracted Books (CSS):[/]")
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Title", style="dim")
table.add_column("Price")
for item in extracted_data[:5]: # Show first 5
table.add_row(item.get('title', 'N/A'),
item.get('price', 'N/A'))
console.print(table)
else:
console.print(
"[yellow]CSS extraction did not return a list of results.[/]")
console.print(extracted_data)
except json.JSONDecodeError:
console.print("[red]Failed to parse extracted_content as JSON.[/]")
except Exception as e:
console.print(
f"[red]Error processing extracted CSS content: {e}[/]")
# 5. LLM Extraction
async def demo_extract_llm(client: httpx.AsyncClient):
if not os.getenv("OPENAI_API_KEY"): # Basic check for a common key
console.rule(
"[bold yellow]Demo 4b: LLM Extraction (SKIPPED)[/]", style="yellow")
console.print(
"Set an LLM API key (e.g., OPENAI_API_KEY) in your .env file or environment.")
return
payload = {
"urls": [SIMPLE_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"extraction_strategy": {
"type": "LLMExtractionStrategy",
"params": {
"instruction": "Extract title and author into JSON.",
"llm_config": { # Optional: Specify provider if not default
"type": "LLMConfig",
"params": {}
# Relies on server's default provider from config.yml & keys from .llm.env
# "params": {
# "provider": "openai/gpt-4o-mini",
# "api_key": os.getenv("OPENAI_API_KEY") # Optional: Override key
# }
},
"schema": { # Request structured output
"type": "dict",
"value": {
"title": "BookInfo", "type": "object",
"properties": {
"book_title": {"type": "string"},
"book_author": {"type": "string"}
}
}
}
}
}
}
}
}
results = await make_request(client, "/crawl", payload, "Demo 4b: LLM Extraction")
if results and results[0].get("success") and results[0].get("extracted_content"):
try:
extracted_data = json.loads(results[0]["extracted_content"])
# Handle potential list wrapper from server
if isinstance(extracted_data, list) and extracted_data:
extracted_data = extracted_data[0]
if isinstance(extracted_data, dict):
console.print("[cyan]Extracted Data (LLM):[/]")
syntax = Syntax(json.dumps(extracted_data, indent=2),
"json", theme="monokai", line_numbers=False)
console.print(Panel(syntax, border_style="cyan", expand=False))
else:
console.print(
"[yellow]LLM extraction did not return expected dictionary.[/]")
console.print(extracted_data)
except json.JSONDecodeError:
console.print(
"[red]Failed to parse LLM extracted_content as JSON.[/]")
except Exception as e:
console.print(
f"[red]Error processing extracted LLM content: {e}[/]")
# 6. Deep Crawling
async def demo_deep_basic(client: httpx.AsyncClient):
payload = {
"urls": [DEEP_CRAWL_BASE_URL],
"browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"cache_mode": "BYPASS",
"deep_crawl_strategy": {
"type": "BFSDeepCrawlStrategy",
"params": {
"max_depth": 1,
"max_pages": 4,
"filter_chain": {
"type": "FilterChain",
"params": {
"filters": [
{
"type": "DomainFilter",
"params":
{
"allowed_domains": [DEEP_CRAWL_DOMAIN]
}
}
]
}
}
}
}
}
}
}
results = await make_request(client, "/crawl", payload, "Demo 5a: Basic Deep Crawl")
# print_result_summary is called by make_request, showing URLs and depths
for result in results:
if result.get("success") and result.get("metadata"):
depth = result["metadata"].get("depth", "N/A")
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | true |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker/demo_docker_polling.py | docs/examples/docker/demo_docker_polling.py |
#!/usr/bin/env python3
"""
demo_docker_polling.py
Quick sanity-check for the asynchronous crawl job endpoints:
• POST /crawl/job – enqueue work, get task_id
• GET /crawl/job/{id} – poll status / fetch result
The style matches demo_docker_api.py (console.rule banners, helper
functions, coloured status lines). Adjust BASE_URL as needed.
Run: python demo_docker_polling.py
"""
import asyncio, json, os, time, urllib.parse
from typing import Dict, List
import httpx
from rich.console import Console
from rich.panel import Panel
from rich.syntax import Syntax
console = Console()
BASE_URL = os.getenv("BASE_URL", "http://localhost:11234")
SIMPLE_URL = "https://example.org"
LINKS_URL = "https://httpbin.org/links/10/1"
# --- helpers --------------------------------------------------------------
def print_payload(payload: Dict):
console.print(Panel(Syntax(json.dumps(payload, indent=2),
"json", theme="monokai", line_numbers=False),
title="Payload", border_style="cyan", expand=False))
async def check_server_health(client: httpx.AsyncClient) -> bool:
try:
resp = await client.get("/health")
if resp.is_success:
console.print("[green]Server healthy[/]")
return True
except Exception:
pass
console.print("[bold red]Server is not responding on /health[/]")
return False
async def poll_for_result(client: httpx.AsyncClient, task_id: str,
poll_interval: float = 1.5, timeout: float = 90.0):
"""Hit /crawl/job/{id} until COMPLETED/FAILED or timeout."""
start = time.time()
while True:
resp = await client.get(f"/crawl/job/{task_id}")
resp.raise_for_status()
data = resp.json()
status = data.get("status")
        if status and status.upper() in ("COMPLETED", "FAILED"):
return data
if time.time() - start > timeout:
raise TimeoutError(f"Task {task_id} did not finish in {timeout}s")
await asyncio.sleep(poll_interval)
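# Illustrative request/response flow (field names taken from the demos below, values hypothetical):
#   POST /crawl/job                    -> {"task_id": "abc123"}
#   GET  /crawl/job/abc123 (pending)   -> {"status": "PROCESSING", ...}
#   GET  /crawl/job/abc123 (finished)  -> {"status": "COMPLETED", "result": "<JSON string containing a 'results' list>"}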
# --- demo functions -------------------------------------------------------
async def demo_poll_single_url(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL],
"browser_config": {"type": "BrowserConfig",
"params": {"headless": True}},
"crawler_config": {"type": "CrawlerRunConfig",
"params": {"cache_mode": "BYPASS"}}
}
console.rule("[bold blue]Demo A: /crawl/job Single URL[/]", style="blue")
print_payload(payload)
# enqueue
resp = await client.post("/crawl/job", json=payload)
console.print(f"Enqueue status: [bold]{resp.status_code}[/]")
resp.raise_for_status()
task_id = resp.json()["task_id"]
console.print(f"Task ID: [yellow]{task_id}[/]")
# poll
console.print("Polling…")
result = await poll_for_result(client, task_id)
console.print(Panel(Syntax(json.dumps(result, indent=2),
"json", theme="fruity"),
title="Final result", border_style="green"))
if result["status"] == "COMPLETED":
console.print("[green]✅ Crawl succeeded[/]")
else:
console.print("[red]❌ Crawl failed[/]")
async def demo_poll_multi_url(client: httpx.AsyncClient):
payload = {
"urls": [SIMPLE_URL, LINKS_URL],
"browser_config": {"type": "BrowserConfig",
"params": {"headless": True}},
"crawler_config": {"type": "CrawlerRunConfig",
"params": {"cache_mode": "BYPASS"}}
}
console.rule("[bold magenta]Demo B: /crawl/job Multi-URL[/]",
style="magenta")
print_payload(payload)
resp = await client.post("/crawl/job", json=payload)
console.print(f"Enqueue status: [bold]{resp.status_code}[/]")
resp.raise_for_status()
task_id = resp.json()["task_id"]
console.print(f"Task ID: [yellow]{task_id}[/]")
console.print("Polling…")
result = await poll_for_result(client, task_id)
console.print(Panel(Syntax(json.dumps(result, indent=2),
"json", theme="fruity"),
title="Final result", border_style="green"))
if result["status"] == "COMPLETED":
console.print(
f"[green]✅ {len(json.loads(result['result'])['results'])} URLs crawled[/]")
else:
console.print("[red]❌ Crawl failed[/]")
# --- main runner ----------------------------------------------------------
async def main_demo():
async with httpx.AsyncClient(base_url=BASE_URL, timeout=300.0) as client:
if not await check_server_health(client):
return
await demo_poll_single_url(client)
await demo_poll_multi_url(client)
console.rule("[bold green]Polling demos complete[/]", style="green")
if __name__ == "__main__":
try:
asyncio.run(main_demo())
except KeyboardInterrupt:
console.print("\n[yellow]Interrupted by user[/]")
except Exception:
console.print_exception(show_locals=False)
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/advanced_configuration.py | docs/examples/adaptive_crawling/advanced_configuration.py | """
Advanced Adaptive Crawling Configuration
This example demonstrates all configuration options available for adaptive crawling,
including threshold tuning, persistence, and custom parameters.
"""
import asyncio
from pathlib import Path
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
async def main():
"""Demonstrate advanced configuration options"""
# Example 1: Custom thresholds for different use cases
print("="*60)
print("EXAMPLE 1: Custom Confidence Thresholds")
print("="*60)
# High-precision configuration (exhaustive crawling)
high_precision_config = AdaptiveConfig(
confidence_threshold=0.9, # Very high confidence required
max_pages=50, # Allow more pages
top_k_links=5, # Follow more links per page
min_gain_threshold=0.02 # Lower threshold to continue
)
# Balanced configuration (default use case)
balanced_config = AdaptiveConfig(
confidence_threshold=0.7, # Moderate confidence
max_pages=20, # Reasonable limit
top_k_links=3, # Moderate branching
min_gain_threshold=0.05 # Standard gain threshold
)
# Quick exploration configuration
quick_config = AdaptiveConfig(
confidence_threshold=0.5, # Lower confidence acceptable
max_pages=10, # Strict limit
top_k_links=2, # Minimal branching
min_gain_threshold=0.1 # High gain required
)
async with AsyncWebCrawler(verbose=False) as crawler:
# Test different configurations
for config_name, config in [
("High Precision", high_precision_config),
("Balanced", balanced_config),
("Quick Exploration", quick_config)
]:
print(f"\nTesting {config_name} configuration...")
adaptive = AdaptiveCrawler(crawler, config=config)
result = await adaptive.digest(
start_url="https://httpbin.org",
query="http headers authentication"
)
print(f" - Pages crawled: {len(result.crawled_urls)}")
print(f" - Confidence achieved: {adaptive.confidence:.2%}")
print(f" - Coverage score: {adaptive.coverage_stats['coverage']:.2f}")
# Example 2: Persistence and state management
print("\n" + "="*60)
print("EXAMPLE 2: State Persistence")
print("="*60)
state_file = "crawl_state_demo.json"
# Configuration with persistence
persistent_config = AdaptiveConfig(
confidence_threshold=0.8,
max_pages=30,
save_state=True, # Enable auto-save
state_path=state_file # Specify save location
)
async with AsyncWebCrawler(verbose=False) as crawler:
# First crawl - will be interrupted
print("\nStarting initial crawl (will interrupt after 5 pages)...")
interrupt_config = AdaptiveConfig(
confidence_threshold=0.8,
max_pages=5, # Artificially low to simulate interruption
save_state=True,
state_path=state_file
)
adaptive = AdaptiveCrawler(crawler, config=interrupt_config)
result1 = await adaptive.digest(
start_url="https://docs.python.org/3/",
query="exception handling try except finally"
)
print(f"First crawl completed: {len(result1.crawled_urls)} pages")
print(f"Confidence reached: {adaptive.confidence:.2%}")
# Resume crawl with higher page limit
print("\nResuming crawl from saved state...")
resume_config = AdaptiveConfig(
confidence_threshold=0.8,
max_pages=20, # Increase limit
save_state=True,
state_path=state_file
)
adaptive2 = AdaptiveCrawler(crawler, config=resume_config)
result2 = await adaptive2.digest(
start_url="https://docs.python.org/3/",
query="exception handling try except finally",
resume_from=state_file
)
print(f"Resumed crawl completed: {len(result2.crawled_urls)} total pages")
print(f"Final confidence: {adaptive2.confidence:.2%}")
# Clean up
Path(state_file).unlink(missing_ok=True)
# Example 3: Link selection strategies
print("\n" + "="*60)
print("EXAMPLE 3: Link Selection Strategies")
print("="*60)
# Conservative link following
conservative_config = AdaptiveConfig(
confidence_threshold=0.7,
max_pages=15,
top_k_links=1, # Only follow best link
min_gain_threshold=0.15 # High threshold
)
# Aggressive link following
aggressive_config = AdaptiveConfig(
confidence_threshold=0.7,
max_pages=15,
top_k_links=10, # Follow many links
min_gain_threshold=0.01 # Very low threshold
)
async with AsyncWebCrawler(verbose=False) as crawler:
for strategy_name, config in [
("Conservative", conservative_config),
("Aggressive", aggressive_config)
]:
print(f"\n{strategy_name} link selection:")
adaptive = AdaptiveCrawler(crawler, config=config)
result = await adaptive.digest(
start_url="https://httpbin.org",
query="api endpoints"
)
# Analyze crawl pattern
print(f" - Total pages: {len(result.crawled_urls)}")
print(f" - Unique domains: {len(set(url.split('/')[2] for url in result.crawled_urls))}")
print(f" - Max depth reached: {max(url.count('/') for url in result.crawled_urls) - 2}")
# Show saturation trend
if hasattr(result, 'new_terms_history') and result.new_terms_history:
print(f" - New terms discovered: {result.new_terms_history[:5]}...")
print(f" - Saturation trend: {'decreasing' if result.new_terms_history[-1] < result.new_terms_history[0] else 'increasing'}")
# Example 4: Monitoring crawl progress
print("\n" + "="*60)
print("EXAMPLE 4: Progress Monitoring")
print("="*60)
# Configuration with detailed monitoring
monitor_config = AdaptiveConfig(
confidence_threshold=0.75,
max_pages=10,
top_k_links=3
)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config=monitor_config)
# Start crawl
print("\nMonitoring crawl progress...")
result = await adaptive.digest(
start_url="https://httpbin.org",
query="http methods headers"
)
# Detailed statistics
print("\nDetailed crawl analysis:")
adaptive.print_stats(detailed=True)
# Export for analysis
print("\nExporting knowledge base for external analysis...")
adaptive.export_knowledge_base("knowledge_export_demo.jsonl")
print("Knowledge base exported to: knowledge_export_demo.jsonl")
# Show sample of exported data
with open("knowledge_export_demo.jsonl", 'r') as f:
first_line = f.readline()
print(f"Sample export: {first_line[:100]}...")
# Clean up
Path("knowledge_export_demo.jsonl").unlink(missing_ok=True)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/basic_usage.py | docs/examples/adaptive_crawling/basic_usage.py | """
Basic Adaptive Crawling Example
This example demonstrates the simplest use case of adaptive crawling:
finding information about a specific topic and knowing when to stop.
"""
import asyncio
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler
async def main():
"""Basic adaptive crawling example"""
# Initialize the crawler
async with AsyncWebCrawler(verbose=True) as crawler:
# Create an adaptive crawler with default settings (statistical strategy)
adaptive = AdaptiveCrawler(crawler)
# Note: You can also use embedding strategy for semantic understanding:
# from crawl4ai import AdaptiveConfig
# config = AdaptiveConfig(strategy="embedding")
# adaptive = AdaptiveCrawler(crawler, config)
# Start adaptive crawling
print("Starting adaptive crawl for Python async programming information...")
result = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query="async await context managers coroutines"
)
# Display crawl statistics
print("\n" + "="*50)
print("CRAWL STATISTICS")
print("="*50)
adaptive.print_stats(detailed=False)
# Get the most relevant content found
print("\n" + "="*50)
print("MOST RELEVANT PAGES")
print("="*50)
relevant_pages = adaptive.get_relevant_content(top_k=5)
for i, page in enumerate(relevant_pages, 1):
print(f"\n{i}. {page['url']}")
print(f" Relevance Score: {page['score']:.2%}")
# Show a snippet of the content
content = page['content'] or ""
if content:
snippet = content[:200].replace('\n', ' ')
if len(content) > 200:
snippet += "..."
print(f" Preview: {snippet}")
# Show final confidence
print(f"\n{'='*50}")
print(f"Final Confidence: {adaptive.confidence:.2%}")
print(f"Total Pages Crawled: {len(result.crawled_urls)}")
print(f"Knowledge Base Size: {len(adaptive.state.knowledge_base)} documents")
# Example: Check if we can answer specific questions
print(f"\n{'='*50}")
print("INFORMATION SUFFICIENCY CHECK")
print(f"{'='*50}")
if adaptive.confidence >= 0.8:
print("✓ High confidence - can answer detailed questions about async Python")
elif adaptive.confidence >= 0.6:
print("~ Moderate confidence - can answer basic questions")
else:
print("✗ Low confidence - need more information")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/embedding_strategy.py | docs/examples/adaptive_crawling/embedding_strategy.py | """
Embedding Strategy Example for Adaptive Crawling
This example demonstrates how to use the embedding-based strategy
for semantic understanding and intelligent crawling.
"""
import asyncio
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
async def main():
"""Demonstrate embedding strategy for adaptive crawling"""
# Configure embedding strategy
config = AdaptiveConfig(
strategy="embedding", # Use embedding strategy
embedding_model="sentence-transformers/all-MiniLM-L6-v2", # Default model
n_query_variations=10, # Generate 10 semantic variations
max_pages=15,
top_k_links=3,
min_gain_threshold=0.05,
# Embedding-specific parameters
embedding_k_exp=3.0, # Higher = stricter similarity requirements
embedding_min_confidence_threshold=0.1, # Stop if <10% relevant
embedding_validation_min_score=0.4 # Validation threshold
)
# Optional: Use OpenAI embeddings instead
if os.getenv('OPENAI_API_KEY'):
config.embedding_llm_config = {
'provider': 'openai/text-embedding-3-small',
'api_token': os.getenv('OPENAI_API_KEY')
}
print("Using OpenAI embeddings")
else:
print("Using sentence-transformers (local embeddings)")
async with AsyncWebCrawler(verbose=True) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
# Test 1: Relevant query with semantic understanding
print("\n" + "="*50)
print("TEST 1: Semantic Query Understanding")
print("="*50)
result = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query="concurrent programming event-driven architecture"
)
print("\nQuery Expansion:")
print(f"Original query expanded to {len(result.expanded_queries)} variations")
for i, q in enumerate(result.expanded_queries[:3], 1):
print(f" {i}. {q}")
print(" ...")
print("\nResults:")
adaptive.print_stats(detailed=False)
# Test 2: Detecting irrelevant queries
print("\n" + "="*50)
print("TEST 2: Irrelevant Query Detection")
print("="*50)
# Reset crawler for new query
adaptive = AdaptiveCrawler(crawler, config)
result = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query="how to bake chocolate chip cookies"
)
if result.metrics.get('is_irrelevant', False):
print("\n✅ Successfully detected irrelevant query!")
print(f"Stopped after just {len(result.crawled_urls)} pages")
print(f"Reason: {result.metrics.get('stopped_reason', 'unknown')}")
else:
print("\n❌ Failed to detect irrelevance")
print(f"Final confidence: {adaptive.confidence:.1%}")
# Test 3: Semantic gap analysis
print("\n" + "="*50)
print("TEST 3: Semantic Gap Analysis")
print("="*50)
# Show how embedding strategy identifies gaps
adaptive = AdaptiveCrawler(crawler, config)
result = await adaptive.digest(
start_url="https://realpython.com",
query="python decorators advanced patterns"
)
print(f"\nSemantic gaps identified: {len(result.semantic_gaps)}")
print(f"Knowledge base embeddings shape: {result.kb_embeddings.shape if result.kb_embeddings is not None else 'None'}")
# Show coverage metrics specific to embedding strategy
print("\nEmbedding-specific metrics:")
print(f" Average best similarity: {result.metrics.get('avg_best_similarity', 0):.3f}")
print(f" Coverage score: {result.metrics.get('coverage_score', 0):.3f}")
print(f" Validation confidence: {result.metrics.get('validation_confidence', 0):.2%}")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/export_import_kb.py | docs/examples/adaptive_crawling/export_import_kb.py | """
Knowledge Base Export and Import
This example demonstrates how to export crawled knowledge bases and
import them for reuse, sharing, or analysis.
"""
import asyncio
import json
from pathlib import Path
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
async def build_knowledge_base():
"""Build a knowledge base about web technologies"""
print("="*60)
print("PHASE 1: Building Knowledge Base")
print("="*60)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler)
# Crawl information about HTTP
print("\n1. Gathering HTTP protocol information...")
await adaptive.digest(
start_url="https://httpbin.org",
query="http methods headers status codes"
)
print(f" - Pages crawled: {len(adaptive.state.crawled_urls)}")
print(f" - Confidence: {adaptive.confidence:.2%}")
# Add more information about APIs
print("\n2. Adding API documentation knowledge...")
await adaptive.digest(
start_url="https://httpbin.org/anything",
query="rest api json response request"
)
print(f" - Total pages: {len(adaptive.state.crawled_urls)}")
print(f" - Confidence: {adaptive.confidence:.2%}")
# Export the knowledge base
export_path = "web_tech_knowledge.jsonl"
print(f"\n3. Exporting knowledge base to {export_path}")
adaptive.export_knowledge_base(export_path)
# Show export statistics
export_size = Path(export_path).stat().st_size / 1024
with open(export_path, 'r') as f:
line_count = sum(1 for _ in f)
print(f" - Exported {line_count} documents")
print(f" - File size: {export_size:.1f} KB")
return export_path
async def analyze_knowledge_base(kb_path):
"""Analyze the exported knowledge base"""
print("\n" + "="*60)
print("PHASE 2: Analyzing Exported Knowledge Base")
print("="*60)
# Read and analyze JSONL
documents = []
with open(kb_path, 'r') as f:
for line in f:
documents.append(json.loads(line))
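    # An illustrative (abridged) JSONL record, based on the fields inspected below:
    #   {"url": "https://httpbin.org/...", "content": "...", "metadata": {...},
    #    "links": [...], "query": "http methods headers status codes"}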
print(f"\nKnowledge base contains {len(documents)} documents:")
# Analyze document properties
total_content_length = 0
urls_by_domain = {}
    for i, doc in enumerate(documents):
# Content analysis
content_length = len(doc.get('content', ''))
total_content_length += content_length
# URL analysis
url = doc.get('url', '')
domain = url.split('/')[2] if url.startswith('http') else 'unknown'
urls_by_domain[domain] = urls_by_domain.get(domain, 0) + 1
# Show sample document
        if i == 0:
            print("\nSample document structure:")
print(f" - URL: {url}")
print(f" - Content length: {content_length} chars")
print(f" - Has metadata: {'metadata' in doc}")
print(f" - Has links: {len(doc.get('links', []))} links")
print(f" - Query: {doc.get('query', 'N/A')}")
print(f"\nContent statistics:")
print(f" - Total content: {total_content_length:,} characters")
print(f" - Average per document: {total_content_length/len(documents):,.0f} chars")
print(f"\nDomain distribution:")
for domain, count in urls_by_domain.items():
print(f" - {domain}: {count} pages")
async def import_and_continue():
"""Import a knowledge base and continue crawling"""
print("\n" + "="*60)
print("PHASE 3: Importing and Extending Knowledge Base")
print("="*60)
kb_path = "web_tech_knowledge.jsonl"
async with AsyncWebCrawler(verbose=False) as crawler:
# Create new adaptive crawler
adaptive = AdaptiveCrawler(crawler)
# Import existing knowledge base
print(f"\n1. Importing knowledge base from {kb_path}")
adaptive.import_knowledge_base(kb_path)
print(f" - Imported {len(adaptive.state.knowledge_base)} documents")
print(f" - Existing URLs: {len(adaptive.state.crawled_urls)}")
# Check current state
print("\n2. Checking imported knowledge state:")
adaptive.print_stats(detailed=False)
# Continue crawling with new query
print("\n3. Extending knowledge with new query...")
await adaptive.digest(
start_url="https://httpbin.org/status/200",
query="error handling retry timeout"
)
print("\n4. Final knowledge base state:")
adaptive.print_stats(detailed=False)
# Export extended knowledge base
extended_path = "web_tech_knowledge_extended.jsonl"
adaptive.export_knowledge_base(extended_path)
print(f"\n5. Extended knowledge base exported to {extended_path}")
async def share_knowledge_bases():
"""Demonstrate sharing knowledge bases between projects"""
print("\n" + "="*60)
print("PHASE 4: Sharing Knowledge Between Projects")
print("="*60)
# Simulate two different projects
project_a_kb = "project_a_knowledge.jsonl"
project_b_kb = "project_b_knowledge.jsonl"
async with AsyncWebCrawler(verbose=False) as crawler:
# Project A: Security documentation
print("\n1. Project A: Building security knowledge...")
crawler_a = AdaptiveCrawler(crawler)
await crawler_a.digest(
start_url="https://httpbin.org/basic-auth/user/pass",
query="authentication security headers"
)
crawler_a.export_knowledge_base(project_a_kb)
print(f" - Exported {len(crawler_a.state.knowledge_base)} documents")
# Project B: API testing
print("\n2. Project B: Building testing knowledge...")
crawler_b = AdaptiveCrawler(crawler)
await crawler_b.digest(
start_url="https://httpbin.org/anything",
query="testing endpoints mocking"
)
crawler_b.export_knowledge_base(project_b_kb)
print(f" - Exported {len(crawler_b.state.knowledge_base)} documents")
# Merge knowledge bases
print("\n3. Merging knowledge bases...")
merged_crawler = AdaptiveCrawler(crawler)
# Import both knowledge bases
merged_crawler.import_knowledge_base(project_a_kb)
initial_size = len(merged_crawler.state.knowledge_base)
merged_crawler.import_knowledge_base(project_b_kb)
final_size = len(merged_crawler.state.knowledge_base)
print(f" - Project A documents: {initial_size}")
print(f" - Additional from Project B: {final_size - initial_size}")
print(f" - Total merged documents: {final_size}")
# Export merged knowledge
merged_kb = "merged_knowledge.jsonl"
merged_crawler.export_knowledge_base(merged_kb)
print(f"\n4. Merged knowledge base exported to {merged_kb}")
# Show combined coverage
print("\n5. Combined knowledge coverage:")
merged_crawler.print_stats(detailed=False)
async def main():
"""Run all examples"""
try:
# Build initial knowledge base
kb_path = await build_knowledge_base()
# Analyze the export
await analyze_knowledge_base(kb_path)
# Import and extend
await import_and_continue()
# Demonstrate sharing
await share_knowledge_bases()
print("\n" + "="*60)
print("All examples completed successfully!")
print("="*60)
finally:
# Clean up generated files
print("\nCleaning up generated files...")
for file in [
"web_tech_knowledge.jsonl",
"web_tech_knowledge_extended.jsonl",
"project_a_knowledge.jsonl",
"project_b_knowledge.jsonl",
"merged_knowledge.jsonl"
]:
Path(file).unlink(missing_ok=True)
print("Cleanup complete.")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/embedding_vs_statistical.py | docs/examples/adaptive_crawling/embedding_vs_statistical.py | """
Comparison: Embedding vs Statistical Strategy
This example demonstrates the differences between statistical and embedding
strategies for adaptive crawling, showing when to use each approach.
"""
import asyncio
import time
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
async def crawl_with_strategy(url: str, query: str, strategy: str, **kwargs):
"""Helper function to crawl with a specific strategy"""
config = AdaptiveConfig(
strategy=strategy,
max_pages=20,
top_k_links=3,
min_gain_threshold=0.05,
**kwargs
)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
start_time = time.time()
result = await adaptive.digest(start_url=url, query=query)
elapsed = time.time() - start_time
return {
'result': result,
'crawler': adaptive,
'elapsed': elapsed,
'pages': len(result.crawled_urls),
'confidence': adaptive.confidence
}
async def main():
"""Compare embedding and statistical strategies"""
# Test scenarios
test_cases = [
{
'name': 'Technical Documentation (Specific Terms)',
'url': 'https://docs.python.org/3/library/asyncio.html',
'query': 'asyncio.create_task event_loop.run_until_complete'
},
{
'name': 'Conceptual Query (Semantic Understanding)',
'url': 'https://docs.python.org/3/library/asyncio.html',
'query': 'concurrent programming patterns'
},
{
'name': 'Ambiguous Query',
'url': 'https://realpython.com',
'query': 'python performance optimization'
}
]
# Configure embedding strategy
embedding_config = {}
if os.getenv('OPENAI_API_KEY'):
embedding_config['embedding_llm_config'] = {
'provider': 'openai/text-embedding-3-small',
'api_token': os.getenv('OPENAI_API_KEY')
}
for test in test_cases:
print("\n" + "="*70)
print(f"TEST: {test['name']}")
print(f"URL: {test['url']}")
print(f"Query: '{test['query']}'")
print("="*70)
# Run statistical strategy
print("\n📊 Statistical Strategy:")
stat_result = await crawl_with_strategy(
test['url'],
test['query'],
'statistical'
)
print(f" Pages crawled: {stat_result['pages']}")
print(f" Time taken: {stat_result['elapsed']:.2f}s")
print(f" Confidence: {stat_result['confidence']:.1%}")
print(f" Sufficient: {'Yes' if stat_result['crawler'].is_sufficient else 'No'}")
# Show term coverage
if hasattr(stat_result['result'], 'term_frequencies'):
query_terms = test['query'].lower().split()
covered = sum(1 for term in query_terms
if term in stat_result['result'].term_frequencies)
print(f" Term coverage: {covered}/{len(query_terms)} query terms found")
# Run embedding strategy
print("\n🧠 Embedding Strategy:")
emb_result = await crawl_with_strategy(
test['url'],
test['query'],
'embedding',
**embedding_config
)
print(f" Pages crawled: {emb_result['pages']}")
print(f" Time taken: {emb_result['elapsed']:.2f}s")
print(f" Confidence: {emb_result['confidence']:.1%}")
print(f" Sufficient: {'Yes' if emb_result['crawler'].is_sufficient else 'No'}")
# Show semantic understanding
if emb_result['result'].expanded_queries:
print(f" Query variations: {len(emb_result['result'].expanded_queries)}")
print(f" Semantic gaps: {len(emb_result['result'].semantic_gaps)}")
# Compare results
print("\n📈 Comparison:")
efficiency_diff = ((stat_result['pages'] - emb_result['pages']) /
stat_result['pages'] * 100) if stat_result['pages'] > 0 else 0
print(f" Efficiency: ", end="")
if efficiency_diff > 0:
print(f"Embedding used {efficiency_diff:.0f}% fewer pages")
else:
print(f"Statistical used {-efficiency_diff:.0f}% fewer pages")
print(f" Speed: ", end="")
if stat_result['elapsed'] < emb_result['elapsed']:
print(f"Statistical was {emb_result['elapsed']/stat_result['elapsed']:.1f}x faster")
else:
print(f"Embedding was {stat_result['elapsed']/emb_result['elapsed']:.1f}x faster")
print(f" Confidence difference: {abs(stat_result['confidence'] - emb_result['confidence'])*100:.0f} percentage points")
# Recommendation
print("\n💡 Recommendation:")
if 'specific' in test['name'].lower() or all(len(term) > 5 for term in test['query'].split()):
print(" → Statistical strategy is likely better for this use case (specific terms)")
elif 'conceptual' in test['name'].lower() or 'semantic' in test['name'].lower():
print(" → Embedding strategy is likely better for this use case (semantic understanding)")
else:
if emb_result['confidence'] > stat_result['confidence'] + 0.1:
print(" → Embedding strategy achieved significantly better understanding")
elif stat_result['elapsed'] < emb_result['elapsed'] / 2:
print(" → Statistical strategy is much faster with similar results")
else:
print(" → Both strategies performed similarly; choose based on your priorities")
# Summary recommendations
print("\n" + "="*70)
print("STRATEGY SELECTION GUIDE")
print("="*70)
print("\n✅ Use STATISTICAL strategy when:")
print(" - Queries contain specific technical terms")
print(" - Speed is critical")
print(" - No API access available")
print(" - Working with well-structured documentation")
print("\n✅ Use EMBEDDING strategy when:")
print(" - Queries are conceptual or ambiguous")
print(" - Semantic understanding is important")
print(" - Need to detect irrelevant content")
print(" - Working with diverse content sources")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/embedding_configuration.py | docs/examples/adaptive_crawling/embedding_configuration.py | """
Advanced Embedding Configuration Example
This example demonstrates all configuration options available for the
embedding strategy, including fine-tuning parameters for different use cases.
"""
import asyncio
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
async def test_configuration(name: str, config: AdaptiveConfig, url: str, query: str):
"""Test a specific configuration"""
print(f"\n{'='*60}")
print(f"Configuration: {name}")
print(f"{'='*60}")
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
result = await adaptive.digest(start_url=url, query=query)
print(f"Pages crawled: {len(result.crawled_urls)}")
print(f"Final confidence: {adaptive.confidence:.1%}")
print(f"Stopped reason: {result.metrics.get('stopped_reason', 'max_pages')}")
if result.metrics.get('is_irrelevant', False):
print("⚠️ Query detected as irrelevant!")
return result
async def main():
"""Demonstrate various embedding configurations"""
print("EMBEDDING STRATEGY CONFIGURATION EXAMPLES")
print("=" * 60)
# Base URL and query for testing
test_url = "https://docs.python.org/3/library/asyncio.html"
# 1. Default Configuration
config_default = AdaptiveConfig(
strategy="embedding",
max_pages=10
)
await test_configuration(
"Default Settings",
config_default,
test_url,
"async programming patterns"
)
# 2. Strict Coverage Requirements
config_strict = AdaptiveConfig(
strategy="embedding",
max_pages=20,
# Stricter similarity requirements
embedding_k_exp=5.0, # Default is 3.0, higher = stricter
embedding_coverage_radius=0.15, # Default is 0.2, lower = stricter
# Higher validation threshold
embedding_validation_min_score=0.6, # Default is 0.3
# More query variations for better coverage
n_query_variations=15 # Default is 10
)
await test_configuration(
"Strict Coverage (Research/Academic)",
config_strict,
test_url,
"comprehensive guide async await"
)
# 3. Fast Exploration
config_fast = AdaptiveConfig(
strategy="embedding",
max_pages=10,
top_k_links=5, # Follow more links per page
# Relaxed requirements for faster convergence
embedding_k_exp=1.0, # Lower = more lenient
embedding_min_relative_improvement=0.05, # Stop earlier
# Lower quality thresholds
embedding_quality_min_confidence=0.5, # Display lower confidence
embedding_quality_max_confidence=0.85,
# Fewer query variations for speed
n_query_variations=5
)
await test_configuration(
"Fast Exploration (Quick Overview)",
config_fast,
test_url,
"async basics"
)
# 4. Irrelevance Detection Focus
config_irrelevance = AdaptiveConfig(
strategy="embedding",
max_pages=5,
# Aggressive irrelevance detection
embedding_min_confidence_threshold=0.2, # Higher threshold (default 0.1)
embedding_k_exp=5.0, # Strict similarity
# Quick stopping for irrelevant content
embedding_min_relative_improvement=0.15
)
await test_configuration(
"Irrelevance Detection",
config_irrelevance,
test_url,
"recipe for chocolate cake" # Irrelevant query
)
# 5. High-Quality Knowledge Base
config_quality = AdaptiveConfig(
strategy="embedding",
max_pages=30,
# Deduplication settings
embedding_overlap_threshold=0.75, # More aggressive deduplication
# Quality focus
embedding_validation_min_score=0.5,
embedding_quality_scale_factor=1.0, # Linear quality mapping
# Balanced parameters
embedding_k_exp=3.0,
embedding_nearest_weight=0.8, # Focus on best matches
embedding_top_k_weight=0.2
)
await test_configuration(
"High-Quality Knowledge Base",
config_quality,
test_url,
"asyncio advanced patterns best practices"
)
# 6. Custom Embedding Provider
if os.getenv('OPENAI_API_KEY'):
config_openai = AdaptiveConfig(
strategy="embedding",
max_pages=10,
# Use OpenAI embeddings
embedding_llm_config={
'provider': 'openai/text-embedding-3-small',
'api_token': os.getenv('OPENAI_API_KEY')
},
# OpenAI embeddings are high quality, can be stricter
embedding_k_exp=4.0,
n_query_variations=12
)
await test_configuration(
"OpenAI Embeddings",
config_openai,
test_url,
"event-driven architecture patterns"
)
# Parameter Guide
print("\n" + "="*60)
print("PARAMETER TUNING GUIDE")
print("="*60)
print("\n📊 Key Parameters and Their Effects:")
print("\n1. embedding_k_exp (default: 3.0)")
print(" - Lower (1-2): More lenient, faster convergence")
print(" - Higher (4-5): Stricter, better precision")
print("\n2. embedding_coverage_radius (default: 0.2)")
print(" - Lower (0.1-0.15): Requires closer matches")
print(" - Higher (0.25-0.3): Accepts broader matches")
print("\n3. n_query_variations (default: 10)")
print(" - Lower (5-7): Faster, less comprehensive")
print(" - Higher (15-20): Better coverage, slower")
print("\n4. embedding_min_confidence_threshold (default: 0.1)")
print(" - Set to 0.15-0.2 for aggressive irrelevance detection")
print(" - Set to 0.05 to crawl even barely relevant content")
print("\n5. embedding_validation_min_score (default: 0.3)")
print(" - Higher (0.5-0.6): Requires strong validation")
print(" - Lower (0.2): More permissive stopping")
print("\n💡 Tips:")
print("- For research: High k_exp, more variations, strict validation")
print("- For exploration: Low k_exp, fewer variations, relaxed thresholds")
print("- For quality: Focus on overlap_threshold and validation scores")
print("- For speed: Reduce variations, increase min_relative_improvement")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/llm_config_example.py | docs/examples/adaptive_crawling/llm_config_example.py | import asyncio
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig, LLMConfig
async def test_configuration(name: str, config: AdaptiveConfig, url: str, query: str):
"""Test a specific configuration"""
print(f"\n{'='*60}")
print(f"Configuration: {name}")
print(f"{'='*60}")
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
result = await adaptive.digest(start_url=url, query=query)
print("\n" + "="*50)
print("CRAWL STATISTICS")
print("="*50)
adaptive.print_stats(detailed=False)
# Get the most relevant content found
print("\n" + "="*50)
print("MOST RELEVANT PAGES")
print("="*50)
relevant_pages = adaptive.get_relevant_content(top_k=5)
for i, page in enumerate(relevant_pages, 1):
print(f"\n{i}. {page['url']}")
print(f" Relevance Score: {page['score']:.2%}")
# Show a snippet of the content
content = page['content'] or ""
if content:
snippet = content[:200].replace('\n', ' ')
if len(content) > 200:
snippet += "..."
print(f" Preview: {snippet}")
print(f"\n{'='*50}")
print(f"Pages crawled: {len(result.crawled_urls)}")
print(f"Final confidence: {adaptive.confidence:.1%}")
print(f"Stopped reason: {result.metrics.get('stopped_reason', 'max_pages')}")
if result.metrics.get('is_irrelevant', False):
print("⚠️ Query detected as irrelevant!")
return result
async def llm_embedding():
"""Demonstrate various embedding configurations"""
print("EMBEDDING STRATEGY CONFIGURATION EXAMPLES")
print("=" * 60)
# Base URL and query for testing
test_url = "https://docs.python.org/3/library/asyncio.html"
openai_llm_config = LLMConfig(
provider='openai/text-embedding-3-small',
api_token=os.getenv('OPENAI_API_KEY'),
temperature=0.7,
max_tokens=2000
)
config_openai = AdaptiveConfig(
strategy="embedding",
max_pages=10,
# Use OpenAI embeddings
embedding_llm_config=openai_llm_config,
# embedding_llm_config={
# 'provider': 'openai/text-embedding-3-small',
# 'api_token': os.getenv('OPENAI_API_KEY')
# },
# OpenAI embeddings are high quality, can be stricter
embedding_k_exp=4.0,
n_query_variations=12
)
await test_configuration(
"OpenAI Embeddings",
config_openai,
test_url,
# "event-driven architecture patterns"
"async await context managers coroutines"
)
return
async def basic_adaptive_crawling():
"""Basic adaptive crawling example"""
# Initialize the crawler
async with AsyncWebCrawler(verbose=True) as crawler:
# Create an adaptive crawler with default settings (statistical strategy)
adaptive = AdaptiveCrawler(crawler)
# Note: You can also use embedding strategy for semantic understanding:
# from crawl4ai import AdaptiveConfig
# config = AdaptiveConfig(strategy="embedding")
# adaptive = AdaptiveCrawler(crawler, config)
# Start adaptive crawling
print("Starting adaptive crawl for Python async programming information...")
result = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query="async await context managers coroutines"
)
# Display crawl statistics
print("\n" + "="*50)
print("CRAWL STATISTICS")
print("="*50)
adaptive.print_stats(detailed=False)
# Get the most relevant content found
print("\n" + "="*50)
print("MOST RELEVANT PAGES")
print("="*50)
relevant_pages = adaptive.get_relevant_content(top_k=5)
for i, page in enumerate(relevant_pages, 1):
print(f"\n{i}. {page['url']}")
print(f" Relevance Score: {page['score']:.2%}")
# Show a snippet of the content
content = page['content'] or ""
if content:
snippet = content[:200].replace('\n', ' ')
if len(content) > 200:
snippet += "..."
print(f" Preview: {snippet}")
# Show final confidence
print(f"\n{'='*50}")
print(f"Final Confidence: {adaptive.confidence:.2%}")
print(f"Total Pages Crawled: {len(result.crawled_urls)}")
print(f"Knowledge Base Size: {len(adaptive.state.knowledge_base)} documents")
if adaptive.confidence >= 0.8:
print("✓ High confidence - can answer detailed questions about async Python")
elif adaptive.confidence >= 0.6:
print("~ Moderate confidence - can answer basic questions")
else:
print("✗ Low confidence - need more information")
if __name__ == "__main__":
asyncio.run(llm_embedding())
# asyncio.run(basic_adaptive_crawling()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/adaptive_crawling/custom_strategies.py | docs/examples/adaptive_crawling/custom_strategies.py | """
Custom Adaptive Crawling Strategies
This example demonstrates how to implement custom scoring strategies
for domain-specific crawling needs.
"""
import asyncio
import re
from typing import List, Dict, Set
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
from crawl4ai.adaptive_crawler import CrawlState, Link
import math
class APIDocumentationStrategy:
"""
Custom strategy optimized for API documentation crawling.
Prioritizes endpoint references, code examples, and parameter descriptions.
"""
def __init__(self):
# Keywords that indicate high-value API documentation
self.api_keywords = {
'endpoint', 'request', 'response', 'parameter', 'authentication',
'header', 'body', 'query', 'path', 'method', 'get', 'post', 'put',
'delete', 'patch', 'status', 'code', 'example', 'curl', 'python'
}
# URL patterns that typically contain API documentation
self.valuable_patterns = [
r'/api/',
r'/reference/',
r'/endpoints?/',
r'/methods?/',
r'/resources?/'
]
# Patterns to avoid
self.avoid_patterns = [
r'/blog/',
r'/news/',
r'/about/',
r'/contact/',
r'/legal/'
]
def score_link(self, link: Link, query: str, state: CrawlState) -> float:
"""Custom link scoring for API documentation"""
score = 1.0
url = link.href.lower()
# Boost API-related URLs
for pattern in self.valuable_patterns:
if re.search(pattern, url):
score *= 2.0
break
# Reduce score for non-API content
for pattern in self.avoid_patterns:
if re.search(pattern, url):
score *= 0.1
break
# Boost if preview contains API keywords
if link.text:
preview_lower = link.text.lower()
keyword_count = sum(1 for kw in self.api_keywords if kw in preview_lower)
score *= (1 + keyword_count * 0.2)
# Prioritize shallow URLs (likely overview pages)
depth = url.count('/') - 2 # Subtract protocol slashes
if depth <= 3:
score *= 1.5
elif depth > 6:
score *= 0.5
return score
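    # Qualitative example (hypothetical link): a link to ".../api/v1/users" whose anchor text
    # mentions "GET request parameters" is boosted three ways (the "/api/" URL pattern, the
    # keyword matches in its text, and its shallow depth), while a "/blog/..." link at the
    # same depth is heavily penalised instead.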
def calculate_api_coverage(self, state: CrawlState, query: str) -> Dict[str, float]:
"""Calculate specialized coverage metrics for API documentation"""
metrics = {
'endpoint_coverage': 0.0,
'example_coverage': 0.0,
'parameter_coverage': 0.0
}
# Analyze knowledge base for API-specific content
endpoint_patterns = [r'GET\s+/', r'POST\s+/', r'PUT\s+/', r'DELETE\s+/']
example_patterns = [r'```\w+', r'curl\s+-', r'import\s+requests']
param_patterns = [r'param(?:eter)?s?\s*:', r'required\s*:', r'optional\s*:']
total_docs = len(state.knowledge_base)
if total_docs == 0:
return metrics
docs_with_endpoints = 0
docs_with_examples = 0
docs_with_params = 0
for doc in state.knowledge_base:
content = doc.markdown.raw_markdown if hasattr(doc, 'markdown') else str(doc)
# Check for endpoints
if any(re.search(pattern, content, re.IGNORECASE) for pattern in endpoint_patterns):
docs_with_endpoints += 1
# Check for examples
if any(re.search(pattern, content, re.IGNORECASE) for pattern in example_patterns):
docs_with_examples += 1
# Check for parameters
if any(re.search(pattern, content, re.IGNORECASE) for pattern in param_patterns):
docs_with_params += 1
metrics['endpoint_coverage'] = docs_with_endpoints / total_docs
metrics['example_coverage'] = docs_with_examples / total_docs
metrics['parameter_coverage'] = docs_with_params / total_docs
return metrics
class ResearchPaperStrategy:
"""
Strategy optimized for crawling research papers and academic content.
Prioritizes citations, abstracts, and methodology sections.
"""
def __init__(self):
self.academic_keywords = {
'abstract', 'introduction', 'methodology', 'results', 'conclusion',
'references', 'citation', 'paper', 'study', 'research', 'analysis',
'hypothesis', 'experiment', 'findings', 'doi'
}
self.citation_patterns = [
r'\[\d+\]', # [1] style citations
r'\(\w+\s+\d{4}\)', # (Author 2024) style
r'doi:\s*\S+', # DOI references
]
def calculate_academic_relevance(self, content: str, query: str) -> float:
"""Calculate relevance score for academic content"""
score = 0.0
content_lower = content.lower()
# Check for academic keywords
keyword_matches = sum(1 for kw in self.academic_keywords if kw in content_lower)
score += keyword_matches * 0.1
# Check for citations
citation_count = sum(
len(re.findall(pattern, content))
for pattern in self.citation_patterns
)
score += min(citation_count * 0.05, 1.0) # Cap at 1.0
# Check for query terms in academic context
query_terms = query.lower().split()
for term in query_terms:
# Boost if term appears near academic keywords
for keyword in ['abstract', 'conclusion', 'results']:
if keyword in content_lower:
section = content_lower[content_lower.find(keyword):content_lower.find(keyword) + 500]
if term in section:
score += 0.2
return min(score, 2.0) # Cap total score
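# Quick sanity-check sketch for the scorer above (the sample text and query are made up for illustration):
# research = ResearchPaperStrategy()
# sample = "Abstract: We study adaptive crawling. Results show gains [1] (Smith 2024). doi:10.1000/xyz"
# print(research.calculate_academic_relevance(sample, "adaptive crawling"))  # relevance score, capped at 2.0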
async def demo_custom_strategies():
"""Demonstrate custom strategy usage"""
# Example 1: API Documentation Strategy
print("="*60)
print("EXAMPLE 1: Custom API Documentation Strategy")
print("="*60)
api_strategy = APIDocumentationStrategy()
async with AsyncWebCrawler() as crawler:
# Standard adaptive crawler
config = AdaptiveConfig(
confidence_threshold=0.8,
max_pages=15
)
adaptive = AdaptiveCrawler(crawler, config)
# Override link scoring with custom strategy
original_rank_links = adaptive._rank_links
def custom_rank_links(links, query, state):
# Apply custom scoring
scored_links = []
for link in links:
base_score = api_strategy.score_link(link, query, state)
scored_links.append((link, base_score))
# Sort by score
scored_links.sort(key=lambda x: x[1], reverse=True)
return [link for link, _ in scored_links[:config.top_k_links]]
adaptive._rank_links = custom_rank_links
# Crawl API documentation
print("\nCrawling API documentation with custom strategy...")
state = await adaptive.digest(
start_url="https://httpbin.org",
query="api endpoints authentication headers"
)
# Calculate custom metrics
api_metrics = api_strategy.calculate_api_coverage(state, "api endpoints")
print(f"\nResults:")
print(f"Pages crawled: {len(state.crawled_urls)}")
print(f"Confidence: {adaptive.confidence:.2%}")
print(f"\nAPI-Specific Metrics:")
print(f" - Endpoint coverage: {api_metrics['endpoint_coverage']:.2%}")
print(f" - Example coverage: {api_metrics['example_coverage']:.2%}")
print(f" - Parameter coverage: {api_metrics['parameter_coverage']:.2%}")
# Example 2: Combined Strategy
print("\n" + "="*60)
print("EXAMPLE 2: Hybrid Strategy Combining Multiple Approaches")
print("="*60)
class HybridStrategy:
"""Combines multiple strategies with weights"""
def __init__(self):
self.api_strategy = APIDocumentationStrategy()
self.research_strategy = ResearchPaperStrategy()
self.weights = {
'api': 0.7,
'research': 0.3
}
def score_content(self, content: str, query: str) -> float:
# Get scores from each strategy
api_score = self._calculate_api_score(content, query)
research_score = self.research_strategy.calculate_academic_relevance(content, query)
# Weighted combination
total_score = (
api_score * self.weights['api'] +
research_score * self.weights['research']
)
return total_score
def _calculate_api_score(self, content: str, query: str) -> float:
# Simplified API scoring based on keyword presence
content_lower = content.lower()
api_keywords = self.api_strategy.api_keywords
keyword_count = sum(1 for kw in api_keywords if kw in content_lower)
return min(keyword_count * 0.1, 2.0)
hybrid_strategy = HybridStrategy()
async with AsyncWebCrawler() as crawler:
adaptive = AdaptiveCrawler(crawler)
# Crawl with hybrid scoring
print("\nTesting hybrid strategy on technical documentation...")
state = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query="async await coroutines api"
)
# Analyze results with hybrid strategy
print(f"\nHybrid Strategy Analysis:")
total_score = 0
relevant_docs = adaptive.get_relevant_content(top_k=5)
for doc in relevant_docs:
content = doc['content'] or ""
score = hybrid_strategy.score_content(content, "async await api")
total_score += score
print(f" - {doc['url'][:50]}... Score: {score:.2f}")
print(f"\nAverage hybrid score: {total_score/max(len(relevant_docs), 1):.2f}")
async def demo_performance_optimization():
"""Demonstrate performance optimization with custom strategies"""
print("\n" + "="*60)
print("EXAMPLE 3: Performance-Optimized Strategy")
print("="*60)
class PerformanceOptimizedStrategy:
"""Strategy that balances thoroughness with speed"""
def __init__(self):
self.url_cache: Set[str] = set()
self.domain_scores: Dict[str, float] = {}
def should_crawl_domain(self, url: str) -> bool:
"""Implement domain-level filtering"""
domain = url.split('/')[2] if url.startswith('http') else url
# Skip if we've already crawled many pages from this domain
domain_count = sum(1 for cached in self.url_cache if domain in cached)
if domain_count > 5:
return False
# Skip low-scoring domains
if domain in self.domain_scores and self.domain_scores[domain] < 0.3:
return False
return True
def update_domain_score(self, url: str, relevance: float):
"""Track domain-level performance"""
domain = url.split('/')[2] if url.startswith('http') else url
if domain not in self.domain_scores:
self.domain_scores[domain] = relevance
else:
# Moving average
self.domain_scores[domain] = (
0.7 * self.domain_scores[domain] + 0.3 * relevance
)
perf_strategy = PerformanceOptimizedStrategy()
async with AsyncWebCrawler() as crawler:
config = AdaptiveConfig(
confidence_threshold=0.7,
max_pages=10,
top_k_links=2 # Fewer links for speed
)
adaptive = AdaptiveCrawler(crawler, config)
# Track performance
import time
start_time = time.time()
state = await adaptive.digest(
start_url="https://httpbin.org",
query="http methods headers"
)
elapsed = time.time() - start_time
print(f"\nPerformance Results:")
print(f" - Time elapsed: {elapsed:.2f} seconds")
print(f" - Pages crawled: {len(state.crawled_urls)}")
print(f" - Pages per second: {len(state.crawled_urls)/elapsed:.2f}")
print(f" - Final confidence: {adaptive.confidence:.2%}")
print(f" - Efficiency: {adaptive.confidence/len(state.crawled_urls):.2%} confidence per page")
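# The perf_strategy hooks above are defined but not wired into this demo; a sketch of how they
# could be fed after the crawl (per-page relevance would normally come from your own scoring,
# not the overall confidence used here for illustration):
# for crawled_url in state.crawled_urls:
#     perf_strategy.update_domain_score(crawled_url, adaptive.confidence)
# print(f"Domain scores so far: {perf_strategy.domain_scores}")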
async def main():
"""Run all demonstrations"""
try:
await demo_custom_strategies()
await demo_performance_optimization()
print("\n" + "="*60)
print("All custom strategy examples completed!")
print("="*60)
except Exception as e:
print(f"Error: {e}")
import traceback
traceback.print_exc()
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/c4a_script_hello_world_error.py | docs/examples/c4a_script/c4a_script_hello_world_error.py | """
C4A-Script Hello World - Error Example
Shows how error handling works
"""
from crawl4ai.script.c4a_compile import compile
# Define a script with an error (missing THEN)
script = """
GO https://example.com
WAIT `#content` 5
IF (EXISTS `.cookie-banner`) CLICK `.accept`
CLICK `button.submit`
"""
# Compile the script
result = compile(script)
# Check if compilation was successful
if result.success:
# Success! Use the generated JavaScript
print("✅ Compilation successful!")
print(f"Generated {len(result.js_code)} JavaScript statements:\n")
for i, js in enumerate(result.js_code, 1):
print(f"{i}. {js}\n")
# In real usage, you'd pass result.js_code to Crawl4AI:
# config = CrawlerRunConfig(js_code=result.js_code)
else:
# Error! Handle the compilation error
print("❌ Compilation failed!")
# Get the first error (there might be multiple)
error = result.first_error
# Show error details
print(f"Error at line {error.line}, column {error.column}")
print(f"Message: {error.message}")
# Show the problematic code
print(f"\nCode: {error.source_line}")
print(" " * (6 + error.column) + "^")
# Show suggestions if available
if error.suggestions:
print("\n💡 How to fix:")
for suggestion in error.suggestions:
print(f" {suggestion.message}")
# For debugging or logging, you can also get JSON
# error_json = result.to_json() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/c4a_script_hello_world.py | docs/examples/c4a_script/c4a_script_hello_world.py | """
C4A-Script Hello World
A concise example showing how to use the C4A-Script compiler
"""
from crawl4ai.script.c4a_compile import compile
# Define your C4A-Script
script = """
GO https://example.com
WAIT `#content` 5
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
CLICK `button.submit`
"""
# Compile the script
result = compile(script)
# Check if compilation was successful
if result.success:
# Success! Use the generated JavaScript
print("✅ Compilation successful!")
print(f"Generated {len(result.js_code)} JavaScript statements:\n")
for i, js in enumerate(result.js_code, 1):
print(f"{i}. {js}\n")
# In real usage, you'd pass result.js_code to Crawl4AI:
# config = CrawlerRunConfig(js_code=result.js_code)
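# A fuller hand-off sketch (assumes an async context and a reachable URL; the URL is illustrative):
# from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
# async with AsyncWebCrawler() as crawler:
#     crawl_result = await crawler.arun(url="https://example.com", config=CrawlerRunConfig(js_code=result.js_code))
#     print(crawl_result.markdown)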
else:
# Error! Handle the compilation error
print("❌ Compilation failed!")
# Get the first error (there might be multiple)
error = result.first_error
# Show error details
print(f"Error at line {error.line}, column {error.column}")
print(f"Message: {error.message}")
# Show the problematic code
print(f"\nCode: {error.source_line}")
print(" " * (6 + error.column) + "^")
# Show suggestions if available
if error.suggestions:
print("\n💡 How to fix:")
for suggestion in error.suggestions:
print(f" {suggestion.message}")
# For debugging or logging, you can also get JSON
# error_json = result.to_json() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/api_usage_examples.py | docs/examples/c4a_script/api_usage_examples.py | """
C4A-Script API Usage Examples
Shows how to use the new Result-based API in various scenarios
"""
from crawl4ai.script.c4a_compile import compile, validate, compile_file
from crawl4ai.script.c4a_result import CompilationResult, ValidationResult
import json
print("C4A-Script API Usage Examples")
print("=" * 80)
# Example 1: Basic compilation
print("\n1. Basic Compilation")
print("-" * 40)
script = """
GO https://example.com
WAIT 2
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
REPEAT (SCROLL DOWN 300, 3)
"""
result = compile(script)
print(f"Success: {result.success}")
print(f"Statements generated: {len(result.js_code) if result.js_code else 0}")
# Example 2: Error handling
print("\n\n2. Error Handling")
print("-" * 40)
error_script = """
GO https://example.com
IF (EXISTS `.modal`) CLICK `.close`
undefined_procedure
"""
result = compile(error_script)
if not result.success:
# Access error details
error = result.first_error
print(f"Error on line {error.line}: {error.message}")
print(f"Error code: {error.code}")
# Show suggestions if available
if error.suggestions:
print("Suggestions:")
for suggestion in error.suggestions:
print(f" - {suggestion.message}")
# Example 3: Validation only
print("\n\n3. Validation (no code generation)")
print("-" * 40)
validation_script = """
PROC validate_form
IF (EXISTS `#email`) THEN TYPE "test@example.com"
PRESS Tab
ENDPROC
validate_form
"""
validation = validate(validation_script)
print(f"Valid: {validation.valid}")
if validation.errors:
print(f"Errors found: {len(validation.errors)}")
# Example 4: JSON output for UI
print("\n\n4. JSON Output for UI Integration")
print("-" * 40)
ui_script = """
CLICK button.submit
"""
result = compile(ui_script)
if not result.success:
# Get JSON for UI
error_json = result.to_dict()
print("Error data for UI:")
print(json.dumps(error_json["errors"][0], indent=2))
# Example 5: File compilation
print("\n\n5. File Compilation")
print("-" * 40)
# Create a test file
test_file = "test_script.c4a"
with open(test_file, "w") as f:
f.write("""
GO https://example.com
WAIT `.content` 5
CLICK `.main-button`
""")
result = compile_file(test_file)
print(f"File compilation: {'Success' if result.success else 'Failed'}")
if result.success:
print(f"Generated {len(result.js_code)} JavaScript statements")
# Clean up
import os
os.remove(test_file)
# Example 6: Batch processing
print("\n\n6. Batch Processing Multiple Scripts")
print("-" * 40)
scripts = [
"GO https://example1.com\nCLICK `.button`",
"GO https://example2.com\nWAIT 2",
"GO https://example3.com\nINVALID_CMD"
]
results = []
for i, script in enumerate(scripts, 1):
result = compile(script)
results.append(result)
status = "✓" if result.success else "✗"
print(f"Script {i}: {status}")
# Summary
successful = sum(1 for r in results if r.success)
print(f"\nBatch result: {successful}/{len(scripts)} successful")
# Example 7: Custom error formatting
print("\n\n7. Custom Error Formatting")
print("-" * 40)
def format_error_for_ide(error):
"""Format error for IDE integration"""
return f"{error.source_line}:{error.line}:{error.column}: {error.type.value}: {error.message} [{error.code}]"
error_script = "IF EXISTS `.button` THEN CLICK `.button`"
result = compile(error_script)
if not result.success:
error = result.first_error
print("IDE format:", format_error_for_ide(error))
print("Simple format:", error.simple_message)
print("Full format:", error.formatted_message)
# Example 8: Working with warnings (future feature)
print("\n\n8. Handling Warnings")
print("-" * 40)
# In the future, we might have warnings
result = compile("GO https://example.com\nWAIT 100") # Very long wait
print(f"Success: {result.success}")
print(f"Warnings: {len(result.warnings)}")
# Example 9: Metadata usage
print("\n\n9. Using Metadata")
print("-" * 40)
complex_script = """
PROC helper1
CLICK `.btn1`
ENDPROC
PROC helper2
CLICK `.btn2`
ENDPROC
GO https://example.com
helper1
helper2
"""
result = compile(complex_script)
if result.success:
print(f"Script metadata:")
for key, value in result.metadata.items():
print(f" {key}: {value}")
# Example 10: Integration patterns
print("\n\n10. Integration Patterns")
print("-" * 40)
# Web API endpoint simulation
def api_compile(request_body):
"""Simulate API endpoint"""
script = request_body.get("script", "")
result = compile(script)
response = {
"status": "success" if result.success else "error",
"data": result.to_dict()
}
return response
# CLI tool simulation
def cli_compile(script, output_format="text"):
"""Simulate CLI tool"""
result = compile(script)
if output_format == "json":
return result.to_json()
elif output_format == "simple":
if result.success:
return f"OK: {len(result.js_code)} statements"
else:
return f"ERROR: {result.first_error.simple_message}"
else:
return str(result)
# Test the patterns
api_response = api_compile({"script": "GO https://example.com"})
print(f"API response status: {api_response['status']}")
cli_output = cli_compile("WAIT 2", "simple")
print(f"CLI output: {cli_output}")
print("\n" + "=" * 80)
print("All examples completed successfully!") | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/generate_script_hello_world.py | docs/examples/c4a_script/generate_script_hello_world.py | #!/usr/bin/env python3
"""
Hello World Example: LLM-Generated C4A-Script
This example shows how to use the new generate_script() function to automatically
create C4A-Script automation from natural language descriptions and HTML.
"""
from crawl4ai.script.c4a_compile import C4ACompiler
def main():
print("🤖 C4A-Script Generation Hello World")
print("=" * 50)
# Example 1: Simple login form
html = """
<html>
<body>
<form id="login">
<input id="email" type="email" placeholder="Email">
<input id="password" type="password" placeholder="Password">
<button id="submit">Login</button>
</form>
</body>
</html>
"""
goal = "Fill in email 'user@example.com', password 'secret123', and submit the form"
print("📝 Goal:", goal)
print("🌐 HTML: Simple login form")
print()
# Generate C4A-Script
print("🔧 Generated C4A-Script:")
print("-" * 30)
c4a_script = C4ACompiler.generate_script(
html=html,
query=goal,
mode="c4a"
)
print(c4a_script)
print()
# Generate JavaScript
print("🔧 Generated JavaScript:")
print("-" * 30)
js_script = C4ACompiler.generate_script(
html=html,
query=goal,
mode="js"
)
print(js_script)
print()
# Example 2: Simple button click
html2 = """
<html>
<body>
<div class="content">
<h1>Welcome!</h1>
<button id="start-btn" class="primary">Get Started</button>
</div>
</body>
</html>
"""
goal2 = "Click the 'Get Started' button"
print("=" * 50)
print("📝 Goal:", goal2)
print("🌐 HTML: Simple button")
print()
print("🔧 Generated C4A-Script:")
print("-" * 30)
c4a_script2 = C4ACompiler.generate_script(
html=html2,
query=goal2,
mode="c4a"
)
print(c4a_script2)
print()
print("✅ Done! The LLM automatically converted natural language goals")
print(" into executable automation scripts.")
if __name__ == "__main__":
main() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/demo_c4a_crawl4ai.py | docs/examples/c4a_script/demo_c4a_crawl4ai.py | """
Demonstration of C4A-Script integration with Crawl4AI
Shows various use cases and features
"""
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai import c4a_compile, CompilationResult
async def example_basic_usage():
"""Basic C4A-Script usage with Crawl4AI"""
print("\n" + "="*60)
print("Example 1: Basic C4A-Script Usage")
print("="*60)
# Define your automation script
c4a_script = """
# Wait for page to load
WAIT `body` 2
# Handle cookie banner if present
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept-btn`
# Scroll down to load more content
SCROLL DOWN 500
WAIT 1
# Click load more button if exists
IF (EXISTS `.load-more`) THEN CLICK `.load-more`
"""
# Create crawler config with C4A script
config = CrawlerRunConfig(
url="https://example.com",
c4a_script=c4a_script,
wait_for="css:.content",
verbose=False
)
print("✅ C4A Script compiled successfully!")
print(f"Generated {len(config.js_code)} JavaScript commands")
# In production, you would run:
# async with AsyncWebCrawler() as crawler:
# result = await crawler.arun(config=config)
async def example_form_filling():
"""Form filling with C4A-Script"""
print("\n" + "="*60)
print("Example 2: Form Filling with C4A-Script")
print("="*60)
# Form automation script
form_script = """
# Set form values
SET email = "test@example.com"
SET message = "This is a test message"
# Fill the form
CLICK `#email-input`
TYPE $email
CLICK `#message-textarea`
TYPE $message
# Submit the form
CLICK `button[type="submit"]`
# Wait for success message
WAIT `.success-message` 10
"""
config = CrawlerRunConfig(
url="https://example.com/contact",
c4a_script=form_script
)
print("✅ Form filling script ready")
print("Script will:")
print(" - Fill email field")
print(" - Fill message textarea")
print(" - Submit form")
print(" - Wait for confirmation")
async def example_dynamic_loading():
"""Handle dynamic content loading"""
print("\n" + "="*60)
print("Example 3: Dynamic Content Loading")
print("="*60)
# Script for infinite scroll or pagination
pagination_script = """
# Initial wait
WAIT `.product-list` 5
# Load all products by clicking "Load More" repeatedly
REPEAT (CLICK `.load-more`, `document.querySelector('.load-more') !== null`)
# Alternative: Scroll to load (infinite scroll)
# REPEAT (SCROLL DOWN 1000, `document.querySelectorAll('.product').length < 100`)
# Extract count
EVAL `console.log('Products loaded: ' + document.querySelectorAll('.product').length)`
"""
config = CrawlerRunConfig(
url="https://example.com/products",
c4a_script=pagination_script,
screenshot=True # Capture final state
)
print("✅ Dynamic loading script ready")
print("Script will load all products by repeatedly clicking 'Load More'")
async def example_multi_step_workflow():
"""Complex multi-step workflow with procedures"""
print("\n" + "="*60)
print("Example 4: Multi-Step Workflow with Procedures")
print("="*60)
# Complex workflow with reusable procedures
workflow_script = """
# Define login procedure
PROC login
CLICK `#username`
TYPE "demo_user"
CLICK `#password`
TYPE "demo_pass"
CLICK `#login-btn`
WAIT `.dashboard` 10
ENDPROC
# Define search procedure
PROC search_product
CLICK `.search-box`
TYPE "laptop"
PRESS Enter
WAIT `.search-results` 5
ENDPROC
# Main workflow
GO https://example.com
login
search_product
# Process results
IF (EXISTS `.no-results`) THEN EVAL `console.log('No products found')`
ELSE REPEAT (CLICK `.add-to-cart`, 3)
"""
# Compile to check for errors
result = c4a_compile(workflow_script)
if result.success:
print("✅ Complex workflow compiled successfully!")
print("Workflow includes:")
print(" - Login procedure")
print(" - Product search")
print(" - Conditional cart additions")
config = CrawlerRunConfig(
url="https://example.com",
c4a_script=workflow_script
)
else:
print("❌ Compilation error:")
error = result.first_error
print(f" Line {error.line}: {error.message}")
async def example_error_handling():
"""Demonstrate error handling"""
print("\n" + "="*60)
print("Example 5: Error Handling")
print("="*60)
# Script with intentional error
bad_script = """
WAIT body 2
CLICK button
IF (EXISTS .modal) CLICK .close
"""
try:
config = CrawlerRunConfig(
url="https://example.com",
c4a_script=bad_script
)
except ValueError as e:
print("✅ Error caught as expected:")
print(f" {e}")
# Fixed version
good_script = """
WAIT `body` 2
CLICK `button`
IF (EXISTS `.modal`) THEN CLICK `.close`
"""
config = CrawlerRunConfig(
url="https://example.com",
c4a_script=good_script
)
print("\n✅ Fixed script compiled successfully!")
async def example_combining_with_extraction():
"""Combine C4A-Script with extraction strategies"""
print("\n" + "="*60)
print("Example 6: C4A-Script + Extraction Strategies")
print("="*60)
from crawl4ai import JsonCssExtractionStrategy
# Script to prepare page for extraction
prep_script = """
# Expand all collapsed sections
REPEAT (CLICK `.expand-btn`, `document.querySelectorAll('.expand-btn:not(.expanded)').length > 0`)
# Load all comments
IF (EXISTS `.load-comments`) THEN CLICK `.load-comments`
WAIT `.comments-section` 5
# Close any popups
IF (EXISTS `.popup-close`) THEN CLICK `.popup-close`
"""
# Define extraction schema
schema = {
"name": "article",
"selector": "article.main",
"fields": {
"title": {"selector": "h1", "type": "text"},
"content": {"selector": ".content", "type": "text"},
"comments": {
"selector": ".comment",
"type": "list",
"fields": {
"author": {"selector": ".author", "type": "text"},
"text": {"selector": ".text", "type": "text"}
}
}
}
}
config = CrawlerRunConfig(
url="https://example.com/article",
c4a_script=prep_script,
extraction_strategy=JsonCssExtractionStrategy(schema),
wait_for="css:.comments-section"
)
print("✅ Combined C4A + Extraction ready")
print("Workflow:")
print(" 1. Expand collapsed sections")
print(" 2. Load comments")
print(" 3. Extract structured data")
async def main():
"""Run all examples"""
print("\n🚀 C4A-Script + Crawl4AI Integration Demo\n")
# Run all examples
await example_basic_usage()
await example_form_filling()
await example_dynamic_loading()
await example_multi_step_workflow()
await example_error_handling()
await example_combining_with_extraction()
print("\n" + "="*60)
print("✅ All examples completed successfully!")
print("="*60)
print("\nTo run actual crawls, uncomment the AsyncWebCrawler sections")
print("or create your own scripts using these examples as templates.")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/amazon_example/amazon_r2d2_search.py | docs/examples/c4a_script/amazon_example/amazon_r2d2_search.py | #!/usr/bin/env python3
"""
Amazon R2D2 Product Search Example using Crawl4AI
This example demonstrates:
1. Using LLM to generate C4A-Script from HTML snippets
2. Multi-step crawling with session persistence
3. JSON CSS extraction for structured product data
4. Complete workflow: homepage → search → extract products
Requirements:
- Crawl4AI with generate_script support
- LLM API key (configured in environment)
"""
import asyncio
import json
import os
from pathlib import Path
from typing import List, Dict, Any
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.script.c4a_compile import C4ACompiler
class AmazonR2D2Scraper:
def __init__(self):
self.base_dir = Path(__file__).parent
self.search_script_path = self.base_dir / "generated_search_script.js"
self.schema_path = self.base_dir / "generated_product_schema.json"
self.results_path = self.base_dir / "extracted_products.json"
self.session_id = "amazon_r2d2_session"
async def generate_search_script(self) -> str:
"""Generate JavaScript for Amazon search interaction"""
print("🔧 Generating search script from header.html...")
# Check if already generated
if self.search_script_path.exists():
print("✅ Using cached search script")
return self.search_script_path.read_text()
# Read the header HTML
header_html = (self.base_dir / "header.html").read_text()
# Generate script using LLM
search_goal = """
Find the search box and search button, then:
1. Wait for the search box to be visible
2. Click on the search box to focus it
3. Clear any existing text
4. Type "r2d2" into the search box
5. Click the search submit button
6. Wait for navigation to complete and search results to appear
"""
try:
script = C4ACompiler.generate_script(
html=header_html,
query=search_goal,
mode="js"
)
# Save for future use
self.search_script_path.write_text(script)
print("✅ Search script generated and saved!")
print(f"📄 Script:\n{script}")
return script
except Exception as e:
print(f"❌ Error generating search script: {e}")
raise
async def generate_product_schema(self) -> Dict[str, Any]:
"""Generate JSON CSS extraction schema from product HTML"""
print("\n🔧 Generating product extraction schema...")
# Check if already generated
if self.schema_path.exists():
print("✅ Using cached extraction schema")
return json.loads(self.schema_path.read_text())
# Read the product HTML
product_html = (self.base_dir / "product.html").read_text()
# Generate extraction schema using LLM
schema_goal = """
Create a JSON CSS extraction schema to extract:
- Product title (from the h2 element)
- Price (the dollar amount)
- Rating (star rating value)
- Number of reviews
- Delivery information
- Product URL (from the main product link)
- Whether it's sponsored
- Small business badge if present
The schema should handle multiple products on a search results page.
"""
try:
# Generate JavaScript that returns the schema
schema = JsonCssExtractionStrategy.generate_schema(
html=product_html,
query=schema_goal,
)
# Save for future use
self.schema_path.write_text(json.dumps(schema, indent=2))
print("✅ Extraction schema generated and saved!")
print(f"📄 Schema fields: {[f['name'] for f in schema['fields']]}")
return schema
except Exception as e:
print(f"❌ Error generating schema: {e}")
raise
async def crawl_amazon(self):
"""Search Amazon and extract product data in a single arun() call on one session"""
print("\n🚀 Starting Amazon R2D2 product search...")
# Generate scripts and schemas
search_script = await self.generate_search_script()
product_schema = await self.generate_product_schema()
# Configure browser (headless=False to see the action)
browser_config = BrowserConfig(
headless=False,
verbose=True,
viewport_width=1920,
viewport_height=1080
)
async with AsyncWebCrawler(config=browser_config) as crawler:
print("\n📍 Step 1: Navigate to Amazon and search for R2D2")
# FIRST CALL: Navigate to Amazon and execute search
search_config = CrawlerRunConfig(
session_id=self.session_id,
js_code= f"(() => {{ {search_script} }})()", # Execute generated JS
wait_for=".s-search-results", # Wait for search results
extraction_strategy=JsonCssExtractionStrategy(schema=product_schema),
delay_before_return_html=3.0 # Give time for results to load
)
results = await crawler.arun(
url="https://www.amazon.com",
config=search_config
)
if not results.success:
print("❌ Failed to search Amazon")
print(f"Error: {results.error_message}")
return
print("✅ Search completed successfully!")
print("✅ Product extraction completed!")
# Extract and save results
print("\n📍 Extracting product data")
if results[0].extracted_content:
products = json.loads(results[0].extracted_content)
print(f"🔍 Found {len(products)} products in search results")
print(f"✅ Extracted {len(products)} R2D2 products")
# Save results
self.results_path.write_text(
json.dumps(products, indent=2)
)
print(f"💾 Results saved to: {self.results_path}")
# Print sample results
print("\n📊 Sample Results:")
for i, product in enumerate(products[:3], 1):
print(f"\n{i}. {product['title'][:60]}...")
print(f" Price: ${product['price']}")
print(f" Rating: {product['rating']} ({product['number_of_reviews']} reviews)")
print(f" {'🏪 Small Business' if product['small_business_badge'] else ''}")
print(f" {'📢 Sponsored' if product['sponsored'] else ''}")
else:
print("❌ No products extracted")
async def main():
"""Run the Amazon scraper"""
scraper = AmazonR2D2Scraper()
await scraper.crawl_amazon()
print("\n🎉 Amazon R2D2 search example completed!")
print("Check the generated files:")
print(" - generated_search_script.js")
print(" - generated_product_schema.json")
print(" - extracted_products.json")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/tutorial/server.py | docs/examples/c4a_script/tutorial/server.py | #!/usr/bin/env python3
"""
C4A-Script Tutorial Server
Serves the tutorial app and provides C4A compilation API
"""
import sys
import os
from pathlib import Path
from flask import Flask, render_template_string, request, jsonify, send_from_directory
from flask_cors import CORS
# Add parent directories to path to import crawl4ai
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent.parent))
try:
from crawl4ai.script import compile as c4a_compile
C4A_AVAILABLE = True
except ImportError:
print("⚠️ C4A compiler not available. Using mock compiler.")
C4A_AVAILABLE = False
app = Flask(__name__)
CORS(app)
# Serve static files
@app.route('/')
def index():
return send_from_directory('.', 'index.html')
@app.route('/assets/<path:path>')
def serve_assets(path):
return send_from_directory('assets', path)
@app.route('/playground/')
def playground():
return send_from_directory('playground', 'index.html')
@app.route('/playground/<path:path>')
def serve_playground(path):
return send_from_directory('playground', path)
# API endpoint for C4A compilation
@app.route('/api/compile', methods=['POST'])
def compile_endpoint():
try:
data = request.get_json()
script = data.get('script', '')
if not script:
return jsonify({
'success': False,
'error': {
'line': 1,
'column': 1,
'message': 'No script provided',
'suggestion': 'Write some C4A commands'
}
})
if C4A_AVAILABLE:
# Use real C4A compiler
result = c4a_compile(script)
if result.success:
return jsonify({
'success': True,
'jsCode': result.js_code,
'metadata': {
'lineCount': len(result.js_code),
'sourceLines': len(script.split('\n'))
}
})
else:
error = result.first_error
return jsonify({
'success': False,
'error': {
'line': error.line,
'column': error.column,
'message': error.message,
'suggestion': error.suggestions[0].message if error.suggestions else None,
'code': error.code,
'sourceLine': error.source_line
}
})
else:
# Use mock compiler for demo
result = mock_compile(script)
return jsonify(result)
except Exception as e:
return jsonify({
'success': False,
'error': {
'line': 1,
'column': 1,
'message': f'Server error: {str(e)}',
'suggestion': 'Check server logs'
}
}), 500
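# Example client call for this endpoint (a sketch; assumes the server is running locally on
# port 8000 and that the `requests` package is available):
# import requests
# resp = requests.post("http://localhost:8000/api/compile", json={"script": "GO https://example.com\nWAIT `body` 2"})
# print(resp.json())  # -> {'success': True, 'jsCode': [...], 'metadata': {...}} on success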
def mock_compile(script):
"""Simple mock compiler for demo when C4A is not available"""
lines = [line for line in script.split('\n') if line.strip() and not line.strip().startswith('#')]
js_code = []
for i, line in enumerate(lines):
line = line.strip()
try:
if line.startswith('GO '):
url = line[3:].strip()
# Handle relative URLs
if not url.startswith(('http://', 'https://')):
url = '/' + url.lstrip('/')
js_code.append(f"await page.goto('{url}');")
elif line.startswith('WAIT '):
parts = line[5:].strip().split(' ')
if parts[0].startswith('`'):
selector = parts[0].strip('`')
timeout = parts[1] if len(parts) > 1 else '5'
js_code.append(f"await page.waitForSelector('{selector}', {{ timeout: {timeout}000 }});")
else:
seconds = parts[0]
js_code.append(f"await page.waitForTimeout({seconds}000);")
elif line.startswith('CLICK '):
selector = line[6:].strip().strip('`')
js_code.append(f"await page.click('{selector}');")
elif line.startswith('TYPE '):
text = line[5:].strip().strip('"')
js_code.append(f"await page.keyboard.type('{text}');")
elif line.startswith('SCROLL '):
parts = line[7:].strip().split(' ')
direction = parts[0]
amount = parts[1] if len(parts) > 1 else '500'
if direction == 'DOWN':
js_code.append(f"await page.evaluate(() => window.scrollBy(0, {amount}));")
elif direction == 'UP':
js_code.append(f"await page.evaluate(() => window.scrollBy(0, -{amount}));")
elif line.startswith('IF '):
if 'THEN' not in line:
return {
'success': False,
'error': {
'line': i + 1,
'column': len(line),
'message': "Missing 'THEN' keyword after IF condition",
'suggestion': "Add 'THEN' after the condition",
'sourceLine': line
}
}
condition = line[3:line.index('THEN')].strip()
action = line[line.index('THEN') + 4:].strip()
if 'EXISTS' in condition:
selector_match = condition.split('`')
if len(selector_match) >= 2:
selector = selector_match[1]
action_selector = action.split('`')[1] if '`' in action else ''
js_code.append(
f"if ((await page.$$('{selector}')).length > 0) {{ "
f"await page.click('{action_selector}'); }}"
)
elif line.startswith('PRESS '):
key = line[6:].strip()
js_code.append(f"await page.keyboard.press('{key}');")
else:
# Unknown command
return {
'success': False,
'error': {
'line': i + 1,
'column': 1,
'message': f"Unknown command: {line.split()[0]}",
'suggestion': "Check command syntax",
'sourceLine': line
}
}
except Exception as e:
return {
'success': False,
'error': {
'line': i + 1,
'column': 1,
'message': f"Failed to parse: {str(e)}",
'suggestion': "Check syntax",
'sourceLine': line
}
}
return {
'success': True,
'jsCode': js_code,
'metadata': {
'lineCount': len(js_code),
'sourceLines': len(lines)
}
}
# Example scripts endpoint
@app.route('/api/examples')
def get_examples():
examples = [
{
'id': 'cookie-banner',
'name': 'Handle Cookie Banner',
'description': 'Accept cookies and close newsletter popup',
'script': '''# Handle cookie banner and newsletter
GO http://127.0.0.1:8000/playground/
WAIT `body` 2
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
},
{
'id': 'login',
'name': 'Login Flow',
'description': 'Complete login with credentials',
'script': '''# Login to the site
CLICK `#login-btn`
WAIT `.login-form` 2
CLICK `#email`
TYPE "demo@example.com"
CLICK `#password`
TYPE "demo123"
IF (EXISTS `#remember-me`) THEN CLICK `#remember-me`
CLICK `button[type="submit"]`
WAIT `.welcome-message` 5'''
},
{
'id': 'infinite-scroll',
'name': 'Infinite Scroll',
'description': 'Load products with scrolling',
'script': '''# Navigate to catalog and scroll
CLICK `#catalog-link`
WAIT `.product-grid` 3
# Scroll multiple times to load products
SCROLL DOWN 1000
WAIT 1
SCROLL DOWN 1000
WAIT 1
SCROLL DOWN 1000'''
},
{
'id': 'form-wizard',
'name': 'Multi-step Form',
'description': 'Complete a multi-step survey',
'script': '''# Navigate to forms
CLICK `a[href="#forms"]`
WAIT `#survey-form` 2
# Step 1: Basic info
CLICK `#full-name`
TYPE "John Doe"
CLICK `#survey-email`
TYPE "john@example.com"
CLICK `.next-step`
WAIT 1
# Step 2: Preferences
CLICK `#interests`
CLICK `option[value="tech"]`
CLICK `option[value="music"]`
CLICK `.next-step`
WAIT 1
# Step 3: Submit
CLICK `#submit-survey`
WAIT `.success-message` 5'''
}
]
return jsonify(examples)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8000))
print(f"""
╔══════════════════════════════════════════════════════════╗
║ C4A-Script Interactive Tutorial Server ║
╠══════════════════════════════════════════════════════════╣
║ ║
║ Server running at: http://localhost:{port:<6} ║
║ ║
║ Features: ║
║ • C4A-Script compilation API ║
║ • Interactive playground ║
║ • Real-time execution visualization ║
║ ║
║ C4A Compiler: {'✓ Available' if C4A_AVAILABLE else '✗ Using mock compiler':<30} ║
║ ║
╚══════════════════════════════════════════════════════════╝
""")
app.run(host='0.0.0.0', port=port, debug=True) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/c4a_script/github_search/github_search_crawler.py | docs/examples/c4a_script/github_search/github_search_crawler.py | #!/usr/bin/env python3
"""
GitHub Advanced Search Example using Crawl4AI
This example demonstrates:
1. Using LLM to generate C4A-Script from HTML snippets
2. Single arun() call with navigation, search form filling, and extraction
3. JSON CSS extraction for structured repository data
4. Complete workflow: navigate → fill form → submit → extract results
Requirements:
- Crawl4AI with generate_script support
- LLM API key (configured in environment)
"""
import asyncio
import json
import os
from pathlib import Path
from typing import List, Dict, Any
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.script.c4a_compile import C4ACompiler
class GitHubSearchScraper:
def __init__(self):
self.base_dir = Path(__file__).parent
self.search_script_path = self.base_dir / "generated_search_script.js"
self.schema_path = self.base_dir / "generated_result_schema.json"
self.results_path = self.base_dir / "extracted_repositories.json"
self.session_id = "github_search_session"
async def generate_search_script(self) -> str:
"""Generate JavaScript for GitHub advanced search interaction"""
print("🔧 Generating search script from search_form.html...")
# Check if already generated
if self.search_script_path.exists():
print("✅ Using cached search script")
return self.search_script_path.read_text()
# Read the search form HTML
search_form_html = (self.base_dir / "search_form.html").read_text()
# Generate script using LLM
search_goal = """
Search for crawl4AI repositories written in Python with more than 10000 stars:
1. Wait for the main search input to be visible
2. Type "crawl4AI" into the main search box
3. Select "Python" from the language dropdown (#search_language)
4. Type ">10000" into the stars input field (#search_stars)
5. Click the search button to submit the form
6. Wait for the search results to appear
"""
try:
script = C4ACompiler.generate_script(
html=search_form_html,
query=search_goal,
mode="js"
)
# Save for future use
self.search_script_path.write_text(script)
print("✅ Search script generated and saved!")
print(f"📄 Script preview:\n{script[:500]}...")
return script
except Exception as e:
print(f"❌ Error generating search script: {e}")
raise
async def generate_result_schema(self) -> Dict[str, Any]:
"""Generate JSON CSS extraction schema from result HTML"""
print("\n🔧 Generating result extraction schema...")
# Check if already generated
if self.schema_path.exists():
print("✅ Using cached extraction schema")
return json.loads(self.schema_path.read_text())
# Read the result HTML
result_html = (self.base_dir / "result.html").read_text()
# Generate extraction schema using LLM
schema_goal = """
Create a JSON CSS extraction schema to extract from each repository card:
- Repository name (the repository name only, not including owner)
- Repository owner (organization or username)
- Repository URL (full GitHub URL)
- Description
- Primary programming language
- Star count (numeric value)
- Topics/tags (array of topic names)
- Last updated (time ago string)
- Whether it has a sponsor button
The schema should handle multiple repository results on the search results page.
"""
try:
# Generate schema
schema = JsonCssExtractionStrategy.generate_schema(
html=result_html,
query=schema_goal,
)
# Save for future use
self.schema_path.write_text(json.dumps(schema, indent=2))
print("✅ Extraction schema generated and saved!")
print(f"📄 Schema fields: {[f['name'] for f in schema['fields']]}")
return schema
except Exception as e:
print(f"❌ Error generating schema: {e}")
raise
async def crawl_github(self):
"""Main crawling logic with single arun() call"""
print("\n🚀 Starting GitHub repository search...")
# Generate scripts and schemas
search_script = await self.generate_search_script()
result_schema = await self.generate_result_schema()
# Configure browser (headless=False to see the action)
browser_config = BrowserConfig(
headless=False,
verbose=True,
viewport_width=1920,
viewport_height=1080
)
async with AsyncWebCrawler(config=browser_config) as crawler:
print("\n📍 Navigating to GitHub advanced search and executing search...")
# Single call: Navigate, execute search, and extract results
search_config = CrawlerRunConfig(
session_id=self.session_id,
js_code=search_script, # Execute generated JS
# wait_for="[data-testid='results-list']", # Wait for search results
wait_for=".Box-sc-g0xbh4-0.iwUbcA", # Wait for search results
extraction_strategy=JsonCssExtractionStrategy(schema=result_schema),
delay_before_return_html=3.0, # Give time for results to fully load
cache_mode=CacheMode.BYPASS # Don't cache for fresh results
)
result = await crawler.arun(
url="https://github.com/search/advanced",
config=search_config
)
if not result.success:
print("❌ Failed to search GitHub")
print(f"Error: {result.error_message}")
return
print("✅ Search and extraction completed successfully!")
# Extract and save results
if result.extracted_content:
repositories = json.loads(result.extracted_content)
print(f"\n🔍 Found {len(repositories)} repositories matching criteria")
# Save results
self.results_path.write_text(
json.dumps(repositories, indent=2)
)
print(f"💾 Results saved to: {self.results_path}")
# Print sample results
print("\n📊 Sample Results:")
for i, repo in enumerate(repositories[:5], 1):
print(f"\n{i}. {repo.get('owner', 'Unknown')}/{repo.get('name', 'Unknown')}")
print(f" Description: {repo.get('description', 'No description')[:80]}...")
print(f" Language: {repo.get('language', 'Unknown')}")
print(f" Stars: {repo.get('stars', 'Unknown')}")
print(f" Updated: {repo.get('last_updated', 'Unknown')}")
if repo.get('topics'):
print(f" Topics: {', '.join(repo['topics'][:5])}")
print(f" URL: {repo.get('url', 'Unknown')}")
else:
print("❌ No repositories extracted")
# Save screenshot for reference
if result.screenshot:
screenshot_path = self.base_dir / "search_results_screenshot.png"
with open(screenshot_path, "wb") as f:
f.write(result.screenshot)
print(f"\n📸 Screenshot saved to: {screenshot_path}")
async def main():
"""Run the GitHub search scraper"""
scraper = GitHubSearchScraper()
await scraper.crawl_github()
print("\n🎉 GitHub search example completed!")
print("Check the generated files:")
print(" - generated_search_script.js")
print(" - generated_result_schema.json")
print(" - extracted_repositories.json")
print(" - search_results_screenshot.png")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/nst_proxy/nstproxy_example.py | docs/examples/nst_proxy/nstproxy_example.py | """
NSTProxy Integration Examples for crawl4ai
------------------------------------------
NSTProxy is a premium residential proxy provider.
👉 Purchase Proxies: https://nstproxy.com
💰 Use coupon code "crawl4ai" for 10% off your plan.
"""
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig
async def main():
"""
Example: Using NSTProxy with AsyncWebCrawler.
"""
NST_TOKEN = "YOUR_NST_PROXY_TOKEN" # Get from https://app.nstproxy.com/profile
CHANNEL_ID = "YOUR_NST_PROXY_CHANNEL_ID" # Your NSTProxy Channel ID
browser_config = BrowserConfig()
browser_config.set_nstproxy(
token=NST_TOKEN,
channel_id=CHANNEL_ID,
country="ANY", # e.g. "US", "JP", or "ANY"
state="", # optional, leave empty if not needed
city="", # optional, leave empty if not needed
session_duration=0  # Session duration in minutes; 0 = rotate on every request
)
# === Run crawler ===
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com")
print("[Nstproxy] Status:", result.status_code)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/nst_proxy/auth_proxy_example.py | docs/examples/nst_proxy/auth_proxy_example.py | """
NSTProxy Integration Examples for crawl4ai
------------------------------------------
NSTProxy is a premium residential proxy provider.
👉 Purchase Proxies: https://nstproxy.com
💰 Use coupon code "crawl4ai" for 10% off your plan.
"""
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig
async def main():
"""
Example: Use NSTProxy with manual username/password authentication.
"""
browser_config = BrowserConfig(proxy_config={
"server": "http://gate.nstproxy.io:24125",
"username": "your_username",
"password": "your_password",
})
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com")
print("[Auth Proxy] Status:", result.status_code)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/nst_proxy/api_proxy_example.py | docs/examples/nst_proxy/api_proxy_example.py | """
NSTProxy Integration Examples for crawl4ai
------------------------------------------
NSTProxy is a premium residential proxy provider.
👉 Purchase Proxies: https://nstproxy.com
💰 Use coupon code "crawl4ai" for 10% off your plan.
"""
import asyncio, requests
from crawl4ai import AsyncWebCrawler, BrowserConfig
async def main():
"""
Example: Dynamically fetch a proxy from NSTProxy API before crawling.
"""
NST_TOKEN = "YOUR_NST_PROXY_TOKEN" # Get from https://app.nstproxy.com/profile
CHANNEL_ID = "YOUR_NST_PROXY_CHANNEL_ID" # Your NSTProxy Channel ID
country = "ANY" # e.g. "ANY", "US", "DE"
# Fetch proxy from NSTProxy API
api_url = (
f"https://api.nstproxy.com/api/v1/generate/apiproxies"
f"?fType=2&channelId={CHANNEL_ID}&country={country}"
f"&protocol=http&sessionDuration=10&count=1&token={NST_TOKEN}"
)
response = requests.get(api_url, timeout=10).json()
proxy = response[0]
ip = proxy.get("ip")
port = proxy.get("port")
username = proxy.get("username", "")
password = proxy.get("password", "")
browser_config = BrowserConfig(proxy_config={
"server": f"http://{ip}:{port}",
"username": username,
"password": password,
})
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com")
print("[API Proxy] Status:", result.status_code)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/nst_proxy/basic_proxy_example.py | docs/examples/nst_proxy/basic_proxy_example.py | """
NSTProxy Integration Examples for crawl4ai
------------------------------------------
NSTProxy is a premium residential proxy provider.
👉 Purchase Proxies: https://nstproxy.com
💰 Use coupon code "crawl4ai" for 10% off your plan.
"""
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig
async def main():
# Using HTTP proxy
browser_config = BrowserConfig(proxy_config={"server": "http://gate.nstproxy.io:24125"})
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com")
print("[HTTP Proxy] Status:", result.status_code)
# Using SOCKS proxy
browser_config = BrowserConfig(proxy_config={"server": "socks5://gate.nstproxy.io:24125"})
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com")
print("[SOCKS5 Proxy] Status:", result.status_code)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/website-to-api/web_scraper_lib.py | docs/examples/website-to-api/web_scraper_lib.py | from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CacheMode,
CrawlerRunConfig,
LLMConfig,
JsonCssExtractionStrategy,
LLMExtractionStrategy
)
import os
import json
import hashlib
from typing import Dict, Any, Optional, List
from litellm import completion
class ModelConfig:
"""Configuration for LLM models."""
def __init__(self, provider: str, api_token: str):
self.provider = provider
self.api_token = api_token
def to_dict(self) -> Dict[str, Any]:
return {
"provider": self.provider,
"api_token": self.api_token
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'ModelConfig':
return cls(
provider=data["provider"],
api_token=data["api_token"]
)
class WebScraperAgent:
"""
A mini library that converts any website into a structured data API.
Features:
1. Provide a URL and tell AI what data you need in plain English
2. Generate: Agent reverse-engineers the site and deploys custom scraper
3. Integrate: Use private API endpoint to get structured data
4. Support for custom LLM models and API keys
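Usage sketch (illustrative; the model name, provider string, URL, and query below are assumed values):
import asyncio
agent = WebScraperAgent()
agent.save_model_config("my-gemini", "gemini/gemini-1.5-flash", "YOUR_API_KEY")
data = asyncio.run(agent.scrape_data(
url="https://example.com/products",
query="Extract product_name and product_price",
model_name="my-gemini"))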
"""
def __init__(self, schemas_dir: str = "schemas", models_dir: str = "models"):
self.schemas_dir = schemas_dir
self.models_dir = models_dir
os.makedirs(self.schemas_dir, exist_ok=True)
os.makedirs(self.models_dir, exist_ok=True)
def _generate_schema_key(self, url: str, query: str) -> str:
"""Generate a unique key for schema caching based on URL and query."""
content = f"{url}:{query}"
return hashlib.md5(content.encode()).hexdigest()
def save_model_config(self, model_name: str, provider: str, api_token: str) -> bool:
"""
Save a model configuration for later use.
Args:
model_name: User-friendly name for the model
provider: LLM provider (e.g., 'gemini', 'openai', 'anthropic')
api_token: API token for the provider
Returns:
True if saved successfully
"""
try:
model_config = ModelConfig(provider, api_token)
config_path = os.path.join(self.models_dir, f"{model_name}.json")
with open(config_path, "w") as f:
json.dump(model_config.to_dict(), f, indent=2)
print(f"Model configuration saved: {model_name}")
return True
except Exception as e:
print(f"Failed to save model configuration: {e}")
return False
def load_model_config(self, model_name: str) -> Optional[ModelConfig]:
"""
Load a saved model configuration.
Args:
model_name: Name of the saved model configuration
Returns:
ModelConfig object or None if not found
"""
try:
config_path = os.path.join(self.models_dir, f"{model_name}.json")
if not os.path.exists(config_path):
return None
with open(config_path, "r") as f:
data = json.load(f)
return ModelConfig.from_dict(data)
except Exception as e:
print(f"Failed to load model configuration: {e}")
return None
def list_saved_models(self) -> List[str]:
"""List all saved model configurations."""
models = []
for filename in os.listdir(self.models_dir):
if filename.endswith('.json'):
models.append(filename[:-5]) # Remove .json extension
return models
def delete_model_config(self, model_name: str) -> bool:
"""
Delete a saved model configuration.
Args:
model_name: Name of the model configuration to delete
Returns:
True if deleted successfully
"""
try:
config_path = os.path.join(self.models_dir, f"{model_name}.json")
if os.path.exists(config_path):
os.remove(config_path)
print(f"Model configuration deleted: {model_name}")
return True
return False
except Exception as e:
print(f"Failed to delete model configuration: {e}")
return False
async def _load_or_generate_schema(self, url: str, query: str, session_id: str = "schema_generator", model_name: Optional[str] = None) -> Dict[str, Any]:
"""
Loads schema from cache if exists, otherwise generates using AI.
This is the "Generate" step - our agent reverse-engineers the site.
Args:
url: URL to scrape
query: Query for data extraction
session_id: Session identifier
model_name: Name of saved model configuration to use
"""
schema_key = self._generate_schema_key(url, query)
schema_path = os.path.join(self.schemas_dir, f"{schema_key}.json")
if os.path.exists(schema_path):
print(f"Schema found in cache for {url}")
with open(schema_path, "r") as f:
return json.load(f)
print(f"Generating new schema for {url}")
print(f"Query: {query}")
query += """
IMPORTANT:
GENERATE THE SCHEMA WITH ONLY THE FIELDS MENTIONED IN THE QUERY. MAKE SURE THE NUMBER OF FIELDS IN THE SCHEMA MATCHES THE NUMBER OF FIELDS IN THE QUERY.
"""
# Step 1: Fetch the page HTML
async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
result = await crawler.arun(
url=url,
config=CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
session_id=session_id,
simulate_user=True,
remove_overlay_elements=True,
delay_before_return_html=5,
)
)
html = result.fit_html
# Step 2: Generate schema using AI with custom model if specified
print("AI is analyzing the page structure...")
# Use custom model configuration if provided
if model_name:
model_config = self.load_model_config(model_name)
if model_config:
llm_config = LLMConfig(
provider=model_config.provider,
api_token=model_config.api_token
)
print(f"Using custom model: {model_name}")
else:
raise ValueError(f"Model configuration '{model_name}' not found. Please add it from the Models page.")
else:
# Require a model to be specified
raise ValueError("No model specified. Please select a model from the dropdown or add one from the Models page.")
schema = JsonCssExtractionStrategy.generate_schema(
html=html,
llm_config=llm_config,
query=query
)
# Step 3: Cache the generated schema
print(f"Schema generated and cached: {json.dumps(schema, indent=2)}")
with open(schema_path, "w") as f:
json.dump(schema, f, indent=2)
return schema
def _generate_llm_schema(self, query: str, llm_config: LLMConfig) -> Dict[str, Any]:
"""
Generate a schema for a given query using a custom LLM model.
Args:
query: Plain English description of what data to extract
llm_config: LLM configuration to use
"""
# Ask the model to produce a JSON schema for the fields mentioned in the query.
prompt = f"""
IDENTIFY THE FIELDS FOR EXTRACTION MENTIONED IN THE QUERY and GENERATE A JSON SCHEMA FOR THE FIELDS.
eg.
{{
"name": "str",
"age": "str",
"email": "str",
"product_name": "str",
"product_price": "str",
"product_description": "str",
"product_image": "str",
"product_url": "str",
"product_rating": "str",
"product_reviews": "str",
}}
Here is the query:
{query}
IMPORTANT:
THE RESULT SHOULD BE A JSON OBJECT.
MAKE SURE THE NUMBER OF FIELDS IN THE RESULT MATCH THE NUMBER OF FIELDS IN THE QUERY.
THE RESULT SHOULD BE A JSON OBJECT.
"""
response = completion(
model=llm_config.provider,
messages=[{"role": "user", "content": prompt}],
api_key=llm_config.api_token,
result_type="json"
)
        # litellm's completion() returns a ModelResponse; read the message content directly
        content = response["choices"][0]["message"]["content"]
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            # Fall back to the raw string if the model did not return valid JSON
            return content
async def scrape_data_with_llm(self, url: str, query: str, model_name: Optional[str] = None) -> Dict[str, Any]:
"""
Scrape structured data from any website using a custom LLM model.
Args:
url: The website URL to scrape
query: Plain English description of what data to extract
model_name: Name of saved model configuration to use
"""
if model_name:
model_config = self.load_model_config(model_name)
if model_config:
llm_config = LLMConfig(
provider=model_config.provider,
api_token=model_config.api_token
)
print(f"Using custom model: {model_name}")
else:
raise ValueError(f"Model configuration '{model_name}' not found. Please add it from the Models page.")
else:
# Require a model to be specified
raise ValueError("No model specified. Please select a model from the dropdown or add one from the Models page.")
query += """\n
IMPORTANT:
        THE RESULT SHOULD BE A JSON OBJECT WITH ONLY THE FIELDS MENTIONED IN THE QUERY.
MAKE SURE THE NUMBER OF FIELDS IN THE RESULT MATCH THE NUMBER OF FIELDS IN THE QUERY.
THE RESULT SHOULD BE A JSON OBJECT.
"""
schema = self._generate_llm_schema(query, llm_config)
print(f"Schema: {schema}")
llm_extraction_strategy = LLMExtractionStrategy(
llm_config=llm_config,
instruction=query,
result_type="json",
schema=schema
)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=url,
config=CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
simulate_user=True,
extraction_strategy=llm_extraction_strategy,
)
)
extracted_data = result.extracted_content
if isinstance(extracted_data, str):
try:
extracted_data = json.loads(extracted_data)
except json.JSONDecodeError:
# If it's not valid JSON, keep it as string
pass
return {
"url": url,
"query": query,
"extracted_data": extracted_data,
"timestamp": result.timestamp if hasattr(result, 'timestamp') else None
}
async def scrape_data(self, url: str, query: str, model_name: Optional[str] = None) -> Dict[str, Any]:
"""
Main method to scrape structured data from any website.
Args:
url: The website URL to scrape
query: Plain English description of what data to extract
model_name: Name of saved model configuration to use
Returns:
Structured data extracted from the website
"""
# Step 1: Generate or load schema (reverse-engineer the site)
schema = await self._load_or_generate_schema(url=url, query=query, model_name=model_name)
# Step 2: Deploy custom high-speed scraper
print(f"Deploying custom scraper for {url}")
browser_config = BrowserConfig(headless=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
run_config = CrawlerRunConfig(
extraction_strategy=JsonCssExtractionStrategy(schema=schema),
)
result = await crawler.arun(url=url, config=run_config)
# Step 3: Return structured data
# Parse extracted_content if it's a JSON string
extracted_data = result.extracted_content
if isinstance(extracted_data, str):
try:
extracted_data = json.loads(extracted_data)
except json.JSONDecodeError:
# If it's not valid JSON, keep it as string
pass
return {
"url": url,
"query": query,
"extracted_data": extracted_data,
"schema_used": schema,
"timestamp": result.timestamp if hasattr(result, 'timestamp') else None
}
async def get_cached_schemas(self) -> Dict[str, str]:
"""Get list of cached schemas."""
schemas = {}
for filename in os.listdir(self.schemas_dir):
if filename.endswith('.json'):
schema_key = filename[:-5] # Remove .json extension
schemas[schema_key] = filename
return schemas
def clear_cache(self):
"""Clear all cached schemas."""
import shutil
if os.path.exists(self.schemas_dir):
shutil.rmtree(self.schemas_dir)
os.makedirs(self.schemas_dir, exist_ok=True)
print("Schema cache cleared")
# Convenience function for simple usage
async def scrape_website(url: str, query: str, model_name: Optional[str] = None) -> Dict[str, Any]:
"""
Simple function to scrape any website with plain English instructions.
Args:
url: Website URL
query: Plain English description of what data to extract
model_name: Name of saved model configuration to use
Returns:
Extracted structured data
"""
agent = WebScraperAgent()
return await agent.scrape_data(url, query, model_name)
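# Example usage of scrape_website (an illustrative sketch, not part of the library):
# it assumes a model configuration named "my-openai" was saved earlier via
# WebScraperAgent.save_model_config and that the target URL is reachable.
#
#   import asyncio
#   data = asyncio.run(scrape_website(
#       url="https://example.com",
#       query="Extract the page title and the main heading. DO NOT EXTRACT ANYTHING ELSE.",
#       model_name="my-openai",
#   ))
#   print(data["extracted_data"])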
async def scrape_website_with_llm(url: str, query: str, model_name: Optional[str] = None):
"""
Scrape structured data from any website using a custom LLM model.
Args:
url: The website URL to scrape
query: Plain English description of what data to extract
model_name: Name of saved model configuration to use
"""
agent = WebScraperAgent()
return await agent.scrape_data_with_llm(url, query, model_name) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/website-to-api/test_models.py | docs/examples/website-to-api/test_models.py | #!/usr/bin/env python3
"""
Test script for the new model management functionality.
This script demonstrates how to save and use custom model configurations.
"""
import asyncio
import requests
import json
# API base URL
BASE_URL = "http://localhost:8000"
def test_model_management():
"""Test the model management endpoints."""
print("=== Testing Model Management ===")
# 1. List current models
print("\n1. Listing current models:")
response = requests.get(f"{BASE_URL}/models")
print(f"Status: {response.status_code}")
print(f"Response: {json.dumps(response.json(), indent=2)}")
    # 2. Save a model configuration (OpenAI example)
print("\n2. Saving OpenAI model configuration:")
openai_config = {
"model_name": "my-openai",
"provider": "openai",
"api_token": "your-openai-api-key-here"
}
response = requests.post(f"{BASE_URL}/models", json=openai_config)
print(f"Status: {response.status_code}")
print(f"Response: {json.dumps(response.json(), indent=2)}")
# 3. List models again to see the new ones
print("\n3. Listing models after adding new ones:")
response = requests.get(f"{BASE_URL}/models")
print(f"Status: {response.status_code}")
print(f"Response: {json.dumps(response.json(), indent=2)}")
# 4. Delete a model configuration
print("\n4. Deleting a model configuration:")
response = requests.delete(f"{BASE_URL}/models/my-openai")
print(f"Status: {response.status_code}")
print(f"Response: {json.dumps(response.json(), indent=2)}")
# 5. Final list of models
print("\n5. Final list of models:")
response = requests.get(f"{BASE_URL}/models")
print(f"Status: {response.status_code}")
print(f"Response: {json.dumps(response.json(), indent=2)}")
if __name__ == "__main__":
print("Model Management Test Script")
print("Make sure the API server is running on http://localhost:8000")
print("=" * 50)
try:
test_model_management()
except requests.exceptions.ConnectionError:
print("Error: Could not connect to the API server.")
print("Make sure the server is running with: python api_server.py")
except Exception as e:
print(f"Error: {e}") | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/website-to-api/test_api.py | docs/examples/website-to-api/test_api.py | import asyncio
from web_scraper_lib import scrape_website
import os
async def test_library():
"""Test the mini library directly."""
print("=== Testing Mini Library ===")
# Test 1: Scrape with a custom model
url = "https://marketplace.mainstreet.co.in/collections/adidas-yeezy/products/adidas-yeezy-boost-350-v2-yecheil-non-reflective"
query = "Extract the following data: Product name, Product price, Product description, Product size. DO NOT EXTRACT ANYTHING ELSE."
if os.path.exists("models"):
model_name = os.listdir("models")[0].split(".")[0]
else:
raise Exception("No models found in models directory")
print(f"Scraping: {url}")
print(f"Query: {query}")
try:
result = await scrape_website(url, query, model_name)
print("✅ Library test successful!")
print(f"Extracted data: {result['extracted_data']}")
except Exception as e:
print(f"❌ Library test failed: {e}")
if __name__ == "__main__":
asyncio.run(test_library()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/website-to-api/api_server.py | docs/examples/website-to-api/api_server.py | from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from pydantic import BaseModel, HttpUrl
from typing import Dict, Any, Optional, Union, List
import uvicorn
import asyncio
import os
import json
from datetime import datetime
from web_scraper_lib import WebScraperAgent, scrape_website
app = FastAPI(
title="Web Scraper API",
description="Convert any website into a structured data API. Provide a URL and tell AI what data you need in plain English.",
version="1.0.0"
)
# Mount static files
if os.path.exists("static"):
app.mount("/static", StaticFiles(directory="static"), name="static")
# Mount assets directory
if os.path.exists("assets"):
app.mount("/assets", StaticFiles(directory="assets"), name="assets")
# Initialize the scraper agent
scraper_agent = WebScraperAgent()
# Create directory for saved API requests
os.makedirs("saved_requests", exist_ok=True)
class ScrapeRequest(BaseModel):
url: HttpUrl
query: str
model_name: Optional[str] = None
class ModelConfigRequest(BaseModel):
model_name: str
provider: str
api_token: str
class ScrapeResponse(BaseModel):
success: bool
url: str
query: str
extracted_data: Union[Dict[str, Any], list]
schema_used: Optional[Dict[str, Any]] = None
timestamp: Optional[str] = None
error: Optional[str] = None
class SavedApiRequest(BaseModel):
id: str
endpoint: str
method: str
headers: Dict[str, str]
body: Dict[str, Any]
timestamp: str
response: Optional[Dict[str, Any]] = None
def save_api_request(endpoint: str, method: str, headers: Dict[str, str], body: Dict[str, Any], response: Optional[Dict[str, Any]] = None) -> str:
"""Save an API request to a JSON file."""
# Check for duplicate requests (same URL and query)
if endpoint in ["/scrape", "/scrape-with-llm"] and "url" in body and "query" in body:
existing_requests = get_saved_requests()
for existing_request in existing_requests:
if (existing_request.endpoint == endpoint and
existing_request.body.get("url") == body["url"] and
existing_request.body.get("query") == body["query"]):
print(f"Duplicate request found for URL: {body['url']} and query: {body['query']}")
return existing_request.id # Return existing request ID instead of creating new one
request_id = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
saved_request = SavedApiRequest(
id=request_id,
endpoint=endpoint,
method=method,
headers=headers,
body=body,
timestamp=datetime.now().isoformat(),
response=response
)
file_path = os.path.join("saved_requests", f"{request_id}.json")
with open(file_path, "w") as f:
json.dump(saved_request.dict(), f, indent=2)
return request_id
def get_saved_requests() -> List[SavedApiRequest]:
"""Get all saved API requests."""
requests = []
if os.path.exists("saved_requests"):
for filename in os.listdir("saved_requests"):
if filename.endswith('.json'):
file_path = os.path.join("saved_requests", filename)
try:
with open(file_path, "r") as f:
data = json.load(f)
requests.append(SavedApiRequest(**data))
except Exception as e:
print(f"Error loading saved request {filename}: {e}")
# Sort by timestamp (newest first)
requests.sort(key=lambda x: x.timestamp, reverse=True)
return requests
@app.get("/")
async def root():
"""Serve the frontend interface."""
if os.path.exists("static/index.html"):
return FileResponse("static/index.html")
else:
return {
"message": "Web Scraper API",
"description": "Convert any website into structured data with AI",
"endpoints": {
"/scrape": "POST - Scrape data from a website",
"/schemas": "GET - List cached schemas",
"/clear-cache": "POST - Clear schema cache",
"/models": "GET - List saved model configurations",
"/models": "POST - Save a new model configuration",
"/models/{model_name}": "DELETE - Delete a model configuration",
"/saved-requests": "GET - List saved API requests"
}
}
@app.post("/scrape", response_model=ScrapeResponse)
async def scrape_website_endpoint(request: ScrapeRequest):
"""
Scrape structured data from any website.
This endpoint:
1. Takes a URL and plain English query
2. Generates a custom scraper using AI
3. Returns structured data
"""
try:
# Save the API request
headers = {"Content-Type": "application/json"}
body = {
"url": str(request.url),
"query": request.query,
"model_name": request.model_name
}
result = await scraper_agent.scrape_data(
url=str(request.url),
query=request.query,
model_name=request.model_name
)
response_data = ScrapeResponse(
success=True,
url=result["url"],
query=result["query"],
extracted_data=result["extracted_data"],
schema_used=result["schema_used"],
timestamp=result["timestamp"]
)
# Save the request with response
save_api_request(
endpoint="/scrape",
method="POST",
headers=headers,
body=body,
response=response_data.dict()
)
return response_data
except Exception as e:
# Save the failed request
headers = {"Content-Type": "application/json"}
body = {
"url": str(request.url),
"query": request.query,
"model_name": request.model_name
}
save_api_request(
endpoint="/scrape",
method="POST",
headers=headers,
body=body,
response={"error": str(e)}
)
raise HTTPException(status_code=500, detail=f"Scraping failed: {str(e)}")
@app.post("/scrape-with-llm", response_model=ScrapeResponse)
async def scrape_website_endpoint_with_llm(request: ScrapeRequest):
"""
Scrape structured data from any website using a custom LLM model.
"""
try:
# Save the API request
headers = {"Content-Type": "application/json"}
body = {
"url": str(request.url),
"query": request.query,
"model_name": request.model_name
}
result = await scraper_agent.scrape_data_with_llm(
url=str(request.url),
query=request.query,
model_name=request.model_name
)
response_data = ScrapeResponse(
success=True,
url=result["url"],
query=result["query"],
extracted_data=result["extracted_data"],
timestamp=result["timestamp"]
)
# Save the request with response
save_api_request(
endpoint="/scrape-with-llm",
method="POST",
headers=headers,
body=body,
response=response_data.dict()
)
return response_data
except Exception as e:
# Save the failed request
headers = {"Content-Type": "application/json"}
body = {
"url": str(request.url),
"query": request.query,
"model_name": request.model_name
}
save_api_request(
endpoint="/scrape-with-llm",
method="POST",
headers=headers,
body=body,
response={"error": str(e)}
)
raise HTTPException(status_code=500, detail=f"Scraping failed: {str(e)}")
@app.get("/saved-requests")
async def list_saved_requests():
"""List all saved API requests."""
try:
requests = get_saved_requests()
return {
"success": True,
"requests": [req.dict() for req in requests],
"count": len(requests)
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to list saved requests: {str(e)}")
@app.delete("/saved-requests/{request_id}")
async def delete_saved_request(request_id: str):
"""Delete a saved API request."""
try:
file_path = os.path.join("saved_requests", f"{request_id}.json")
if os.path.exists(file_path):
os.remove(file_path)
return {
"success": True,
"message": f"Saved request '{request_id}' deleted successfully"
}
else:
raise HTTPException(status_code=404, detail=f"Saved request '{request_id}' not found")
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to delete saved request: {str(e)}")
@app.get("/schemas")
async def list_cached_schemas():
"""List all cached schemas."""
try:
schemas = await scraper_agent.get_cached_schemas()
return {
"success": True,
"cached_schemas": schemas,
"count": len(schemas)
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to list schemas: {str(e)}")
@app.post("/clear-cache")
async def clear_schema_cache():
"""Clear all cached schemas."""
try:
scraper_agent.clear_cache()
return {
"success": True,
"message": "Schema cache cleared successfully"
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to clear cache: {str(e)}")
@app.get("/models")
async def list_models():
"""List all saved model configurations."""
try:
models = scraper_agent.list_saved_models()
return {
"success": True,
"models": models,
"count": len(models)
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to list models: {str(e)}")
@app.post("/models")
async def save_model_config(request: ModelConfigRequest):
"""Save a new model configuration."""
try:
success = scraper_agent.save_model_config(
model_name=request.model_name,
provider=request.provider,
api_token=request.api_token
)
if success:
return {
"success": True,
"message": f"Model configuration '{request.model_name}' saved successfully"
}
else:
raise HTTPException(status_code=500, detail="Failed to save model configuration")
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to save model: {str(e)}")
@app.delete("/models/{model_name}")
async def delete_model_config(model_name: str):
"""Delete a model configuration."""
try:
success = scraper_agent.delete_model_config(model_name)
if success:
return {
"success": True,
"message": f"Model configuration '{model_name}' deleted successfully"
}
else:
raise HTTPException(status_code=404, detail=f"Model configuration '{model_name}' not found")
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to delete model: {str(e)}")
@app.get("/health")
async def health_check():
"""Health check endpoint."""
return {"status": "healthy", "service": "web-scraper-api"}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/website-to-api/app.py | docs/examples/website-to-api/app.py | #!/usr/bin/env python3
"""
Startup script for the Web Scraper API with frontend interface.
"""
import os
import sys
import uvicorn
from pathlib import Path
def main():
# Check if static directory exists
static_dir = Path("static")
if not static_dir.exists():
print("❌ Static directory not found!")
print("Please make sure the 'static' directory exists with the frontend files.")
sys.exit(1)
# Check if required frontend files exist
required_files = ["index.html", "styles.css", "script.js"]
missing_files = []
for file in required_files:
if not (static_dir / file).exists():
missing_files.append(file)
if missing_files:
print(f"❌ Missing frontend files: {', '.join(missing_files)}")
print("Please make sure all frontend files are present in the static directory.")
sys.exit(1)
print("🚀 Starting Web Scraper API with Frontend Interface")
print("=" * 50)
print("📁 Static files found and ready to serve")
print("🌐 Frontend will be available at: http://localhost:8000")
print("🔌 API endpoints available at: http://localhost:8000/docs")
print("=" * 50)
# Start the server
uvicorn.run(
"api_server:app",
host="0.0.0.0",
port=8000,
reload=True,
log_level="info"
)
if __name__ == "__main__":
main() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/cloud_browser/scrapeless_browser.py | docs/examples/cloud_browser/scrapeless_browser.py | import json
import asyncio
from urllib.parse import quote, urlencode
from crawl4ai import CrawlerRunConfig, BrowserConfig, AsyncWebCrawler
# Scrapeless provides a free anti-detection fingerprint browser client and cloud browsers:
# https://www.scrapeless.com/en/blog/scrapeless-nstbrowser-strategic-integration
async def main():
# customize browser fingerprint
fingerprint = {
"userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.1.2.3 Safari/537.36",
"platform": "Windows",
"screen": {
"width": 1280, "height": 1024
},
"localization": {
"languages": ["zh-HK", "en-US", "en"], "timezone": "Asia/Hong_Kong",
}
}
fingerprint_json = json.dumps(fingerprint)
encoded_fingerprint = quote(fingerprint_json)
scrapeless_params = {
"token": "your token",
"sessionTTL": 1000,
"sessionName": "Demo",
"fingerprint": encoded_fingerprint,
# Sets the target country/region for the proxy, sending requests via an IP address from that region. You can specify a country code (e.g., US for the United States, GB for the United Kingdom, ANY for any country). See country codes for all supported options.
# "proxyCountry": "ANY",
# create profile on scrapeless
# "profileId": "your profileId",
# For more usage details, please refer to https://docs.scrapeless.com/en/scraping-browser/quickstart/getting-started
}
query_string = urlencode(scrapeless_params)
scrapeless_connection_url = f"wss://browser.scrapeless.com/api/v2/browser?{query_string}"
async with AsyncWebCrawler(
config=BrowserConfig(
headless=False,
browser_mode="cdp",
cdp_url=scrapeless_connection_url,
)
) as crawler:
result = await crawler.arun(
url="https://www.scrapeless.com/en",
config=CrawlerRunConfig(
wait_for="css:.content",
scan_full_page=True,
),
)
print("-" * 20)
print(f'Status Code: {result.status_code}')
print("-" * 20)
print(f'Title: {result.metadata["title"]}')
print(f'Description: {result.metadata["description"]}')
print("-" * 20)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/markdown/content_source_example.py | docs/examples/markdown/content_source_example.py | """
Example showing how to use the content_source parameter to control HTML input for markdown generation.
"""
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator
async def demo_content_source():
"""Demonstrates different content_source options for markdown generation."""
url = "https://example.com" # Simple demo site
print("Crawling with different content_source options...")
# --- Example 1: Default Behavior (cleaned_html) ---
# This uses the HTML after it has been processed by the scraping strategy
# The HTML is cleaned, simplified, and optimized for readability
default_generator = DefaultMarkdownGenerator() # content_source="cleaned_html" is default
default_config = CrawlerRunConfig(markdown_generator=default_generator)
# --- Example 2: Raw HTML ---
# This uses the original HTML directly from the webpage
# Preserves more original content but may include navigation, ads, etc.
raw_generator = DefaultMarkdownGenerator(content_source="raw_html")
raw_config = CrawlerRunConfig(markdown_generator=raw_generator)
# --- Example 3: Fit HTML ---
# This uses preprocessed HTML optimized for schema extraction
# Better for structured data extraction but may lose some formatting
fit_generator = DefaultMarkdownGenerator(content_source="fit_html")
fit_config = CrawlerRunConfig(markdown_generator=fit_generator)
# Execute all three crawlers in sequence
async with AsyncWebCrawler() as crawler:
# Default (cleaned_html)
result_default = await crawler.arun(url=url, config=default_config)
# Raw HTML
result_raw = await crawler.arun(url=url, config=raw_config)
# Fit HTML
result_fit = await crawler.arun(url=url, config=fit_config)
# Print a summary of the results
print("\nMarkdown Generation Results:\n")
print("1. Default (cleaned_html):")
print(f" Length: {len(result_default.markdown.raw_markdown)} chars")
print(f" First 80 chars: {result_default.markdown.raw_markdown[:80]}...\n")
print("2. Raw HTML:")
print(f" Length: {len(result_raw.markdown.raw_markdown)} chars")
print(f" First 80 chars: {result_raw.markdown.raw_markdown[:80]}...\n")
print("3. Fit HTML:")
print(f" Length: {len(result_fit.markdown.raw_markdown)} chars")
print(f" First 80 chars: {result_fit.markdown.raw_markdown[:80]}...\n")
# Demonstrate differences in output
print("\nKey Takeaways:")
print("- cleaned_html: Best for readable, focused content")
print("- raw_html: Preserves more original content, but may include noise")
print("- fit_html: Optimized for schema extraction and structured data")
if __name__ == "__main__":
asyncio.run(demo_content_source()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/markdown/content_source_short_example.py | docs/examples/markdown/content_source_short_example.py | """
Example demonstrating how to use the content_source parameter in MarkdownGenerationStrategy
"""
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator
async def demo_markdown_source_config():
print("\n=== Demo: Configuring Markdown Source ===")
# Example 1: Generate markdown from cleaned HTML (default behavior)
cleaned_md_generator = DefaultMarkdownGenerator(content_source="cleaned_html")
config_cleaned = CrawlerRunConfig(markdown_generator=cleaned_md_generator)
async with AsyncWebCrawler() as crawler:
result_cleaned = await crawler.arun(url="https://example.com", config=config_cleaned)
print("Markdown from Cleaned HTML (default):")
print(f" Length: {len(result_cleaned.markdown.raw_markdown)}")
print(f" Start: {result_cleaned.markdown.raw_markdown[:100]}...")
# Example 2: Generate markdown directly from raw HTML
raw_md_generator = DefaultMarkdownGenerator(content_source="raw_html")
config_raw = CrawlerRunConfig(markdown_generator=raw_md_generator)
async with AsyncWebCrawler() as crawler:
result_raw = await crawler.arun(url="https://example.com", config=config_raw)
print("\nMarkdown from Raw HTML:")
print(f" Length: {len(result_raw.markdown.raw_markdown)}")
print(f" Start: {result_raw.markdown.raw_markdown[:100]}...")
# Example 3: Generate markdown from preprocessed 'fit' HTML
fit_md_generator = DefaultMarkdownGenerator(content_source="fit_html")
config_fit = CrawlerRunConfig(markdown_generator=fit_md_generator)
async with AsyncWebCrawler() as crawler:
result_fit = await crawler.arun(url="https://example.com", config=config_fit)
print("\nMarkdown from Fit HTML:")
print(f" Length: {len(result_fit.markdown.raw_markdown)}")
print(f" Start: {result_fit.markdown.raw_markdown[:100]}...")
if __name__ == "__main__":
asyncio.run(demo_markdown_source_config()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_cloudflare_challenge.py | docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_cloudflare_challenge.py | import time
import asyncio
from crawl4ai import *
# TODO: the user data directory that includes the capsolver extension
user_data_dir = "/browser-profile/Default1"
"""
The capsolver extension supports more features, such as:
- Telling the extension when to start solving captcha.
- Calling functions to check whether the captcha has been solved, etc.
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
"""
browser_config = BrowserConfig(
verbose=True,
headless=False,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
async def main():
async with AsyncWebCrawler(config=browser_config) as crawler:
result_initial = await crawler.arun(
url="https://gitlab.com/users/sign_in",
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# do something later
time.sleep(300)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_aws_waf.py | docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_aws_waf.py | import time
import asyncio
from crawl4ai import *
# TODO: the user data directory that includes the capsolver extension
user_data_dir = "/browser-profile/Default1"
"""
The capsolver extension supports more features, such as:
- Telling the extension when to start solving captcha.
- Calling functions to check whether the captcha has been solved, etc.
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
"""
browser_config = BrowserConfig(
verbose=True,
headless=False,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
async def main():
async with AsyncWebCrawler(config=browser_config) as crawler:
result_initial = await crawler.arun(
url="https://nft.porsche.com/onboarding@6",
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# do something later
time.sleep(300)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_recaptcha_v2.py | docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_recaptcha_v2.py | import time
import asyncio
from crawl4ai import *
# TODO: the user data directory that includes the capsolver extension
user_data_dir = "/browser-profile/Default1"
"""
The capsolver extension supports more features, such as:
- Telling the extension when to start solving captcha.
- Calling functions to check whether the captcha has been solved, etc.
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
"""
browser_config = BrowserConfig(
verbose=True,
headless=False,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
async def main():
async with AsyncWebCrawler(config=browser_config) as crawler:
result_initial = await crawler.arun(
url="https://recaptcha-demo.appspot.com/recaptcha-v2-checkbox.php",
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# do something later
time.sleep(300)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_recaptcha_v3.py | docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_recaptcha_v3.py | import time
import asyncio
from crawl4ai import *
# TODO: the user data directory that includes the capsolver extension
user_data_dir = "/browser-profile/Default1"
"""
The capsolver extension supports more features, such as:
- Telling the extension when to start solving captcha.
- Calling functions to check whether the captcha has been solved, etc.
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
"""
browser_config = BrowserConfig(
verbose=True,
headless=False,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
async def main():
async with AsyncWebCrawler(config=browser_config) as crawler:
result_initial = await crawler.arun(
url="https://recaptcha-demo.appspot.com/recaptcha-v3-request-scores.php",
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# do something later
time.sleep(300)
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_cloudflare_turnstile.py | docs/examples/capsolver_captcha_solver/capsolver_extension_integration/solve_cloudflare_turnstile.py | import time
import asyncio
from crawl4ai import *
# TODO: the user data directory that includes the capsolver extension
user_data_dir = "/browser-profile/Default1"
"""
The capsolver extension supports more features, such as:
- Telling the extension when to start solving captcha.
- Calling functions to check whether the captcha has been solved, etc.
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
"""
browser_config = BrowserConfig(
verbose=True,
headless=False,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
async def main():
async with AsyncWebCrawler(config=browser_config) as crawler:
result_initial = await crawler.arun(
url="https://clifford.io/demo/cloudflare-turnstile",
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# do something later
time.sleep(300)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_cloudflare_challenge.py | docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_cloudflare_challenge.py | import asyncio
import capsolver
from crawl4ai import *
# TODO: set your config
# Docs: https://docs.capsolver.com/guide/captcha/cloudflare_challenge/
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
site_url = "https://gitlab.com/users/sign_in" # page url of your target site
captcha_type = "AntiCloudflareTask" # type of your target captcha
# your http proxy to solve cloudflare challenge
proxy_server = "proxy.example.com:8080"
proxy_username = "myuser"
proxy_password = "mypass"
capsolver.api_key = api_key
async def main():
# get challenge cookie using capsolver sdk
solution = capsolver.solve({
"type": captcha_type,
"websiteURL": site_url,
"proxy": f"{proxy_server}:{proxy_username}:{proxy_password}",
})
cookies = solution["cookies"]
user_agent = solution["userAgent"]
print("challenge cookies:", cookies)
cookies_list = []
for name, value in cookies.items():
cookies_list.append({
"name": name,
"value": value,
"url": site_url,
})
browser_config = BrowserConfig(
verbose=True,
headless=False,
use_persistent_context=True,
user_agent=user_agent,
cookies=cookies_list,
proxy_config={
"server": f"http://{proxy_server}",
"username": proxy_username,
"password": proxy_password,
},
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(
url=site_url,
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
print(result.markdown)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_aws_waf.py | docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_aws_waf.py | import asyncio
import capsolver
from crawl4ai import *
# TODO: set your config
# Docs: https://docs.capsolver.com/guide/captcha/awsWaf/
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
site_url = "https://nft.porsche.com/onboarding@6" # page url of your target site
cookie_domain = ".nft.porsche.com" # the domain name to which you want to apply the cookie
captcha_type = "AntiAwsWafTaskProxyLess" # type of your target captcha
capsolver.api_key = api_key
async def main():
browser_config = BrowserConfig(
verbose=True,
headless=False,
use_persistent_context=True,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
await crawler.arun(
url=site_url,
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# get aws waf cookie using capsolver sdk
solution = capsolver.solve({
"type": captcha_type,
"websiteURL": site_url,
})
cookie = solution["cookie"]
print("aws waf cookie:", cookie)
js_code = """
document.cookie = \'aws-waf-token=""" + cookie + """;domain=""" + cookie_domain + """;path=/\';
location.reload();
"""
wait_condition = """() => {
return document.title === \'Join Porsche’s journey into Web3\';
}"""
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test",
js_code=js_code,
js_only=True,
wait_for=f"js:{wait_condition}"
)
result_next = await crawler.arun(
url=site_url,
config=run_config,
)
print(result_next.markdown)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_recaptcha_v2.py | docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_recaptcha_v2.py | import asyncio
import capsolver
from crawl4ai import *
# TODO: set your config
# Docs: https://docs.capsolver.com/guide/captcha/ReCaptchaV2/
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
site_key = "6LfW6wATAAAAAHLqO2pb8bDBahxlMxNdo9g947u9" # site key of your target site
site_url = "https://recaptcha-demo.appspot.com/recaptcha-v2-checkbox.php" # page url of your target site
captcha_type = "ReCaptchaV2TaskProxyLess" # type of your target captcha
capsolver.api_key = api_key
async def main():
browser_config = BrowserConfig(
verbose=True,
headless=False,
use_persistent_context=True,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
await crawler.arun(
url=site_url,
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# get recaptcha token using capsolver sdk
solution = capsolver.solve({
"type": captcha_type,
"websiteURL": site_url,
"websiteKey": site_key,
})
token = solution["gRecaptchaResponse"]
print("recaptcha token:", token)
js_code = """
const textarea = document.getElementById(\'g-recaptcha-response\');
if (textarea) {
textarea.value = \"""" + token + """\";
document.querySelector(\'button.form-field[type="submit"]\').click();
}
"""
wait_condition = """() => {
const items = document.querySelectorAll(\'h2\');
return items.length > 1;
}"""
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test",
js_code=js_code,
js_only=True,
wait_for=f"js:{wait_condition}"
)
result_next = await crawler.arun(
url=site_url,
config=run_config,
)
print(result_next.markdown)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_recaptcha_v3.py | docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_recaptcha_v3.py | import asyncio
import capsolver
from crawl4ai import *
# TODO: set your config
# Docs: https://docs.capsolver.com/guide/captcha/ReCaptchaV3/
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
site_key = "6LdKlZEpAAAAAAOQjzC2v_d36tWxCl6dWsozdSy9" # site key of your target site
site_url = "https://recaptcha-demo.appspot.com/recaptcha-v3-request-scores.php" # page url of your target site
page_action = "examples/v3scores" # page action of your target site
captcha_type = "ReCaptchaV3TaskProxyLess" # type of your target captcha
capsolver.api_key = api_key
async def main():
browser_config = BrowserConfig(
verbose=True,
headless=False,
use_persistent_context=True,
)
# get recaptcha token using capsolver sdk
solution = capsolver.solve({
"type": captcha_type,
"websiteURL": site_url,
"websiteKey": site_key,
"pageAction": page_action,
})
token = solution["gRecaptchaResponse"]
print("recaptcha token:", token)
async with AsyncWebCrawler(config=browser_config) as crawler:
await crawler.arun(
url=site_url,
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
js_code = """
const originalFetch = window.fetch;
window.fetch = function(...args) {
if (typeof args[0] === 'string' && args[0].includes('/recaptcha-v3-verify.php')) {
const url = new URL(args[0], window.location.origin);
url.searchParams.set('action', '""" + token + """');
args[0] = url.toString();
document.querySelector('.token').innerHTML = "fetch('/recaptcha-v3-verify.php?action=examples/v3scores&token=""" + token + """')";
console.log('Fetch URL hooked:', args[0]);
}
return originalFetch.apply(this, args);
};
"""
wait_condition = """() => {
return document.querySelector('.step3:not(.hidden)');
}"""
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test",
js_code=js_code,
js_only=True,
wait_for=f"js:{wait_condition}"
)
result_next = await crawler.arun(
url=site_url,
config=run_config,
)
print(result_next.markdown)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_cloudflare_turnstile.py | docs/examples/capsolver_captcha_solver/capsolver_api_integration/solve_cloudflare_turnstile.py | import asyncio
import capsolver
from crawl4ai import *
# TODO: set your config
# Docs: https://docs.capsolver.com/guide/captcha/cloudflare_turnstile/
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
site_key = "0x4AAAAAAAGlwMzq_9z6S9Mh" # site key of your target site
site_url = "https://clifford.io/demo/cloudflare-turnstile" # page url of your target site
captcha_type = "AntiTurnstileTaskProxyLess" # type of your target captcha
capsolver.api_key = api_key
async def main():
browser_config = BrowserConfig(
verbose=True,
headless=False,
use_persistent_context=True,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
await crawler.arun(
url=site_url,
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test"
)
# get turnstile token using capsolver sdk
solution = capsolver.solve({
"type": captcha_type,
"websiteURL": site_url,
"websiteKey": site_key,
})
token = solution["token"]
print("turnstile token:", token)
js_code = """
document.querySelector(\'input[name="cf-turnstile-response"]\').value = \'"""+token+"""\';
document.querySelector(\'button[type="submit"]\').click();
"""
wait_condition = """() => {
const items = document.querySelectorAll(\'h1\');
return items.length === 0;
}"""
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
session_id="session_captcha_test",
js_code=js_code,
js_only=True,
wait_for=f"js:{wait_condition}"
)
result_next = await crawler.arun(
url=site_url,
config=run_config,
)
print(result_next.markdown)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/md_v2/marketplace/backend/dummy_data.py | docs/md_v2/marketplace/backend/dummy_data.py | import sqlite3
import json
import random
from datetime import datetime, timedelta
from database import DatabaseManager
def generate_slug(text):
return text.lower().replace(' ', '-').replace('&', 'and')
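# e.g. generate_slug("Browser Automation & Tools") -> "browser-automation-and-tools"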
def generate_dummy_data():
db = DatabaseManager()
conn = db.conn
cursor = conn.cursor()
# Clear existing data
for table in ['apps', 'articles', 'categories', 'sponsors']:
cursor.execute(f"DELETE FROM {table}")
# Categories
categories = [
("Browser Automation", "⚙", "Tools for browser automation and control"),
("Proxy Services", "🔒", "Proxy providers and rotation services"),
("LLM Integration", "🤖", "AI/LLM tools and integrations"),
("Data Processing", "📊", "Data extraction and processing tools"),
("Cloud Infrastructure", "☁", "Cloud browser and computing services"),
("Developer Tools", "🛠", "Development and testing utilities")
]
for i, (name, icon, desc) in enumerate(categories):
cursor.execute("""
INSERT INTO categories (name, slug, icon, description, order_index)
VALUES (?, ?, ?, ?, ?)
""", (name, generate_slug(name), icon, desc, i))
# Apps with real Unsplash images
apps_data = [
# Browser Automation
("Playwright Cloud", "Browser Automation", "Paid", True, True,
"Scalable browser automation in the cloud with Playwright", "https://playwright.cloud",
None, "$99/month starter", 4.8, 12500,
"https://images.unsplash.com/photo-1633356122544-f134324a6cee?w=800&h=400&fit=crop"),
("Selenium Grid Hub", "Browser Automation", "Freemium", False, False,
"Distributed Selenium grid for parallel testing", "https://seleniumhub.io",
"https://github.com/seleniumhub/grid", "Free - $299/month", 4.2, 8400,
"https://images.unsplash.com/photo-1555066931-4365d14bab8c?w=800&h=400&fit=crop"),
("Puppeteer Extra", "Browser Automation", "Open Source", True, False,
"Enhanced Puppeteer with stealth plugins and more", "https://puppeteer-extra.dev",
"https://github.com/berstend/puppeteer-extra", "Free", 4.6, 15200,
"https://images.unsplash.com/photo-1461749280684-dccba630e2f6?w=800&h=400&fit=crop"),
# Proxy Services
("BrightData", "Proxy Services", "Paid", True, True,
"Premium proxy network with 72M+ IPs worldwide", "https://brightdata.com",
None, "Starting $500/month", 4.7, 9800,
"https://images.unsplash.com/photo-1558494949-ef010cbdcc31?w=800&h=400&fit=crop"),
("SmartProxy", "Proxy Services", "Paid", False, True,
"Residential and datacenter proxies with rotation", "https://smartproxy.com",
None, "Starting $75/month", 4.3, 7600,
"https://images.unsplash.com/photo-1544197150-b99a580bb7a8?w=800&h=400&fit=crop"),
("ProxyMesh", "Proxy Services", "Freemium", False, False,
"Rotating proxy servers with sticky sessions", "https://proxymesh.com",
None, "$10-$50/month", 4.0, 4200,
"https://images.unsplash.com/photo-1451187580459-43490279c0fa?w=800&h=400&fit=crop"),
# LLM Integration
("LangChain Crawl", "LLM Integration", "Open Source", True, False,
"LangChain integration for Crawl4AI workflows", "https://langchain-crawl.dev",
"https://github.com/langchain/crawl", "Free", 4.5, 18900,
"https://images.unsplash.com/photo-1677442136019-21780ecad995?w=800&h=400&fit=crop"),
("GPT Scraper", "LLM Integration", "Freemium", False, False,
"Extract structured data using GPT models", "https://gptscraper.ai",
None, "Free - $99/month", 4.1, 5600,
"https://images.unsplash.com/photo-1655720828018-edd2daec9349?w=800&h=400&fit=crop"),
("Claude Extract", "LLM Integration", "Paid", True, True,
"Professional extraction using Claude AI", "https://claude-extract.com",
None, "$199/month", 4.9, 3200,
"https://images.unsplash.com/photo-1686191128892-3b09ad503b4f?w=800&h=400&fit=crop"),
# Data Processing
("DataMiner Pro", "Data Processing", "Paid", False, False,
"Advanced data extraction and transformation", "https://dataminer.pro",
None, "$149/month", 4.2, 6700,
"https://images.unsplash.com/photo-1551288049-bebda4e38f71?w=800&h=400&fit=crop"),
("ScraperAPI", "Data Processing", "Freemium", True, True,
"Simple API for web scraping with proxy rotation", "https://scraperapi.com",
None, "Free - $299/month", 4.6, 22300,
"https://images.unsplash.com/photo-1460925895917-afdab827c52f?w=800&h=400&fit=crop"),
("Apify", "Data Processing", "Freemium", False, False,
"Web scraping and automation platform", "https://apify.com",
None, "$49-$499/month", 4.4, 14500,
"https://images.unsplash.com/photo-1504639725590-34d0984388bd?w=800&h=400&fit=crop"),
# Cloud Infrastructure
("BrowserCloud", "Cloud Infrastructure", "Paid", True, True,
"Managed headless browsers in the cloud", "https://browsercloud.io",
None, "$199/month", 4.5, 8900,
"https://images.unsplash.com/photo-1667372393119-3d4c48d07fc9?w=800&h=400&fit=crop"),
("LambdaTest", "Cloud Infrastructure", "Freemium", False, False,
"Cross-browser testing on cloud", "https://lambdatest.com",
None, "Free - $99/month", 4.1, 11200,
"https://images.unsplash.com/photo-1451187580459-43490279c0fa?w=800&h=400&fit=crop"),
("Browserless", "Cloud Infrastructure", "Freemium", True, False,
"Headless browser automation API", "https://browserless.io",
None, "$50-$500/month", 4.7, 19800,
"https://images.unsplash.com/photo-1639762681485-074b7f938ba0?w=800&h=400&fit=crop"),
# Developer Tools
("Crawl4AI VSCode", "Developer Tools", "Open Source", True, False,
"VSCode extension for Crawl4AI development", "https://marketplace.visualstudio.com",
"https://github.com/crawl4ai/vscode", "Free", 4.8, 34500,
"https://images.unsplash.com/photo-1629654297299-c8506221ca97?w=800&h=400&fit=crop"),
("Postman Collection", "Developer Tools", "Open Source", False, False,
"Postman collection for Crawl4AI API testing", "https://postman.com/crawl4ai",
"https://github.com/crawl4ai/postman", "Free", 4.3, 7800,
"https://images.unsplash.com/photo-1599507593499-a3f7d7d97667?w=800&h=400&fit=crop"),
("Debug Toolkit", "Developer Tools", "Open Source", False, False,
"Debugging tools for crawler development", "https://debug.crawl4ai.com",
"https://github.com/crawl4ai/debug", "Free", 4.0, 4300,
"https://images.unsplash.com/photo-1515879218367-8466d910aaa4?w=800&h=400&fit=crop"),
]
for name, category, type_, featured, sponsored, desc, url, github, pricing, rating, downloads, image in apps_data:
screenshots = json.dumps([
f"https://images.unsplash.com/photo-{random.randint(1500000000000, 1700000000000)}-{random.randint(1000000000000, 9999999999999)}?w=800&h=600&fit=crop",
f"https://images.unsplash.com/photo-{random.randint(1500000000000, 1700000000000)}-{random.randint(1000000000000, 9999999999999)}?w=800&h=600&fit=crop"
])
cursor.execute("""
INSERT INTO apps (name, slug, description, category, type, featured, sponsored,
website_url, github_url, pricing, rating, downloads, image, screenshots, logo_url,
integration_guide, contact_email, views)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (name, generate_slug(name), desc, category, type_, featured, sponsored,
url, github, pricing, rating, downloads, image, screenshots,
f"https://ui-avatars.com/api/?name={name}&background=50ffff&color=070708&size=128",
f"# {name} Integration\n\n```python\nfrom crawl4ai import AsyncWebCrawler\n# Integration code coming soon...\n```",
f"contact@{generate_slug(name)}.com",
random.randint(100, 5000)))
# Articles with real images
articles_data = [
("Browser Automation Showdown: Playwright vs Puppeteer vs Selenium",
"Review", "John Doe", ["Playwright Cloud", "Puppeteer Extra"],
["browser-automation", "comparison", "2024"],
"https://images.unsplash.com/photo-1587620962725-abab7fe55159?w=1200&h=630&fit=crop"),
("Top 5 Proxy Services for Web Scraping in 2024",
"Comparison", "Jane Smith", ["BrightData", "SmartProxy", "ProxyMesh"],
["proxy", "web-scraping", "guide"],
"https://images.unsplash.com/photo-1558494949-ef010cbdcc31?w=1200&h=630&fit=crop"),
("Integrating LLMs with Crawl4AI: A Complete Guide",
"Tutorial", "Crawl4AI Team", ["LangChain Crawl", "GPT Scraper", "Claude Extract"],
["llm", "integration", "tutorial"],
"https://images.unsplash.com/photo-1677442136019-21780ecad995?w=1200&h=630&fit=crop"),
("Building Scalable Crawlers with Cloud Infrastructure",
"Tutorial", "Mike Johnson", ["BrowserCloud", "Browserless"],
["cloud", "scalability", "architecture"],
"https://images.unsplash.com/photo-1667372393119-3d4c48d07fc9?w=1200&h=630&fit=crop"),
("What's New in Crawl4AI Marketplace",
"News", "Crawl4AI Team", [],
["marketplace", "announcement", "news"],
"https://images.unsplash.com/photo-1556075798-4825dfaaf498?w=1200&h=630&fit=crop"),
("Cost Analysis: Self-Hosted vs Cloud Browser Solutions",
"Comparison", "Sarah Chen", ["BrowserCloud", "LambdaTest", "Browserless"],
["cost", "cloud", "comparison"],
"https://images.unsplash.com/photo-1554224155-8d04cb21cd6c?w=1200&h=630&fit=crop"),
("Getting Started with Browser Automation",
"Tutorial", "Crawl4AI Team", ["Playwright Cloud", "Selenium Grid Hub"],
["beginner", "tutorial", "automation"],
"https://images.unsplash.com/photo-1498050108023-c5249f4df085?w=1200&h=630&fit=crop"),
("The Future of Web Scraping: AI-Powered Extraction",
"News", "Dr. Alan Turing", ["Claude Extract", "GPT Scraper"],
["ai", "future", "trends"],
"https://images.unsplash.com/photo-1593720213428-28a5b9e94613?w=1200&h=630&fit=crop")
]
for title, category, author, related_apps, tags, image in articles_data:
# Get app IDs for related apps
related_ids = []
for app_name in related_apps:
cursor.execute("SELECT id FROM apps WHERE name = ?", (app_name,))
result = cursor.fetchone()
if result:
related_ids.append(result[0])
content = f"""# {title}
By {author} | {datetime.now().strftime('%B %d, %Y')}
## Introduction
This is a comprehensive article about {title.lower()}. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
## Key Points
- Important point about the topic
- Another crucial insight
- Technical details and specifications
- Performance comparisons
## Conclusion
In summary, this article explored various aspects of the topic. Stay tuned for more updates!
"""
cursor.execute("""
INSERT INTO articles (title, slug, content, author, category, related_apps,
featured_image, tags, views)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (title, generate_slug(title), content, author, category,
json.dumps(related_ids), image, json.dumps(tags),
random.randint(200, 10000)))
# Sponsors
sponsors_data = [
("BrightData", "Gold", "https://brightdata.com",
"https://images.unsplash.com/photo-1558494949-ef010cbdcc31?w=728&h=90&fit=crop"),
("ScraperAPI", "Gold", "https://scraperapi.com",
"https://images.unsplash.com/photo-1460925895917-afdab827c52f?w=728&h=90&fit=crop"),
("BrowserCloud", "Silver", "https://browsercloud.io",
"https://images.unsplash.com/photo-1667372393119-3d4c48d07fc9?w=728&h=90&fit=crop"),
("Claude Extract", "Silver", "https://claude-extract.com",
"https://images.unsplash.com/photo-1686191128892-3b09ad503b4f?w=728&h=90&fit=crop"),
("SmartProxy", "Bronze", "https://smartproxy.com",
"https://images.unsplash.com/photo-1544197150-b99a580bb7a8?w=728&h=90&fit=crop")
]
for company, tier, landing_url, banner in sponsors_data:
start_date = datetime.now() - timedelta(days=random.randint(1, 30))
end_date = datetime.now() + timedelta(days=random.randint(30, 180))
cursor.execute("""
INSERT INTO sponsors (company_name, logo_url, tier, banner_url,
landing_url, active, start_date, end_date)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""", (company,
f"https://ui-avatars.com/api/?name={company}&background=09b5a5&color=fff&size=200",
tier, banner, landing_url, 1,
start_date.isoformat(), end_date.isoformat()))
conn.commit()
print("✓ Dummy data generated successfully!")
print(f" - {len(categories)} categories")
print(f" - {len(apps_data)} apps")
print(f" - {len(articles_data)} articles")
print(f" - {len(sponsors_data)} sponsors")
if __name__ == "__main__":
generate_dummy_data() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/md_v2/marketplace/backend/database.py | docs/md_v2/marketplace/backend/database.py | import sqlite3
import yaml
import json
from pathlib import Path
from typing import Dict, List, Any
class DatabaseManager:
def __init__(self, db_path=None, schema_path='schema.yaml'):
self.schema = self._load_schema(schema_path)
# Use provided path or fallback to schema default
self.db_path = db_path or self.schema['database']['name']
self.conn = None
self._init_database()
def _load_schema(self, path: str) -> Dict:
with open(path, 'r') as f:
return yaml.safe_load(f)
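    # Illustrative shape of the schema.yaml this class expects; the field names are
    # inferred from how self.schema is read below, and the real file may differ:
    #
    #   database:
    #     name: marketplace.db        # hypothetical filename
    #   tables:
    #     apps:
    #       columns:
    #         id: {type: INTEGER, primary: true, autoincrement: true}
    #         name: {type: TEXT, required: true}
    #         created_at: {type: TEXT, default: CURRENT_TIMESTAMP}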
def _init_database(self):
"""Auto-create/migrate database from schema"""
self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
self.conn.row_factory = sqlite3.Row
for table_name, table_def in self.schema['tables'].items():
self._create_or_update_table(table_name, table_def['columns'])
def _create_or_update_table(self, table_name: str, columns: Dict):
cursor = self.conn.cursor()
# Check if table exists
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
table_exists = cursor.fetchone() is not None
if not table_exists:
# Create table
col_defs = []
for col_name, col_spec in columns.items():
col_def = f"{col_name} {col_spec['type']}"
if col_spec.get('primary'):
col_def += " PRIMARY KEY"
if col_spec.get('autoincrement'):
col_def += " AUTOINCREMENT"
if col_spec.get('unique'):
col_def += " UNIQUE"
if col_spec.get('required'):
col_def += " NOT NULL"
if 'default' in col_spec:
default = col_spec['default']
if default == 'CURRENT_TIMESTAMP':
col_def += f" DEFAULT {default}"
elif isinstance(default, str):
col_def += f" DEFAULT '{default}'"
else:
col_def += f" DEFAULT {default}"
col_defs.append(col_def)
create_sql = f"CREATE TABLE {table_name} ({', '.join(col_defs)})"
cursor.execute(create_sql)
else:
# Check for new columns and add them
cursor.execute(f"PRAGMA table_info({table_name})")
existing_columns = {row[1] for row in cursor.fetchall()}
for col_name, col_spec in columns.items():
if col_name not in existing_columns:
col_def = f"{col_spec['type']}"
if 'default' in col_spec:
default = col_spec['default']
if default == 'CURRENT_TIMESTAMP':
col_def += f" DEFAULT {default}"
elif isinstance(default, str):
col_def += f" DEFAULT '{default}'"
else:
col_def += f" DEFAULT {default}"
cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_def}")
self.conn.commit()
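# Illustrative sketch of how a schema.yaml entry maps to the generated SQL. The
# YAML shape below is an assumption for illustration (the real schema file is not
# shown here); the resulting statement follows the column-building logic above:
#
#   tables:
#     apps:
#       columns:
#         id:    {type: INTEGER, primary: true, autoincrement: true}
#         name:  {type: TEXT, required: true}
#         views: {type: INTEGER, default: 0}
#
#   -> CREATE TABLE apps (id INTEGER PRIMARY KEY AUTOINCREMENT,
#                         name TEXT NOT NULL, views INTEGER DEFAULT 0)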
def get_all(self, table: str, limit: int = 100, offset: int = 0, where: str = None) -> List[Dict]:
cursor = self.conn.cursor()
query = f"SELECT * FROM {table}"
if where:
query += f" WHERE {where}"
query += f" LIMIT {limit} OFFSET {offset}"
cursor.execute(query)
rows = cursor.fetchall()
return [dict(row) for row in rows]
def search(self, query: str, tables: List[str] = None) -> Dict[str, List[Dict]]:
if not tables:
tables = list(self.schema['tables'].keys())
results = {}
cursor = self.conn.cursor()
for table in tables:
# Search in text columns
columns = self.schema['tables'][table]['columns']
text_cols = [col for col, spec in columns.items()
if spec['type'] == 'TEXT' and col != 'id']
if text_cols:
where_clause = ' OR '.join([f"{col} LIKE ?" for col in text_cols])
params = [f'%{query}%'] * len(text_cols)
cursor.execute(f"SELECT * FROM {table} WHERE {where_clause} LIMIT 10", params)
rows = cursor.fetchall()
if rows:
results[table] = [dict(row) for row in rows]
return results
def close(self):
if self.conn:
self.conn.close() | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/md_v2/marketplace/backend/config.py | docs/md_v2/marketplace/backend/config.py | """
Marketplace Configuration - Loads from .env file
"""
import os
import sys
import hashlib
from pathlib import Path
from dotenv import load_dotenv
# Load .env file
env_path = Path(__file__).parent / '.env'
if not env_path.exists():
print("\n❌ ERROR: No .env file found!")
print("Please copy .env.example to .env and update with your values:")
print(f" cp {Path(__file__).parent}/.env.example {Path(__file__).parent}/.env")
print("\nThen edit .env with your secure values.")
sys.exit(1)
load_dotenv(env_path)
# Required environment variables
required_vars = ['MARKETPLACE_ADMIN_PASSWORD', 'MARKETPLACE_JWT_SECRET']
missing_vars = [var for var in required_vars if not os.getenv(var)]
if missing_vars:
print(f"\n❌ ERROR: Missing required environment variables: {', '.join(missing_vars)}")
print("Please check your .env file and ensure all required variables are set.")
sys.exit(1)
class Config:
"""Configuration loaded from environment variables"""
# Admin authentication - hashed from password in .env
ADMIN_PASSWORD_HASH = hashlib.sha256(
os.getenv('MARKETPLACE_ADMIN_PASSWORD').encode()
).hexdigest()
# JWT secret for token generation
JWT_SECRET_KEY = os.getenv('MARKETPLACE_JWT_SECRET')
# Database path
DATABASE_PATH = os.getenv('MARKETPLACE_DB_PATH', './marketplace.db')
# Token expiry in hours
TOKEN_EXPIRY_HOURS = int(os.getenv('MARKETPLACE_TOKEN_EXPIRY', '4'))
# CORS origins - hardcoded as they don't contain secrets
ALLOWED_ORIGINS = [
"http://localhost:8000",
"http://localhost:8080",
"http://localhost:8100",
"http://127.0.0.1:8000",
"http://127.0.0.1:8080",
"http://127.0.0.1:8100",
"https://crawl4ai.com",
"https://www.crawl4ai.com",
"https://docs.crawl4ai.com",
"https://market.crawl4ai.com"
] | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/md_v2/marketplace/backend/server.py | docs/md_v2/marketplace/backend/server.py | from fastapi import FastAPI, HTTPException, Query, Depends, Body, UploadFile, File, Form, APIRouter
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from typing import Optional, Dict, Any
import json
import hashlib
import secrets
import re
from pathlib import Path
from database import DatabaseManager
from datetime import datetime, timedelta
# Import configuration (will exit if .env not found or invalid)
from config import Config
app = FastAPI(title="Crawl4AI Marketplace API")
router = APIRouter(prefix="/marketplace/api")
# Security setup
security = HTTPBearer()
tokens = {} # In production, use Redis or database for token storage
# CORS configuration
app.add_middleware(
CORSMiddleware,
allow_origins=Config.ALLOWED_ORIGINS,
allow_credentials=True,
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
allow_headers=["*"],
max_age=3600
)
# Initialize database with configurable path
db = DatabaseManager(Config.DATABASE_PATH)
BASE_DIR = Path(__file__).parent
UPLOAD_ROOT = BASE_DIR / "uploads"
UPLOAD_ROOT.mkdir(parents=True, exist_ok=True)
app.mount("/uploads", StaticFiles(directory=UPLOAD_ROOT), name="uploads")
ALLOWED_IMAGE_TYPES = {
"image/png": ".png",
"image/jpeg": ".jpg",
"image/webp": ".webp",
"image/svg+xml": ".svg"
}
ALLOWED_UPLOAD_FOLDERS = {"sponsors"}
MAX_UPLOAD_SIZE = 2 * 1024 * 1024 # 2 MB
def json_response(data, cache_time=3600):
"""Helper to return JSON with cache headers"""
return JSONResponse(
content=data,
headers={
"Cache-Control": f"public, max-age={cache_time}",
"X-Content-Type-Options": "nosniff"
}
)
def to_int(value, default=0):
"""Coerce incoming values to integers, falling back to default."""
if value is None:
return default
if isinstance(value, bool):
return int(value)
if isinstance(value, (int, float)):
return int(value)
if isinstance(value, str):
stripped = value.strip()
if not stripped:
return default
match = re.match(r"^-?\d+", stripped)
if match:
try:
return int(match.group())
except ValueError:
return default
return default
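# Quick reference for the coercion rules above (values derived from this function's
# own branches, shown here as an informal sketch):
#
#   to_int(None)      -> 0   (falls back to default)
#   to_int(True)      -> 1
#   to_int(3.7)       -> 3
#   to_int(" 42nd ")  -> 42  (leading integer is extracted)
#   to_int("n/a", 7)  -> 7   (no leading integer, falls back to default)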
# ============= PUBLIC ENDPOINTS =============
@router.get("/apps")
async def get_apps(
category: Optional[str] = None,
type: Optional[str] = None,
featured: Optional[bool] = None,
sponsored: Optional[bool] = None,
limit: int = Query(default=20, le=10000),
offset: int = Query(default=0)
):
"""Get apps with optional filters"""
where_clauses = []
if category:
where_clauses.append(f"category = '{category}'")
if type:
where_clauses.append(f"type = '{type}'")
if featured is not None:
where_clauses.append(f"featured = {1 if featured else 0}")
if sponsored is not None:
where_clauses.append(f"sponsored = {1 if sponsored else 0}")
where = " AND ".join(where_clauses) if where_clauses else None
apps = db.get_all('apps', limit=limit, offset=offset, where=where)
# Parse JSON fields
for app in apps:
if app.get('screenshots'):
app['screenshots'] = json.loads(app['screenshots'])
return json_response(apps)
@router.get("/apps/{slug}")
async def get_app(slug: str):
"""Get single app by slug"""
apps = db.get_all('apps', where=f"slug = '{slug}'", limit=1)
if not apps:
raise HTTPException(status_code=404, detail="App not found")
app = apps[0]
if app.get('screenshots'):
app['screenshots'] = json.loads(app['screenshots'])
return json_response(app)
@router.get("/articles")
async def get_articles(
category: Optional[str] = None,
limit: int = Query(default=20, le=10000),
offset: int = Query(default=0)
):
"""Get articles with optional category filter"""
where = f"category = '{category}'" if category else None
articles = db.get_all('articles', limit=limit, offset=offset, where=where)
# Parse JSON fields
for article in articles:
if article.get('related_apps'):
article['related_apps'] = json.loads(article['related_apps'])
if article.get('tags'):
article['tags'] = json.loads(article['tags'])
return json_response(articles)
@router.get("/articles/{slug}")
async def get_article(slug: str):
"""Get single article by slug"""
articles = db.get_all('articles', where=f"slug = '{slug}'", limit=1)
if not articles:
raise HTTPException(status_code=404, detail="Article not found")
article = articles[0]
if article.get('related_apps'):
article['related_apps'] = json.loads(article['related_apps'])
if article.get('tags'):
article['tags'] = json.loads(article['tags'])
return json_response(article)
@router.get("/categories")
async def get_categories():
"""Get all categories ordered by index"""
categories = db.get_all('categories', limit=50)
for category in categories:
category['order_index'] = to_int(category.get('order_index'), 0)
categories.sort(key=lambda x: x.get('order_index', 0))
return json_response(categories, cache_time=7200)
@router.get("/sponsors")
async def get_sponsors(active: Optional[bool] = True):
"""Get sponsors, default active only"""
where = f"active = {1 if active else 0}" if active is not None else None
sponsors = db.get_all('sponsors', where=where, limit=20)
# Filter by date if active
if active:
now = datetime.now().isoformat()
sponsors = [s for s in sponsors
if (not s.get('start_date') or s['start_date'] <= now) and
(not s.get('end_date') or s['end_date'] >= now)]
return json_response(sponsors)
@router.get("/search")
async def search(q: str = Query(min_length=2)):
"""Search across apps and articles"""
if len(q) < 2:
return json_response({})
results = db.search(q, tables=['apps', 'articles'])
# Parse JSON fields in results
for table, items in results.items():
for item in items:
if table == 'apps' and item.get('screenshots'):
item['screenshots'] = json.loads(item['screenshots'])
elif table == 'articles':
if item.get('related_apps'):
item['related_apps'] = json.loads(item['related_apps'])
if item.get('tags'):
item['tags'] = json.loads(item['tags'])
return json_response(results, cache_time=1800)
@router.get("/stats")
async def get_stats():
"""Get marketplace statistics"""
stats = {
"total_apps": len(db.get_all('apps', limit=10000)),
"total_articles": len(db.get_all('articles', limit=10000)),
"total_categories": len(db.get_all('categories', limit=1000)),
"active_sponsors": len(db.get_all('sponsors', where="active = 1", limit=1000))
}
return json_response(stats, cache_time=1800)
# ============= ADMIN AUTHENTICATION =============
def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
"""Verify admin authentication token"""
token = credentials.credentials
if token not in tokens or tokens[token] < datetime.now():
raise HTTPException(status_code=401, detail="Invalid or expired token")
return token
@router.post("/admin/upload-image", dependencies=[Depends(verify_token)])
async def upload_image(file: UploadFile = File(...), folder: str = Form("sponsors")):
"""Upload image files for admin assets"""
folder = (folder or "").strip().lower()
if folder not in ALLOWED_UPLOAD_FOLDERS:
raise HTTPException(status_code=400, detail="Invalid upload folder")
if file.content_type not in ALLOWED_IMAGE_TYPES:
raise HTTPException(status_code=400, detail="Unsupported file type")
contents = await file.read()
if len(contents) > MAX_UPLOAD_SIZE:
raise HTTPException(status_code=400, detail="File too large (max 2MB)")
extension = ALLOWED_IMAGE_TYPES[file.content_type]
filename = f"{datetime.now().strftime('%Y%m%d%H%M%S')}_{secrets.token_hex(8)}{extension}"
target_dir = UPLOAD_ROOT / folder
target_dir.mkdir(parents=True, exist_ok=True)
target_path = target_dir / filename
target_path.write_bytes(contents)
return {"url": f"/uploads/{folder}/{filename}"}
@router.post("/admin/login")
async def admin_login(password: str = Body(..., embed=True)):
"""Admin login with password"""
provided_hash = hashlib.sha256(password.encode()).hexdigest()
if provided_hash != Config.ADMIN_PASSWORD_HASH:
# Log failed attempt in production
print(f"Failed login attempt at {datetime.now()}")
raise HTTPException(status_code=401, detail="Invalid password")
# Generate secure token
token = secrets.token_urlsafe(32)
tokens[token] = datetime.now() + timedelta(hours=Config.TOKEN_EXPIRY_HOURS)
return {
"token": token,
"expires_in": Config.TOKEN_EXPIRY_HOURS * 3600
}
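# Illustrative client-side sketch of the admin flow defined above (assumes this
# server running on http://localhost:8100; the password value is a placeholder):
#
#   import requests
#   base = "http://localhost:8100/marketplace/api"
#   token = requests.post(f"{base}/admin/login",
#                         json={"password": "<admin password>"}).json()["token"]
#   with open("banner.png", "rb") as fh:
#       resp = requests.post(f"{base}/admin/upload-image",
#                            headers={"Authorization": f"Bearer {token}"},
#                            files={"file": ("banner.png", fh, "image/png")},
#                            data={"folder": "sponsors"})
#   print(resp.json())  # e.g. {"url": "/uploads/sponsors/<generated-name>.png"}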
# ============= ADMIN ENDPOINTS =============
@router.get("/admin/stats", dependencies=[Depends(verify_token)])
async def get_admin_stats():
"""Get detailed admin statistics"""
stats = {
"apps": {
"total": len(db.get_all('apps', limit=10000)),
"featured": len(db.get_all('apps', where="featured = 1", limit=10000)),
"sponsored": len(db.get_all('apps', where="sponsored = 1", limit=10000))
},
"articles": len(db.get_all('articles', limit=10000)),
"categories": len(db.get_all('categories', limit=1000)),
"sponsors": {
"active": len(db.get_all('sponsors', where="active = 1", limit=1000)),
"total": len(db.get_all('sponsors', limit=10000))
},
"total_views": sum(app.get('views', 0) for app in db.get_all('apps', limit=10000))
}
return stats
# Apps CRUD
@router.post("/admin/apps", dependencies=[Depends(verify_token)])
async def create_app(app_data: Dict[str, Any]):
"""Create new app"""
try:
# Handle JSON fields
for field in ['screenshots', 'tags']:
if field in app_data and isinstance(app_data[field], list):
app_data[field] = json.dumps(app_data[field])
cursor = db.conn.cursor()
columns = ', '.join(app_data.keys())
placeholders = ', '.join(['?' for _ in app_data])
cursor.execute(f"INSERT INTO apps ({columns}) VALUES ({placeholders})",
list(app_data.values()))
db.conn.commit()
return {"id": cursor.lastrowid, "message": "App created"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.put("/admin/apps/{app_id}", dependencies=[Depends(verify_token)])
async def update_app(app_id: int, app_data: Dict[str, Any]):
"""Update app"""
try:
# Handle JSON fields
for field in ['screenshots', 'tags']:
if field in app_data and isinstance(app_data[field], list):
app_data[field] = json.dumps(app_data[field])
set_clause = ', '.join([f"{k} = ?" for k in app_data.keys()])
cursor = db.conn.cursor()
cursor.execute(f"UPDATE apps SET {set_clause} WHERE id = ?",
list(app_data.values()) + [app_id])
db.conn.commit()
return {"message": "App updated"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/admin/apps/{app_id}", dependencies=[Depends(verify_token)])
async def delete_app(app_id: int):
"""Delete app"""
cursor = db.conn.cursor()
cursor.execute("DELETE FROM apps WHERE id = ?", (app_id,))
db.conn.commit()
return {"message": "App deleted"}
# Articles CRUD
@router.post("/admin/articles", dependencies=[Depends(verify_token)])
async def create_article(article_data: Dict[str, Any]):
"""Create new article"""
try:
for field in ['related_apps', 'tags']:
if field in article_data and isinstance(article_data[field], list):
article_data[field] = json.dumps(article_data[field])
cursor = db.conn.cursor()
columns = ', '.join(article_data.keys())
placeholders = ', '.join(['?' for _ in article_data])
cursor.execute(f"INSERT INTO articles ({columns}) VALUES ({placeholders})",
list(article_data.values()))
db.conn.commit()
return {"id": cursor.lastrowid, "message": "Article created"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.put("/admin/articles/{article_id}", dependencies=[Depends(verify_token)])
async def update_article(article_id: int, article_data: Dict[str, Any]):
"""Update article"""
try:
for field in ['related_apps', 'tags']:
if field in article_data and isinstance(article_data[field], list):
article_data[field] = json.dumps(article_data[field])
set_clause = ', '.join([f"{k} = ?" for k in article_data.keys()])
cursor = db.conn.cursor()
cursor.execute(f"UPDATE articles SET {set_clause} WHERE id = ?",
list(article_data.values()) + [article_id])
db.conn.commit()
return {"message": "Article updated"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/admin/articles/{article_id}", dependencies=[Depends(verify_token)])
async def delete_article(article_id: int):
"""Delete article"""
cursor = db.conn.cursor()
cursor.execute("DELETE FROM articles WHERE id = ?", (article_id,))
db.conn.commit()
return {"message": "Article deleted"}
# Categories CRUD
@router.post("/admin/categories", dependencies=[Depends(verify_token)])
async def create_category(category_data: Dict[str, Any]):
"""Create new category"""
try:
category_data = dict(category_data)
category_data['order_index'] = to_int(category_data.get('order_index'), 0)
cursor = db.conn.cursor()
columns = ', '.join(category_data.keys())
placeholders = ', '.join(['?' for _ in category_data])
cursor.execute(f"INSERT INTO categories ({columns}) VALUES ({placeholders})",
list(category_data.values()))
db.conn.commit()
return {"id": cursor.lastrowid, "message": "Category created"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.put("/admin/categories/{cat_id}", dependencies=[Depends(verify_token)])
async def update_category(cat_id: int, category_data: Dict[str, Any]):
"""Update category"""
try:
category_data = dict(category_data)
if 'order_index' in category_data:
category_data['order_index'] = to_int(category_data.get('order_index'), 0)
set_clause = ', '.join([f"{k} = ?" for k in category_data.keys()])
cursor = db.conn.cursor()
cursor.execute(f"UPDATE categories SET {set_clause} WHERE id = ?",
list(category_data.values()) + [cat_id])
db.conn.commit()
return {"message": "Category updated"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/admin/categories/{cat_id}", dependencies=[Depends(verify_token)])
async def delete_category(cat_id: int):
"""Delete category"""
try:
cursor = db.conn.cursor()
cursor.execute("DELETE FROM categories WHERE id = ?", (cat_id,))
db.conn.commit()
return {"message": "Category deleted"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
# Sponsors CRUD
@router.post("/admin/sponsors", dependencies=[Depends(verify_token)])
async def create_sponsor(sponsor_data: Dict[str, Any]):
"""Create new sponsor"""
try:
cursor = db.conn.cursor()
columns = ', '.join(sponsor_data.keys())
placeholders = ', '.join(['?' for _ in sponsor_data])
cursor.execute(f"INSERT INTO sponsors ({columns}) VALUES ({placeholders})",
list(sponsor_data.values()))
db.conn.commit()
return {"id": cursor.lastrowid, "message": "Sponsor created"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.put("/admin/sponsors/{sponsor_id}", dependencies=[Depends(verify_token)])
async def update_sponsor(sponsor_id: int, sponsor_data: Dict[str, Any]):
"""Update sponsor"""
try:
set_clause = ', '.join([f"{k} = ?" for k in sponsor_data.keys()])
cursor = db.conn.cursor()
cursor.execute(f"UPDATE sponsors SET {set_clause} WHERE id = ?",
list(sponsor_data.values()) + [sponsor_id])
db.conn.commit()
return {"message": "Sponsor updated"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/admin/sponsors/{sponsor_id}", dependencies=[Depends(verify_token)])
async def delete_sponsor(sponsor_id: int):
"""Delete sponsor"""
try:
cursor = db.conn.cursor()
cursor.execute("DELETE FROM sponsors WHERE id = ?", (sponsor_id,))
db.conn.commit()
return {"message": "Sponsor deleted"}
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
app.include_router(router)
# Version info
VERSION = "1.1.0"
BUILD_DATE = "2025-10-26"
@app.get("/")
async def root():
"""API info"""
return {
"name": "Crawl4AI Marketplace API",
"version": VERSION,
"build_date": BUILD_DATE,
"endpoints": [
"/marketplace/api/apps",
"/marketplace/api/articles",
"/marketplace/api/categories",
"/marketplace/api/sponsors",
"/marketplace/api/search?q=query",
"/marketplace/api/stats"
]
}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8100) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/md_v2/apps/c4a-script/server.py | docs/md_v2/apps/c4a-script/server.py | #!/usr/bin/env python3
"""
C4A-Script Tutorial Server
Serves the tutorial app and provides C4A compilation API
"""
import sys
import os
from pathlib import Path
from flask import Flask, render_template_string, request, jsonify, send_from_directory
from flask_cors import CORS
# Add parent directories to path to import crawl4ai
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent.parent))
try:
from crawl4ai.script import compile as c4a_compile
C4A_AVAILABLE = True
except ImportError:
print("⚠️ C4A compiler not available. Using mock compiler.")
C4A_AVAILABLE = False
app = Flask(__name__)
CORS(app)
# Serve static files
@app.route('/')
def index():
return send_from_directory('.', 'index.html')
@app.route('/assets/<path:path>')
def serve_assets(path):
return send_from_directory('assets', path)
@app.route('/playground/')
def playground():
return send_from_directory('playground', 'index.html')
@app.route('/playground/<path:path>')
def serve_playground(path):
return send_from_directory('playground', path)
# API endpoint for C4A compilation
@app.route('/api/compile', methods=['POST'])
def compile_endpoint():
try:
data = request.get_json()
script = data.get('script', '')
if not script:
return jsonify({
'success': False,
'error': {
'line': 1,
'column': 1,
'message': 'No script provided',
'suggestion': 'Write some C4A commands'
}
})
if C4A_AVAILABLE:
# Use real C4A compiler
result = c4a_compile(script)
if result.success:
return jsonify({
'success': True,
'jsCode': result.js_code,
'metadata': {
'lineCount': len(result.js_code),
'sourceLines': len(script.split('\n'))
}
})
else:
error = result.first_error
return jsonify({
'success': False,
'error': {
'line': error.line,
'column': error.column,
'message': error.message,
'suggestion': error.suggestions[0].message if error.suggestions else None,
'code': error.code,
'sourceLine': error.source_line
}
})
else:
# Use mock compiler for demo
result = mock_compile(script)
return jsonify(result)
except Exception as e:
return jsonify({
'success': False,
'error': {
'line': 1,
'column': 1,
'message': f'Server error: {str(e)}',
'suggestion': 'Check server logs'
}
}), 500
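# Illustrative request/response sketch for this endpoint (assumes the server below
# running on its default port 8000):
#
#   import requests
#   resp = requests.post("http://localhost:8000/api/compile",
#                        json={"script": "GO /playground/\nWAIT `body` 2"})
#   data = resp.json()
#   # success: {"success": True, "jsCode": [...], "metadata": {...}}
#   # failure: {"success": False, "error": {"line": ..., "column": ..., "message": ...}}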
def mock_compile(script):
"""Simple mock compiler for demo when C4A is not available"""
lines = [line for line in script.split('\n') if line.strip() and not line.strip().startswith('#')]
js_code = []
for i, line in enumerate(lines):
line = line.strip()
try:
if line.startswith('GO '):
url = line[3:].strip()
# Handle relative URLs
if not url.startswith(('http://', 'https://')):
url = '/' + url.lstrip('/')
js_code.append(f"await page.goto('{url}');")
elif line.startswith('WAIT '):
parts = line[5:].strip().split(' ')
if parts[0].startswith('`'):
selector = parts[0].strip('`')
timeout = parts[1] if len(parts) > 1 else '5'
js_code.append(f"await page.waitForSelector('{selector}', {{ timeout: {timeout}000 }});")
else:
seconds = parts[0]
js_code.append(f"await page.waitForTimeout({seconds}000);")
elif line.startswith('CLICK '):
selector = line[6:].strip().strip('`')
js_code.append(f"await page.click('{selector}');")
elif line.startswith('TYPE '):
text = line[5:].strip().strip('"')
js_code.append(f"await page.keyboard.type('{text}');")
elif line.startswith('SCROLL '):
parts = line[7:].strip().split(' ')
direction = parts[0]
amount = parts[1] if len(parts) > 1 else '500'
if direction == 'DOWN':
js_code.append(f"await page.evaluate(() => window.scrollBy(0, {amount}));")
elif direction == 'UP':
js_code.append(f"await page.evaluate(() => window.scrollBy(0, -{amount}));")
elif line.startswith('IF '):
if 'THEN' not in line:
return {
'success': False,
'error': {
'line': i + 1,
'column': len(line),
'message': "Missing 'THEN' keyword after IF condition",
'suggestion': "Add 'THEN' after the condition",
'sourceLine': line
}
}
condition = line[3:line.index('THEN')].strip()
action = line[line.index('THEN') + 4:].strip()
if 'EXISTS' in condition:
selector_match = condition.split('`')
if len(selector_match) >= 2:
selector = selector_match[1]
action_selector = action.split('`')[1] if '`' in action else ''
js_code.append(
f"if (await page.$$('{selector}').length > 0) {{ "
f"await page.click('{action_selector}'); }}"
)
elif line.startswith('PRESS '):
key = line[6:].strip()
js_code.append(f"await page.keyboard.press('{key}');")
else:
# Unknown command
return {
'success': False,
'error': {
'line': i + 1,
'column': 1,
'message': f"Unknown command: {line.split()[0]}",
'suggestion': "Check command syntax",
'sourceLine': line
}
}
except Exception as e:
return {
'success': False,
'error': {
'line': i + 1,
'column': 1,
'message': f"Failed to parse: {str(e)}",
'suggestion': "Check syntax",
'sourceLine': line
}
}
return {
'success': True,
'jsCode': js_code,
'metadata': {
'lineCount': len(js_code),
'sourceLines': len(lines)
}
}
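# Example of what the mock translation rules above produce for a small script
# (derived directly from the branches in this function):
#
#   GO /playground/          ->  await page.goto('/playground/');
#   WAIT `body` 2            ->  await page.waitForSelector('body', { timeout: 2000 });
#   CLICK `#login-btn`       ->  await page.click('#login-btn');
#   TYPE "demo@example.com"  ->  await page.keyboard.type('demo@example.com');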
# Example scripts endpoint
@app.route('/api/examples')
def get_examples():
examples = [
{
'id': 'cookie-banner',
'name': 'Handle Cookie Banner',
'description': 'Accept cookies and close newsletter popup',
'script': '''# Handle cookie banner and newsletter
GO http://127.0.0.1:8000/playground/
WAIT `body` 2
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
},
{
'id': 'login',
'name': 'Login Flow',
'description': 'Complete login with credentials',
'script': '''# Login to the site
CLICK `#login-btn`
WAIT `.login-form` 2
CLICK `#email`
TYPE "demo@example.com"
CLICK `#password`
TYPE "demo123"
IF (EXISTS `#remember-me`) THEN CLICK `#remember-me`
CLICK `button[type="submit"]`
WAIT `.welcome-message` 5'''
},
{
'id': 'infinite-scroll',
'name': 'Infinite Scroll',
'description': 'Load products with scrolling',
'script': '''# Navigate to catalog and scroll
CLICK `#catalog-link`
WAIT `.product-grid` 3
# Scroll multiple times to load products
SCROLL DOWN 1000
WAIT 1
SCROLL DOWN 1000
WAIT 1
SCROLL DOWN 1000'''
},
{
'id': 'form-wizard',
'name': 'Multi-step Form',
'description': 'Complete a multi-step survey',
'script': '''# Navigate to forms
CLICK `a[href="#forms"]`
WAIT `#survey-form` 2
# Step 1: Basic info
CLICK `#full-name`
TYPE "John Doe"
CLICK `#survey-email`
TYPE "john@example.com"
CLICK `.next-step`
WAIT 1
# Step 2: Preferences
CLICK `#interests`
CLICK `option[value="tech"]`
CLICK `option[value="music"]`
CLICK `.next-step`
WAIT 1
# Step 3: Submit
CLICK `#submit-survey`
WAIT `.success-message` 5'''
}
]
return jsonify(examples)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8000))
print(f"""
╔══════════════════════════════════════════════════════════╗
║ C4A-Script Interactive Tutorial Server ║
╠══════════════════════════════════════════════════════════╣
║ ║
║ Server running at: http://localhost:{port:<6} ║
║ ║
║ Features: ║
║ • C4A-Script compilation API ║
║ • Interactive playground ║
║ • Real-time execution visualization ║
║ ║
║ C4A Compiler: {'✓ Available' if C4A_AVAILABLE else '✗ Using mock compiler':<30} ║
║ ║
╚══════════════════════════════════════════════════════════╝
""")
app.run(host='0.0.0.0', port=port, debug=True) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/demo_v0.7.0.py | docs/releases_review/demo_v0.7.0.py | """
🚀 Crawl4AI v0.7.0 Release Demo
================================
This demo showcases all major features introduced in v0.7.0 release.
Major Features:
1. ✅ Adaptive Crawling - Intelligent crawling with confidence tracking
2. ✅ Virtual Scroll Support - Handle infinite scroll pages
3. ✅ Link Preview - Advanced link analysis with 3-layer scoring
4. ✅ URL Seeder - Smart URL discovery and filtering
5. ✅ C4A Script - Domain-specific language for web automation
6. ✅ Chrome Extension Updates - Click2Crawl and instant schema extraction
7. ✅ PDF Parsing Support - Extract content from PDF documents
8. ✅ Nightly Builds - Automated nightly releases
Run this demo to see all features in action!
"""
import asyncio
import json
from typing import List, Dict
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from rich import box
from crawl4ai import (
AsyncWebCrawler,
CrawlerRunConfig,
BrowserConfig,
CacheMode,
AdaptiveCrawler,
AdaptiveConfig,
AsyncUrlSeeder,
SeedingConfig,
c4a_compile,
CompilationResult
)
from crawl4ai.async_configs import VirtualScrollConfig, LinkPreviewConfig
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
console = Console()
def print_section(title: str, description: str = ""):
"""Print a section header"""
console.print(f"\n[bold cyan]{'=' * 60}[/bold cyan]")
console.print(f"[bold yellow]{title}[/bold yellow]")
if description:
console.print(f"[dim]{description}[/dim]")
console.print(f"[bold cyan]{'=' * 60}[/bold cyan]\n")
async def demo_1_adaptive_crawling():
"""Demo 1: Adaptive Crawling - Intelligent content extraction"""
print_section(
"Demo 1: Adaptive Crawling",
"Intelligently learns and adapts to website patterns"
)
# Create adaptive crawler with custom configuration
config = AdaptiveConfig(
strategy="statistical", # or "embedding"
confidence_threshold=0.7,
max_pages=10,
top_k_links=3,
min_gain_threshold=0.1
)
# Example: Learn from a product page
console.print("[cyan]Learning from product page patterns...[/cyan]")
async with AsyncWebCrawler() as crawler:
adaptive = AdaptiveCrawler(crawler, config)
# Start adaptive crawl
console.print("[cyan]Starting adaptive crawl...[/cyan]")
result = await adaptive.digest(
start_url="https://docs.python.org/3/",
query="python decorators tutorial"
)
console.print("[green]✓ Adaptive crawl completed[/green]")
console.print(f" - Confidence Level: {adaptive.confidence:.0%}")
console.print(f" - Pages Crawled: {len(result.crawled_urls)}")
console.print(f" - Knowledge Base: {len(adaptive.state.knowledge_base)} documents")
# Get most relevant content
relevant = adaptive.get_relevant_content(top_k=3)
if relevant:
console.print("\nMost relevant pages:")
for i, page in enumerate(relevant, 1):
console.print(f" {i}. {page['url']} (relevance: {page['score']:.2%})")
async def demo_2_virtual_scroll():
"""Demo 2: Virtual Scroll - Handle infinite scroll pages"""
print_section(
"Demo 2: Virtual Scroll Support",
"Capture content from modern infinite scroll pages"
)
# Configure virtual scroll - using body as container for example.com
scroll_config = VirtualScrollConfig(
container_selector="body", # Using body since example.com has simple structure
scroll_count=3, # Just 3 scrolls for demo
scroll_by="container_height", # or "page_height" or pixel value
wait_after_scroll=0.5 # Wait 500ms after each scroll
)
config = CrawlerRunConfig(
virtual_scroll_config=scroll_config,
cache_mode=CacheMode.BYPASS,
wait_until="networkidle"
)
console.print("[cyan]Virtual Scroll Configuration:[/cyan]")
console.print(f" - Container: {scroll_config.container_selector}")
console.print(f" - Scroll count: {scroll_config.scroll_count}")
console.print(f" - Scroll by: {scroll_config.scroll_by}")
console.print(f" - Wait after scroll: {scroll_config.wait_after_scroll}s")
console.print("\n[dim]Note: Using example.com for demo - in production, use this[/dim]")
console.print("[dim]with actual infinite scroll pages like social media feeds.[/dim]\n")
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
"https://example.com",
config=config
)
if result.success:
console.print("[green]✓ Virtual scroll executed successfully![/green]")
console.print(f" - Content length: {len(result.markdown)} chars")
# Show example of how to use with real infinite scroll sites
console.print("\n[yellow]Example for real infinite scroll sites:[/yellow]")
console.print("""
# For Twitter-like feeds:
scroll_config = VirtualScrollConfig(
container_selector="[data-testid='primaryColumn']",
scroll_count=20,
scroll_by="container_height",
wait_after_scroll=1.0
)
# For Instagram-like grids:
scroll_config = VirtualScrollConfig(
container_selector="main article",
scroll_count=15,
scroll_by=1000, # Fixed pixel amount
wait_after_scroll=1.5
)""")
async def demo_3_link_preview():
"""Demo 3: Link Preview with 3-layer scoring"""
print_section(
"Demo 3: Link Preview & Scoring",
"Advanced link analysis with intrinsic, contextual, and total scoring"
)
# Configure link preview
link_config = LinkPreviewConfig(
include_internal=True,
include_external=False,
max_links=10,
concurrency=5,
query="python tutorial", # For contextual scoring
score_threshold=0.3,
verbose=True
)
config = CrawlerRunConfig(
link_preview_config=link_config,
score_links=True, # Enable intrinsic scoring
cache_mode=CacheMode.BYPASS
)
console.print("[cyan]Analyzing links with 3-layer scoring system...[/cyan]")
async with AsyncWebCrawler() as crawler:
result = await crawler.arun("https://docs.python.org/3/", config=config)
if result.success and result.links:
# Get scored links
internal_links = result.links.get("internal", [])
scored_links = [l for l in internal_links if l.get("total_score")]
scored_links.sort(key=lambda x: x.get("total_score", 0), reverse=True)
# Create a scoring table
table = Table(title="Link Scoring Results", box=box.ROUNDED)
table.add_column("Link Text", style="cyan", width=40)
table.add_column("Intrinsic Score", justify="center")
table.add_column("Contextual Score", justify="center")
table.add_column("Total Score", justify="center", style="bold green")
for link in scored_links[:5]:
text = link.get('text', 'No text')[:40]
table.add_row(
text,
f"{link.get('intrinsic_score', 0):.1f}/10",
f"{link.get('contextual_score', 0):.2f}/1",
f"{link.get('total_score', 0):.3f}"
)
console.print(table)
async def demo_4_url_seeder():
"""Demo 4: URL Seeder - Smart URL discovery"""
print_section(
"Demo 4: URL Seeder",
"Intelligent URL discovery and filtering"
)
# Configure seeding
seeding_config = SeedingConfig(
source="cc+sitemap", # or "crawl"
pattern="*tutorial*", # URL pattern filter
max_urls=50,
extract_head=True, # Get metadata
query="python programming", # For relevance scoring
scoring_method="bm25",
score_threshold=0.2,
force = True
)
console.print("[cyan]URL Seeder Configuration:[/cyan]")
console.print(f" - Source: {seeding_config.source}")
console.print(f" - Pattern: {seeding_config.pattern}")
console.print(f" - Max URLs: {seeding_config.max_urls}")
console.print(f" - Query: {seeding_config.query}")
console.print(f" - Scoring: {seeding_config.scoring_method}")
# Use URL seeder to discover URLs
async with AsyncUrlSeeder() as seeder:
console.print("\n[cyan]Discovering URLs from Python docs...[/cyan]")
urls = await seeder.urls("docs.python.org", seeding_config)
console.print(f"\n[green]✓ Discovered {len(urls)} URLs[/green]")
for i, url_info in enumerate(urls[:5], 1):
console.print(f" {i}. {url_info['url']}")
if url_info.get('relevance_score'):
console.print(f" Relevance: {url_info['relevance_score']:.3f}")
async def demo_5_c4a_script():
"""Demo 5: C4A Script - Domain-specific language"""
print_section(
"Demo 5: C4A Script Language",
"Domain-specific language for web automation"
)
# Example C4A script
c4a_script = """
# Simple C4A script example
WAIT `body` 3
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
CLICK `.search-button`
TYPE "python tutorial"
PRESS Enter
WAIT `.results` 5
"""
console.print("[cyan]C4A Script Example:[/cyan]")
console.print(Panel(c4a_script, title="script.c4a", border_style="blue"))
# Compile the script
compilation_result = c4a_compile(c4a_script)
if compilation_result.success:
console.print("[green]✓ Script compiled successfully![/green]")
console.print(f" - Generated {len(compilation_result.js_code)} JavaScript statements")
console.print("\nFirst 3 JS statements:")
for stmt in compilation_result.js_code[:3]:
console.print(f" • {stmt}")
else:
console.print("[red]✗ Script compilation failed[/red]")
if compilation_result.first_error:
error = compilation_result.first_error
console.print(f" Error at line {error.line}: {error.message}")
async def demo_6_css_extraction():
"""Demo 6: Enhanced CSS/JSON extraction"""
print_section(
"Demo 6: Enhanced Extraction",
"Improved CSS selector and JSON extraction"
)
# Define extraction schema
schema = {
"name": "Example Page Data",
"baseSelector": "body",
"fields": [
{
"name": "title",
"selector": "h1",
"type": "text"
},
{
"name": "paragraphs",
"selector": "p",
"type": "list",
"fields": [
{"name": "text", "type": "text"}
]
}
]
}
extraction_strategy = JsonCssExtractionStrategy(schema)
console.print("[cyan]Extraction Schema:[/cyan]")
console.print(json.dumps(schema, indent=2))
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
"https://example.com",
config=CrawlerRunConfig(
extraction_strategy=extraction_strategy,
cache_mode=CacheMode.BYPASS
)
)
if result.success and result.extracted_content:
console.print("\n[green]✓ Content extracted successfully![/green]")
console.print(f"Extracted: {json.dumps(json.loads(result.extracted_content), indent=2)[:200]}...")
async def demo_7_performance_improvements():
"""Demo 7: Performance improvements"""
print_section(
"Demo 7: Performance Improvements",
"Faster crawling with better resource management"
)
# Performance-optimized configuration
config = CrawlerRunConfig(
cache_mode=CacheMode.ENABLED, # Use caching
wait_until="domcontentloaded", # Faster than networkidle
page_timeout=10000, # 10 second timeout
exclude_external_links=True,
exclude_social_media_links=True,
exclude_external_images=True
)
console.print("[cyan]Performance Configuration:[/cyan]")
console.print(" - Cache: ENABLED")
console.print(" - Wait: domcontentloaded (faster)")
console.print(" - Timeout: 10s")
console.print(" - Excluding: external links, images, social media")
# Measure performance
import time
start_time = time.time()
async with AsyncWebCrawler() as crawler:
result = await crawler.arun("https://example.com", config=config)
elapsed = time.time() - start_time
if result.success:
console.print(f"\n[green]✓ Page crawled in {elapsed:.2f} seconds[/green]")
async def main():
"""Run all demos"""
console.print(Panel(
"[bold cyan]Crawl4AI v0.7.0 Release Demo[/bold cyan]\n\n"
"This demo showcases all major features introduced in v0.7.0.\n"
"Each demo is self-contained and demonstrates a specific feature.",
title="Welcome",
border_style="blue"
))
demos = [
demo_1_adaptive_crawling,
demo_2_virtual_scroll,
demo_3_link_preview,
demo_4_url_seeder,
demo_5_c4a_script,
demo_6_css_extraction,
demo_7_performance_improvements
]
for i, demo in enumerate(demos, 1):
try:
await demo()
if i < len(demos):
console.print("\n[dim]Press Enter to continue to next demo...[/dim]")
input()
except Exception as e:
console.print(f"[red]Error in demo: {e}[/red]")
continue
console.print(Panel(
"[bold green]Demo Complete![/bold green]\n\n"
"Thank you for trying Crawl4AI v0.7.0!\n"
"For more examples and documentation, visit:\n"
"https://github.com/unclecode/crawl4ai",
title="Complete",
border_style="green"
))
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/demo_v0.7.5.py | docs/releases_review/demo_v0.7.5.py | """
🚀 Crawl4AI v0.7.5 Release Demo - Working Examples
==================================================
This demo showcases key features introduced in v0.7.5 with real, executable examples.
Featured Demos:
1. ✅ Docker Hooks System - Real API calls with custom hooks (string & function-based)
2. ✅ Enhanced LLM Integration - Working LLM configurations
3. ✅ HTTPS Preservation - Live crawling with HTTPS maintenance
Requirements:
- crawl4ai v0.7.5 installed
- Docker running with crawl4ai image (optional for Docker demos)
- Valid API keys for LLM demos (optional)
"""
import asyncio
import requests
import time
import sys
from crawl4ai import (AsyncWebCrawler, CrawlerRunConfig, BrowserConfig,
CacheMode, FilterChain, URLPatternFilter, BFSDeepCrawlStrategy,
hooks_to_string)
from crawl4ai.docker_client import Crawl4aiDockerClient
def print_section(title: str, description: str = ""):
"""Print a section header"""
print(f"\n{'=' * 60}")
print(f"{title}")
if description:
print(f"{description}")
print(f"{'=' * 60}\n")
async def demo_1_docker_hooks_system():
"""Demo 1: Docker Hooks System - Real API calls with custom hooks"""
print_section(
"Demo 1: Docker Hooks System",
"Testing both string-based and function-based hooks (NEW in v0.7.5!)"
)
# Check Docker service availability
def check_docker_service():
try:
response = requests.get("http://localhost:11235/", timeout=3)
return response.status_code == 200
except requests.RequestException:
return False
print("Checking Docker service...")
docker_running = check_docker_service()
if not docker_running:
print("⚠️ Docker service not running on localhost:11235")
print("To test Docker hooks:")
print("1. Run: docker run -p 11235:11235 unclecode/crawl4ai:latest")
print("2. Wait for service to start")
print("3. Re-run this demo\n")
return
print("✓ Docker service detected!")
# ============================================================================
# PART 1: Traditional String-Based Hooks (Works with REST API)
# ============================================================================
print("\n" + "─" * 60)
print("Part 1: String-Based Hooks (REST API)")
print("─" * 60)
hooks_config_string = {
"on_page_context_created": """
async def hook(page, context, **kwargs):
print("[String Hook] Setting up page context")
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
return page
""",
"before_retrieve_html": """
async def hook(page, context, **kwargs):
print("[String Hook] Before retrieving HTML")
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
await page.wait_for_timeout(1000)
return page
"""
}
payload = {
"urls": ["https://httpbin.org/html"],
"hooks": {
"code": hooks_config_string,
"timeout": 30
}
}
print("🔧 Using string-based hooks for REST API...")
try:
start_time = time.time()
response = requests.post("http://localhost:11235/crawl", json=payload, timeout=60)
execution_time = time.time() - start_time
if response.status_code == 200:
result = response.json()
print(f"✅ String-based hooks executed in {execution_time:.2f}s")
if result.get('results') and result['results'][0].get('success'):
html_length = len(result['results'][0].get('html', ''))
print(f" 📄 HTML length: {html_length} characters")
else:
print(f"❌ Request failed: {response.status_code}")
except Exception as e:
print(f"❌ Error: {str(e)}")
# ============================================================================
# PART 2: NEW Function-Based Hooks with Docker Client (v0.7.5)
# ============================================================================
print("\n" + "─" * 60)
print("Part 2: Function-Based Hooks with Docker Client (✨ NEW!)")
print("─" * 60)
# Define hooks as regular Python functions
async def on_page_context_created_func(page, context, **kwargs):
"""Block images to speed up crawling"""
print("[Function Hook] Setting up page context")
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
await page.set_viewport_size({"width": 1920, "height": 1080})
return page
async def before_goto_func(page, context, url, **kwargs):
"""Add custom headers before navigation"""
print(f"[Function Hook] About to navigate to {url}")
await page.set_extra_http_headers({
'X-Crawl4AI': 'v0.7.5-function-hooks',
'X-Test-Header': 'demo'
})
return page
async def before_retrieve_html_func(page, context, **kwargs):
"""Scroll to load lazy content"""
print("[Function Hook] Scrolling page for lazy-loaded content")
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
await page.wait_for_timeout(500)
await page.evaluate("window.scrollTo(0, 0)")
return page
# Use the hooks_to_string utility (can be used standalone)
print("\n📦 Converting functions to strings with hooks_to_string()...")
hooks_as_strings = hooks_to_string({
"on_page_context_created": on_page_context_created_func,
"before_goto": before_goto_func,
"before_retrieve_html": before_retrieve_html_func
})
print(f" ✓ Converted {len(hooks_as_strings)} hooks to string format")
# OR use Docker Client which does conversion automatically!
print("\n🐳 Using Docker Client with automatic conversion...")
try:
client = Crawl4aiDockerClient(base_url="http://localhost:11235")
# Pass function objects directly - conversion happens automatically!
results = await client.crawl(
urls=["https://httpbin.org/html"],
hooks={
"on_page_context_created": on_page_context_created_func,
"before_goto": before_goto_func,
"before_retrieve_html": before_retrieve_html_func
},
hooks_timeout=30
)
if results and results.success:
print(f"✅ Function-based hooks executed successfully!")
print(f" 📄 HTML length: {len(results.html)} characters")
print(f" 🎯 URL: {results.url}")
else:
print("⚠️ Crawl completed but may have warnings")
except Exception as e:
print(f"❌ Docker client error: {str(e)}")
# Show the benefits
print("\n" + "=" * 60)
print("✨ Benefits of Function-Based Hooks:")
print("=" * 60)
print("✓ Full IDE support (autocomplete, syntax highlighting)")
print("✓ Type checking and linting")
print("✓ Easier to test and debug")
print("✓ Reusable across projects")
print("✓ Automatic conversion in Docker client")
print("=" * 60)
async def demo_2_enhanced_llm_integration():
"""Demo 2: Enhanced LLM Integration - Working LLM configurations"""
print_section(
"Demo 2: Enhanced LLM Integration",
"Testing custom LLM providers and configurations"
)
print("🤖 Testing Enhanced LLM Integration Features")
provider = "gemini/gemini-2.5-flash-lite"
payload = {
"url": "https://example.com",
"f": "llm",
"q": "Summarize this page in one sentence.",
"provider": provider, # Explicitly set provider
"temperature": 0.7
}
try:
response = requests.post(
"http://localhost:11235/md",
json=payload,
timeout=60
)
if response.status_code == 200:
result = response.json()
print(f"✓ Request successful with provider: {provider}")
print(f" - Response keys: {list(result.keys())}")
print(f" - Content length: {len(result.get('markdown', ''))} characters")
print(f" - Note: Actual LLM call may fail without valid API key")
else:
print(f"❌ Request failed: {response.status_code}")
print(f" - Response: {response.text[:500]}")
except Exception as e:
print(f"[red]Error: {e}[/]")
async def demo_3_https_preservation():
"""Demo 3: HTTPS Preservation - Live crawling with HTTPS maintenance"""
print_section(
"Demo 3: HTTPS Preservation",
"Testing HTTPS preservation for internal links"
)
print("🔒 Testing HTTPS Preservation Feature")
# Test with HTTPS preservation enabled
print("\nTest 1: HTTPS Preservation ENABLED")
url_filter = URLPatternFilter(
patterns=["^(https:\/\/)?quotes\.toscrape\.com(\/.*)?$"]
)
config = CrawlerRunConfig(
exclude_external_links=True,
stream=True,
verbose=False,
preserve_https_for_internal_links=True,
deep_crawl_strategy=BFSDeepCrawlStrategy(
max_depth=2,
max_pages=5,
filter_chain=FilterChain([url_filter])
)
)
test_url = "https://quotes.toscrape.com"
print(f"🎯 Testing URL: {test_url}")
async with AsyncWebCrawler() as crawler:
async for result in await crawler.arun(url=test_url, config=config):
print("✓ HTTPS Preservation Test Completed")
internal_links = [i['href'] for i in result.links['internal']]
for link in internal_links:
print(f" → {link}")
async def main():
"""Run all demos"""
print("\n" + "=" * 60)
print("🚀 Crawl4AI v0.7.5 Working Demo")
print("=" * 60)
# Check system requirements
print("🔍 System Requirements Check:")
print(f" - Python version: {sys.version.split()[0]} {'✓' if sys.version_info >= (3, 10) else '❌ (3.10+ required)'}")
try:
import requests
print(f" - Requests library: ✓")
except ImportError:
print(f" - Requests library: ❌")
print()
demos = [
("Docker Hooks System", demo_1_docker_hooks_system),
("Enhanced LLM Integration", demo_2_enhanced_llm_integration),
("HTTPS Preservation", demo_3_https_preservation),
]
for i, (name, demo_func) in enumerate(demos, 1):
try:
print(f"\n📍 Starting Demo {i}/{len(demos)}: {name}")
await demo_func()
if i < len(demos):
print(f"\n✨ Demo {i} complete! Press Enter for next demo...")
input()
except KeyboardInterrupt:
print(f"\n⏹️ Demo interrupted by user")
break
except Exception as e:
print(f"❌ Demo {i} error: {str(e)}")
print("Continuing to next demo...")
continue
print("\n" + "=" * 60)
print("🎉 Demo Complete!")
print("=" * 60)
print("You've experienced the power of Crawl4AI v0.7.5!")
print("")
print("Key Features Demonstrated:")
print("🔧 Docker Hooks - String-based & function-based (NEW!)")
print(" • hooks_to_string() utility for function conversion")
print(" • Docker client with automatic conversion")
print(" • Full IDE support and type checking")
print("🤖 Enhanced LLM - Better AI integration")
print("🔒 HTTPS Preservation - Secure link handling")
print("")
print("Ready to build something amazing? 🚀")
print("")
print("📖 Docs: https://docs.crawl4ai.com/")
print("🐙 GitHub: https://github.com/unclecode/crawl4ai")
print("=" * 60)
if __name__ == "__main__":
print("🚀 Crawl4AI v0.7.5 Live Demo Starting...")
print("Press Ctrl+C anytime to exit\n")
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\n👋 Demo stopped by user. Thanks for trying Crawl4AI v0.7.5!")
except Exception as e:
print(f"\n❌ Demo error: {str(e)}")
print("Make sure you have the required dependencies installed.")
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/v0.3.74.overview.py | docs/releases_review/v0.3.74.overview.py | import os, sys
# append the parent directory to the sys.path
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
parent_parent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_parent_dir)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
__data__ = os.path.join(__location__, "__data")
import asyncio
from pathlib import Path
import aiohttp
import json
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.content_filter_strategy import BM25ContentFilter
# 1. File Download Processing Example
async def download_example():
"""Example of downloading files from Python.org"""
# downloads_path = os.path.join(os.getcwd(), "downloads")
downloads_path = os.path.join(Path.home(), ".crawl4ai", "downloads")
os.makedirs(downloads_path, exist_ok=True)
print(f"Downloads will be saved to: {downloads_path}")
async with AsyncWebCrawler(
accept_downloads=True, downloads_path=downloads_path, verbose=True
) as crawler:
result = await crawler.arun(
url="https://www.python.org/downloads/",
js_code="""
// Find and click the first Windows installer link
const downloadLink = document.querySelector('a[href$=".exe"]');
if (downloadLink) {
console.log('Found download link:', downloadLink.href);
downloadLink.click();
} else {
console.log('No .exe download link found');
}
""",
delay_before_return_html=1, # Wait 1 second to give the download time to start
cache_mode=CacheMode.BYPASS,
)
if result.downloaded_files:
print("\nDownload successful!")
print("Downloaded files:")
for file_path in result.downloaded_files:
print(f"- {file_path}")
print(f" File size: {os.path.getsize(file_path) / (1024*1024):.2f} MB")
else:
print("\nNo files were downloaded")
# 2. Local File and Raw HTML Processing Example
async def local_and_raw_html_example():
"""Example of processing local files and raw HTML"""
# Create a sample HTML file
sample_file = os.path.join(__data__, "sample.html")
with open(sample_file, "w") as f:
f.write(
"""
<html><body>
<h1>Test Content</h1>
<p>This is a test paragraph.</p>
</body></html>
"""
)
async with AsyncWebCrawler(verbose=True) as crawler:
# Process local file
local_result = await crawler.arun(url=f"file://{os.path.abspath(sample_file)}")
# Process raw HTML
raw_html = """
<html><body>
<h1>Raw HTML Test</h1>
<p>This is a test of raw HTML processing.</p>
</body></html>
"""
raw_result = await crawler.arun(url=f"raw:{raw_html}")
# Clean up
os.remove(sample_file)
print("Local file content:", local_result.markdown)
print("\nRaw HTML content:", raw_result.markdown)
# 3. Enhanced Markdown Generation Example
async def markdown_generation_example():
"""Example of enhanced markdown generation with citations and LLM-friendly features"""
async with AsyncWebCrawler(verbose=True) as crawler:
# Create a content filter (optional)
content_filter = BM25ContentFilter(
# user_query="History and cultivation",
bm25_threshold=1.0
)
result = await crawler.arun(
url="https://en.wikipedia.org/wiki/Apple",
css_selector="main div#bodyContent",
content_filter=content_filter,
cache_mode=CacheMode.BYPASS,
)
print(result.markdown_v2.fit_markdown)
print("\nMarkdown Generation Results:")
print(f"1. Original markdown length: {len(result.markdown)}")
print("2. New markdown versions (markdown_v2):")
print(f" - Raw markdown length: {len(result.markdown_v2.raw_markdown)}")
print(
f" - Citations markdown length: {len(result.markdown_v2.markdown_with_citations)}"
)
print(
f" - References section length: {len(result.markdown_v2.references_markdown)}"
)
if result.markdown_v2.fit_markdown:
print(
f" - Filtered markdown length: {len(result.markdown_v2.fit_markdown)}"
)
# Save examples to files
output_dir = os.path.join(__data__, "markdown_examples")
os.makedirs(output_dir, exist_ok=True)
# Save different versions
with open(os.path.join(output_dir, "1_raw_markdown.md"), "w") as f:
f.write(result.markdown_v2.raw_markdown)
with open(os.path.join(output_dir, "2_citations_markdown.md"), "w") as f:
f.write(result.markdown_v2.markdown_with_citations)
with open(os.path.join(output_dir, "3_references.md"), "w") as f:
f.write(result.markdown_v2.references_markdown)
if result.markdown_v2.fit_markdown:
with open(os.path.join(output_dir, "4_filtered_markdown.md"), "w") as f:
f.write(result.markdown_v2.fit_markdown)
print(f"\nMarkdown examples saved to: {output_dir}")
# Show a sample of citations and references
print("\nSample of markdown with citations:")
print(result.markdown_v2.markdown_with_citations[:500] + "...\n")
print("Sample of references:")
print(
"\n".join(result.markdown_v2.references_markdown.split("\n")[:10]) + "..."
)
# 4. Browser Management Example
async def browser_management_example():
"""Example of using enhanced browser management features"""
# Use the specified user directory path
user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
os.makedirs(user_data_dir, exist_ok=True)
print(f"Browser profile will be saved to: {user_data_dir}")
async with AsyncWebCrawler(
use_managed_browser=True,
user_data_dir=user_data_dir,
headless=False,
verbose=True,
) as crawler:
result = await crawler.arun(
url="https://crawl4ai.com",
# session_id="persistent_session_1",
cache_mode=CacheMode.BYPASS,
)
# Use GitHub as an example - it's a good test for browser management
# because it requires proper browser handling
result = await crawler.arun(
url="https://github.com/trending",
# session_id="persistent_session_1",
cache_mode=CacheMode.BYPASS,
)
print("\nBrowser session result:", result.success)
if result.success:
print("Page title:", result.metadata.get("title", "No title found"))
# 5. API Usage Example
async def api_example():
"""Example of using the new API endpoints"""
api_token = os.getenv("CRAWL4AI_API_TOKEN") or "test_api_code"
headers = {"Authorization": f"Bearer {api_token}"}
async with aiohttp.ClientSession() as session:
# Submit crawl job
crawl_request = {
"urls": ["https://news.ycombinator.com"], # Hacker News as an example
"extraction_config": {
"type": "json_css",
"params": {
"schema": {
"name": "Hacker News Articles",
"baseSelector": ".athing",
"fields": [
{"name": "title", "selector": ".title a", "type": "text"},
{"name": "score", "selector": ".score", "type": "text"},
{
"name": "url",
"selector": ".title a",
"type": "attribute",
"attribute": "href",
},
],
}
},
},
"crawler_params": {
"headless": True,
# "use_managed_browser": True
},
"cache_mode": "bypass",
# "screenshot": True,
# "magic": True
}
async with session.post(
"http://localhost:11235/crawl", json=crawl_request, headers=headers
) as response:
task_data = await response.json()
task_id = task_data["task_id"]
# Check task status
while True:
async with session.get(
f"http://localhost:11235/task/{task_id}", headers=headers
) as status_response:
result = await status_response.json()
print(f"Task status: {result['status']}")
if result["status"] == "completed":
print("Task completed!")
print("Results:")
news = json.loads(result["results"][0]["extracted_content"])
print(json.dumps(news[:4], indent=2))
break
else:
await asyncio.sleep(1)
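# The loop above polls the task endpoint once per second with no upper bound.
# The helper below is a minimal sketch of a bounded variant with exponential
# backoff; it reuses the same /task/{task_id} endpoint as the example, but the
# function name, timeout, and backoff values are illustrative, not part of the
# Crawl4AI API.
async def poll_task_with_backoff(session, task_id, headers, timeout_s=120):
    """Poll a task until it completes or fails, doubling the delay up to 10s."""
    import time as _time
    delay, deadline = 1.0, _time.monotonic() + timeout_s
    while _time.monotonic() < deadline:
        async with session.get(
            f"http://localhost:11235/task/{task_id}", headers=headers
        ) as status_response:
            data = await status_response.json()
        if data["status"] in ("completed", "failed"):
            return data
        await asyncio.sleep(delay)
        delay = min(delay * 2, 10.0)
    raise TimeoutError(f"Task {task_id} did not finish within {timeout_s}s")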
# Main execution
async def main():
# print("Running Crawl4AI feature examples...")
# print("\n1. Running Download Example:")
# await download_example()
# print("\n2. Running Markdown Generation Example:")
# await markdown_generation_example()
# # print("\n3. Running Local and Raw HTML Example:")
# await local_and_raw_html_example()
# # print("\n4. Running Browser Management Example:")
await browser_management_example()
# print("\n5. Running API Example:")
await api_example()
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/v0_7_0_features_demo.py | docs/releases_review/v0_7_0_features_demo.py | """
🚀 Crawl4AI v0.7.0 Feature Demo
================================
This file demonstrates the major features introduced in v0.7.0 with practical examples.
"""
import asyncio
import json
from pathlib import Path
from crawl4ai import (
AsyncWebCrawler,
CrawlerRunConfig,
BrowserConfig,
CacheMode,
# New imports for v0.7.0
VirtualScrollConfig,
LinkPreviewConfig,
AdaptiveCrawler,
AdaptiveConfig,
AsyncUrlSeeder,
SeedingConfig,
c4a_compile,
)
async def demo_link_preview():
"""
Demo 1: Link Preview with 3-Layer Scoring
Shows how to analyze links with intrinsic quality scores,
contextual relevance, and combined total scores.
"""
print("\n" + "="*60)
print("🔗 DEMO 1: Link Preview & Intelligent Scoring")
print("="*60)
# Configure link preview with contextual scoring
config = CrawlerRunConfig(
link_preview_config=LinkPreviewConfig(
include_internal=True,
include_external=False,
max_links=10,
concurrency=5,
query="machine learning tutorials", # For contextual scoring
score_threshold=0.3, # Minimum relevance
verbose=True
),
score_links=True, # Enable intrinsic scoring
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun("https://scikit-learn.org/stable/", config=config)
if result.success:
# Get scored links
internal_links = result.links.get("internal", [])
scored_links = [l for l in internal_links if l.get("total_score")]
scored_links.sort(key=lambda x: x.get("total_score", 0), reverse=True)
print(f"\nTop 5 Most Relevant Links:")
for i, link in enumerate(scored_links[:5], 1):
print(f"\n{i}. {link.get('text', 'No text')[:50]}...")
print(f" URL: {link['href']}")
print(f" Intrinsic Score: {link.get('intrinsic_score', 0):.2f}/10")
print(f" Contextual Score: {link.get('contextual_score', 0):.3f}")
print(f" Total Score: {link.get('total_score', 0):.3f}")
# Show metadata if available
if link.get('head_data'):
title = link['head_data'].get('title', 'No title')
print(f" Title: {title[:60]}...")
async def demo_adaptive_crawling():
"""
Demo 2: Adaptive Crawling
Shows intelligent crawling that stops when enough information
is gathered, with confidence tracking.
"""
print("\n" + "="*60)
print("🎯 DEMO 2: Adaptive Crawling with Confidence Tracking")
print("="*60)
# Configure adaptive crawler
config = AdaptiveConfig(
strategy="statistical", # or "embedding" for semantic understanding
max_pages=10,
confidence_threshold=0.7, # Stop at 70% confidence
top_k_links=3, # Follow top 3 links per page
min_gain_threshold=0.05 # Need 5% information gain to continue
)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
print("Starting adaptive crawl about Python decorators...")
result = await adaptive.digest(
start_url="https://docs.python.org/3/glossary.html",
query="python decorators functions wrapping"
)
print(f"\n✅ Crawling Complete!")
print(f"• Confidence Level: {adaptive.confidence:.0%}")
print(f"• Pages Crawled: {len(result.crawled_urls)}")
print(f"• Knowledge Base: {len(adaptive.state.knowledge_base)} documents")
# Get most relevant content
relevant = adaptive.get_relevant_content(top_k=3)
print(f"\nMost Relevant Pages:")
for i, page in enumerate(relevant, 1):
print(f"{i}. {page['url']} (relevance: {page['score']:.2%})")
async def demo_virtual_scroll():
"""
Demo 3: Virtual Scroll for Modern Web Pages
Shows how to capture content from pages with DOM recycling
(Twitter, Instagram, infinite scroll).
"""
print("\n" + "="*60)
print("📜 DEMO 3: Virtual Scroll Support")
print("="*60)
# Configure virtual scroll for a news site
virtual_config = VirtualScrollConfig(
container_selector="main, article, .content", # Common containers
scroll_count=20, # Scroll up to 20 times
scroll_by="container_height", # Scroll by container height
wait_after_scroll=0.5 # Wait 500ms after each scroll
)
config = CrawlerRunConfig(
virtual_scroll_config=virtual_config,
cache_mode=CacheMode.BYPASS,
wait_for="css:article" # Wait for articles to load
)
# Example with a real news site
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
"https://news.ycombinator.com/",
config=config
)
if result.success:
# Count items captured
import re
items = len(re.findall(r'class="athing"', result.html))
print(f"\n✅ Captured {items} news items")
print(f"• HTML size: {len(result.html):,} bytes")
print(f"• Without virtual scroll, would only capture ~30 items")
async def demo_url_seeder():
"""
Demo 4: URL Seeder for Intelligent Discovery
Shows how to discover and filter URLs before crawling,
with relevance scoring.
"""
print("\n" + "="*60)
print("🌱 DEMO 4: URL Seeder - Smart URL Discovery")
print("="*60)
async with AsyncUrlSeeder() as seeder:
# Discover Python tutorial URLs
config = SeedingConfig(
source="sitemap", # Use sitemap
pattern="*python*", # URL pattern filter
extract_head=True, # Get metadata
query="python tutorial", # For relevance scoring
scoring_method="bm25",
score_threshold=0.2,
max_urls=10
)
print("Discovering Python async tutorial URLs...")
urls = await seeder.urls("https://www.geeksforgeeks.org/", config)
print(f"\n✅ Found {len(urls)} relevant URLs:")
for i, url_info in enumerate(urls[:5], 1):
print(f"\n{i}. {url_info['url']}")
if url_info.get('relevance_score'):
print(f" Relevance: {url_info['relevance_score']:.3f}")
if url_info.get('head_data', {}).get('title'):
print(f" Title: {url_info['head_data']['title'][:60]}...")
async def demo_c4a_script():
"""
Demo 5: C4A Script Language
Shows the domain-specific language for web automation
with JavaScript transpilation.
"""
print("\n" + "="*60)
print("🎭 DEMO 5: C4A Script - Web Automation Language")
print("="*60)
# Example C4A script
c4a_script = """
# E-commerce automation script
WAIT `body` 3
# Handle cookie banner
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept-cookies`
# Search for product
CLICK `.search-box`
TYPE "wireless headphones"
PRESS Enter
# Wait for results
WAIT `.product-grid` 10
# Load more products
REPEAT (SCROLL DOWN 500, `document.querySelectorAll('.product').length < 50`)
# Apply filter
IF (EXISTS `.price-filter`) THEN CLICK `input[data-max-price="100"]`
"""
# Compile the script
print("Compiling C4A script...")
result = c4a_compile(c4a_script)
if result.success:
print(f"✅ Successfully compiled to {len(result.js_code)} JavaScript statements!")
print("\nFirst 3 JS statements:")
for stmt in result.js_code[:3]:
print(f" • {stmt}")
# Use with crawler
config = CrawlerRunConfig(
c4a_script=c4a_script, # Pass C4A script directly
cache_mode=CacheMode.BYPASS
)
print("\n✅ Script ready for use with AsyncWebCrawler!")
else:
print(f"❌ Compilation error: {result.first_error.message}")
async def main():
"""Run all demos"""
print("\n🚀 Crawl4AI v0.7.0 Feature Demonstrations")
print("=" * 60)
demos = [
("Link Preview & Scoring", demo_link_preview),
("Adaptive Crawling", demo_adaptive_crawling),
("Virtual Scroll", demo_virtual_scroll),
("URL Seeder", demo_url_seeder),
("C4A Script", demo_c4a_script),
]
for name, demo_func in demos:
try:
await demo_func()
except Exception as e:
print(f"\n❌ Error in {name} demo: {str(e)}")
# Pause between demos
await asyncio.sleep(1)
print("\n" + "="*60)
print("✅ All demos completed!")
print("\nKey Takeaways:")
print("• Link Preview: 3-layer scoring for intelligent link analysis")
print("• Adaptive Crawling: Stop when you have enough information")
print("• Virtual Scroll: Capture all content from modern web pages")
print("• URL Seeder: Pre-discover and filter URLs efficiently")
print("• C4A Script: Simple language for complex automations")
if __name__ == "__main__":
asyncio.run(main()) | python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/v0.7.5_docker_hooks_demo.py | docs/releases_review/v0.7.5_docker_hooks_demo.py | #!/usr/bin/env python3
"""
🚀 Crawl4AI v0.7.5 - Docker Hooks System Complete Demonstration
================================================================
This file demonstrates the NEW Docker Hooks System introduced in v0.7.5.
The Docker Hooks System is a completely NEW feature that provides pipeline
customization through user-provided Python functions. It offers three approaches:
1. String-based hooks for REST API
2. hooks_to_string() utility to convert functions
3. Docker Client with automatic conversion (most convenient)
All three approaches are part of this NEW v0.7.5 feature!
Perfect for video recording and demonstration purposes.
Requirements:
- Docker container running: docker run -p 11235:11235 unclecode/crawl4ai:latest
- crawl4ai v0.7.5 installed: pip install crawl4ai==0.7.5
"""
import asyncio
import requests
import json
import time
from typing import Dict, Any
# Import Crawl4AI components
from crawl4ai import hooks_to_string
from crawl4ai.docker_client import Crawl4aiDockerClient
# Configuration
DOCKER_URL = "http://localhost:11235"
# DOCKER_URL = "http://localhost:11234"
TEST_URLS = [
# "https://httpbin.org/html",
"https://www.kidocode.com",
"https://quotes.toscrape.com",
]
def print_section(title: str, description: str = ""):
"""Print a formatted section header"""
print("\n" + "=" * 70)
print(f" {title}")
if description:
print(f" {description}")
print("=" * 70 + "\n")
def check_docker_service() -> bool:
"""Check if Docker service is running"""
try:
response = requests.get(f"{DOCKER_URL}/health", timeout=3)
return response.status_code == 200
    except requests.RequestException:
return False
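# check_docker_service() above performs a single probe. If the container was
# started only moments earlier it may still be booting, so a short wait loop
# is often more convenient. The helper below is a sketch with made-up timing
# defaults; it hits the same /health endpoint.
def wait_for_docker_service(timeout_s: int = 30, interval_s: float = 1.0) -> bool:
    """Poll the health endpoint until it answers 200 or the timeout expires."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        try:
            if requests.get(f"{DOCKER_URL}/health", timeout=3).status_code == 200:
                return True
        except requests.RequestException:
            pass
        time.sleep(interval_s)
    return False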
# ============================================================================
# REUSABLE HOOK LIBRARY (NEW in v0.7.5)
# ============================================================================
async def performance_optimization_hook(page, context, **kwargs):
"""
Performance Hook: Block unnecessary resources to speed up crawling
"""
print(" [Hook] 🚀 Optimizing performance - blocking images and ads...")
# Block images
await context.route(
"**/*.{png,jpg,jpeg,gif,webp,svg,ico}",
lambda route: route.abort()
)
# Block ads and analytics
await context.route("**/analytics/*", lambda route: route.abort())
await context.route("**/ads/*", lambda route: route.abort())
await context.route("**/google-analytics.com/*", lambda route: route.abort())
print(" [Hook] ✓ Performance optimization applied")
return page
async def viewport_setup_hook(page, context, **kwargs):
"""
Viewport Hook: Set consistent viewport size for rendering
"""
print(" [Hook] 🖥️ Setting viewport to 1920x1080...")
await page.set_viewport_size({"width": 1920, "height": 1080})
print(" [Hook] ✓ Viewport configured")
return page
async def authentication_headers_hook(page, context, url, **kwargs):
"""
Headers Hook: Add custom authentication and tracking headers
"""
print(f" [Hook] 🔐 Adding custom headers for {url[:50]}...")
await page.set_extra_http_headers({
'X-Crawl4AI-Version': '0.7.5',
'X-Custom-Hook': 'function-based-demo',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Crawl4AI/0.7.5 (Educational Demo)'
})
print(" [Hook] ✓ Custom headers added")
return page
async def lazy_loading_handler_hook(page, context, **kwargs):
"""
Content Hook: Handle lazy-loaded content by scrolling
"""
print(" [Hook] 📜 Scrolling to load lazy content...")
# Scroll to bottom
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
await page.wait_for_timeout(1000)
# Scroll to middle
await page.evaluate("window.scrollTo(0, document.body.scrollHeight / 2)")
await page.wait_for_timeout(500)
# Scroll back to top
await page.evaluate("window.scrollTo(0, 0)")
await page.wait_for_timeout(500)
print(" [Hook] ✓ Lazy content loaded")
return page
async def page_analytics_hook(page, context, **kwargs):
"""
Analytics Hook: Log page metrics before extraction
"""
print(" [Hook] 📊 Collecting page analytics...")
metrics = await page.evaluate('''
() => ({
title: document.title,
images: document.images.length,
links: document.links.length,
scripts: document.scripts.length,
headings: document.querySelectorAll('h1, h2, h3').length,
paragraphs: document.querySelectorAll('p').length
})
''')
print(f" [Hook] 📈 Page: {metrics['title'][:50]}...")
print(f" Links: {metrics['links']}, Images: {metrics['images']}, "
f"Headings: {metrics['headings']}, Paragraphs: {metrics['paragraphs']}")
return page
# ============================================================================
# DEMO 1: String-Based Hooks (NEW Docker Hooks System)
# ============================================================================
def demo_1_string_based_hooks():
"""
Demonstrate string-based hooks with REST API (part of NEW Docker Hooks System)
"""
print_section(
"DEMO 1: String-Based Hooks (REST API)",
"Part of the NEW Docker Hooks System - hooks as strings"
)
# Define hooks as strings
hooks_config = {
"on_page_context_created": """
async def hook(page, context, **kwargs):
print(" [String Hook] Setting up page context...")
# Block images for performance
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
await page.set_viewport_size({"width": 1920, "height": 1080})
return page
""",
"before_goto": """
async def hook(page, context, url, **kwargs):
print(f" [String Hook] Navigating to {url[:50]}...")
await page.set_extra_http_headers({
'X-Crawl4AI': 'string-based-hooks',
'X-Demo': 'v0.7.5'
})
return page
""",
"before_retrieve_html": """
async def hook(page, context, **kwargs):
print(" [String Hook] Scrolling page...")
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
await page.wait_for_timeout(1000)
return page
"""
}
# Prepare request payload
payload = {
"urls": [TEST_URLS[0]],
"hooks": {
"code": hooks_config,
"timeout": 30
},
"crawler_config": {
"cache_mode": "bypass"
}
}
print(f"🎯 Target URL: {TEST_URLS[0]}")
print(f"🔧 Configured {len(hooks_config)} string-based hooks")
print(f"📡 Sending request to Docker API...\n")
try:
start_time = time.time()
response = requests.post(f"{DOCKER_URL}/crawl", json=payload, timeout=60)
execution_time = time.time() - start_time
if response.status_code == 200:
result = response.json()
print(f"\n✅ Request successful! (took {execution_time:.2f}s)")
# Display results
if result.get('results') and result['results'][0].get('success'):
crawl_result = result['results'][0]
html_length = len(crawl_result.get('html', ''))
markdown_length = len(crawl_result.get('markdown', ''))
print(f"\n📊 Results:")
print(f" • HTML length: {html_length:,} characters")
print(f" • Markdown length: {markdown_length:,} characters")
print(f" • URL: {crawl_result.get('url')}")
# Check hooks execution
if 'hooks' in result:
hooks_info = result['hooks']
print(f"\n🎣 Hooks Execution:")
print(f" • Status: {hooks_info['status']['status']}")
print(f" • Attached hooks: {len(hooks_info['status']['attached_hooks'])}")
if 'summary' in hooks_info:
summary = hooks_info['summary']
print(f" • Total executions: {summary['total_executions']}")
print(f" • Successful: {summary['successful']}")
print(f" • Success rate: {summary['success_rate']:.1f}%")
else:
print(f"⚠️ Crawl completed but no results")
else:
print(f"❌ Request failed with status {response.status_code}")
print(f" Error: {response.text[:200]}")
except requests.exceptions.Timeout:
print("⏰ Request timed out after 60 seconds")
except Exception as e:
print(f"❌ Error: {str(e)}")
print("\n" + "─" * 70)
print("✓ String-based hooks demo complete\n")
# ============================================================================
# DEMO 2: Function-Based Hooks with hooks_to_string() Utility
# ============================================================================
def demo_2_hooks_to_string_utility():
"""
Demonstrate the new hooks_to_string() utility for converting functions
"""
print_section(
"DEMO 2: hooks_to_string() Utility (NEW! ✨)",
"Convert Python functions to strings for REST API"
)
print("📦 Creating hook functions...")
print(" • performance_optimization_hook")
print(" • viewport_setup_hook")
print(" • authentication_headers_hook")
print(" • lazy_loading_handler_hook")
# Convert function objects to strings using the NEW utility
print("\n🔄 Converting functions to strings with hooks_to_string()...")
hooks_dict = {
"on_page_context_created": performance_optimization_hook,
"before_goto": authentication_headers_hook,
"before_retrieve_html": lazy_loading_handler_hook,
}
hooks_as_strings = hooks_to_string(hooks_dict)
print(f"✅ Successfully converted {len(hooks_as_strings)} functions to strings")
# Show a preview
print("\n📝 Sample converted hook (first 250 characters):")
print("─" * 70)
sample_hook = list(hooks_as_strings.values())[0]
print(sample_hook[:250] + "...")
print("─" * 70)
# Use the converted hooks with REST API
print("\n📡 Using converted hooks with REST API...")
payload = {
"urls": [TEST_URLS[0]],
"hooks": {
"code": hooks_as_strings,
"timeout": 30
}
}
try:
start_time = time.time()
response = requests.post(f"{DOCKER_URL}/crawl", json=payload, timeout=60)
execution_time = time.time() - start_time
if response.status_code == 200:
result = response.json()
print(f"\n✅ Request successful! (took {execution_time:.2f}s)")
if result.get('results') and result['results'][0].get('success'):
crawl_result = result['results'][0]
print(f" • HTML length: {len(crawl_result.get('html', '')):,} characters")
print(f" • Hooks executed successfully!")
else:
print(f"❌ Request failed: {response.status_code}")
except Exception as e:
print(f"❌ Error: {str(e)}")
print("\n💡 Benefits of hooks_to_string():")
print(" ✓ Write hooks as regular Python functions")
print(" ✓ Full IDE support (autocomplete, syntax highlighting)")
print(" ✓ Type checking and linting")
print(" ✓ Easy to test and debug")
print(" ✓ Reusable across projects")
print(" ✓ Works with any REST API client")
print("\n" + "─" * 70)
print("✓ hooks_to_string() utility demo complete\n")
# ============================================================================
# DEMO 3: Docker Client with Automatic Conversion (RECOMMENDED! 🌟)
# ============================================================================
async def demo_3_docker_client_auto_conversion():
"""
Demonstrate Docker Client with automatic hook conversion (RECOMMENDED)
"""
print_section(
"DEMO 3: Docker Client with Auto-Conversion (RECOMMENDED! 🌟)",
"Pass function objects directly - conversion happens automatically!"
)
print("🐳 Initializing Crawl4AI Docker Client...")
client = Crawl4aiDockerClient(base_url=DOCKER_URL)
print("✅ Client ready!\n")
# Use our reusable hook library - just pass the function objects!
print("📚 Using reusable hook library:")
print(" • performance_optimization_hook")
print(" • viewport_setup_hook")
print(" • authentication_headers_hook")
print(" • lazy_loading_handler_hook")
print(" • page_analytics_hook")
print("\n🎯 Target URL: " + TEST_URLS[1])
print("🚀 Starting crawl with automatic hook conversion...\n")
try:
start_time = time.time()
# Pass function objects directly - NO manual conversion needed! ✨
results = await client.crawl(
urls=[TEST_URLS[0]],
hooks={
"on_page_context_created": performance_optimization_hook,
"before_goto": authentication_headers_hook,
"before_retrieve_html": lazy_loading_handler_hook,
"before_return_html": page_analytics_hook,
},
hooks_timeout=30
)
execution_time = time.time() - start_time
print(f"\n✅ Crawl completed! (took {execution_time:.2f}s)\n")
# Display results
if results and results.success:
result = results
print(f"📊 Results:")
print(f" • URL: {result.url}")
print(f" • Success: {result.success}")
print(f" • HTML length: {len(result.html):,} characters")
print(f" • Markdown length: {len(result.markdown):,} characters")
# Show metadata
if result.metadata:
print(f"\n📋 Metadata:")
print(f" • Title: {result.metadata.get('title', 'N/A')}")
print(f" • Description: {result.metadata.get('description', 'N/A')}")
# Show links
if result.links:
internal_count = len(result.links.get('internal', []))
external_count = len(result.links.get('external', []))
print(f"\n🔗 Links Found:")
print(f" • Internal: {internal_count}")
print(f" • External: {external_count}")
else:
print(f"⚠️ Crawl completed but no successful results")
if results:
print(f" Error: {results.error_message}")
except Exception as e:
print(f"❌ Error: {str(e)}")
import traceback
traceback.print_exc()
print("\n🌟 Why Docker Client is RECOMMENDED:")
print(" ✓ Automatic function-to-string conversion")
print(" ✓ No manual hooks_to_string() calls needed")
print(" ✓ Cleaner, more Pythonic code")
print(" ✓ Full type hints and IDE support")
print(" ✓ Built-in error handling")
print(" ✓ Async/await support")
print("\n" + "─" * 70)
print("✓ Docker Client auto-conversion demo complete\n")
# ============================================================================
# DEMO 4: Advanced Use Case - Complete Hook Pipeline
# ============================================================================
async def demo_4_complete_hook_pipeline():
"""
Demonstrate a complete hook pipeline using all 8 hook points
"""
print_section(
"DEMO 4: Complete Hook Pipeline",
"Using all 8 available hook points for comprehensive control"
)
# Define all 8 hooks
async def on_browser_created_hook(browser, **kwargs):
"""Hook 1: Called after browser is created"""
print(" [Pipeline] 1/8 Browser created")
return browser
async def on_page_context_created_hook(page, context, **kwargs):
"""Hook 2: Called after page context is created"""
print(" [Pipeline] 2/8 Page context created - setting up...")
await page.set_viewport_size({"width": 1920, "height": 1080})
return page
async def on_user_agent_updated_hook(page, context, user_agent, **kwargs):
"""Hook 3: Called when user agent is updated"""
print(f" [Pipeline] 3/8 User agent updated: {user_agent[:50]}...")
return page
async def before_goto_hook(page, context, url, **kwargs):
"""Hook 4: Called before navigating to URL"""
print(f" [Pipeline] 4/8 Before navigation to: {url[:60]}...")
return page
async def after_goto_hook(page, context, url, response, **kwargs):
"""Hook 5: Called after navigation completes"""
print(f" [Pipeline] 5/8 After navigation - Status: {response.status if response else 'N/A'}")
await page.wait_for_timeout(1000)
return page
async def on_execution_started_hook(page, context, **kwargs):
"""Hook 6: Called when JavaScript execution starts"""
print(" [Pipeline] 6/8 JavaScript execution started")
return page
async def before_retrieve_html_hook(page, context, **kwargs):
"""Hook 7: Called before retrieving HTML"""
print(" [Pipeline] 7/8 Before HTML retrieval - scrolling...")
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
return page
async def before_return_html_hook(page, context, html, **kwargs):
"""Hook 8: Called before returning HTML"""
print(f" [Pipeline] 8/8 Before return - HTML length: {len(html):,} chars")
return page
print("🎯 Target URL: " + TEST_URLS[0])
print("🔧 Configured ALL 8 hook points for complete pipeline control\n")
client = Crawl4aiDockerClient(base_url=DOCKER_URL)
try:
print("🚀 Starting complete pipeline crawl...\n")
start_time = time.time()
results = await client.crawl(
urls=[TEST_URLS[0]],
hooks={
"on_browser_created": on_browser_created_hook,
"on_page_context_created": on_page_context_created_hook,
"on_user_agent_updated": on_user_agent_updated_hook,
"before_goto": before_goto_hook,
"after_goto": after_goto_hook,
"on_execution_started": on_execution_started_hook,
"before_retrieve_html": before_retrieve_html_hook,
"before_return_html": before_return_html_hook,
},
hooks_timeout=45
)
execution_time = time.time() - start_time
if results and results.success:
print(f"\n✅ Complete pipeline executed successfully! (took {execution_time:.2f}s)")
print(f" • All 8 hooks executed in sequence")
print(f" • HTML length: {len(results.html):,} characters")
else:
print(f"⚠️ Pipeline completed with warnings")
except Exception as e:
print(f"❌ Error: {str(e)}")
print("\n📚 Available Hook Points:")
print(" 1. on_browser_created - Browser initialization")
print(" 2. on_page_context_created - Page context setup")
print(" 3. on_user_agent_updated - User agent configuration")
print(" 4. before_goto - Pre-navigation setup")
print(" 5. after_goto - Post-navigation processing")
print(" 6. on_execution_started - JavaScript execution start")
print(" 7. before_retrieve_html - Pre-extraction processing")
print(" 8. before_return_html - Final HTML processing")
print("\n" + "─" * 70)
print("✓ Complete hook pipeline demo complete\n")
# ============================================================================
# MAIN EXECUTION
# ============================================================================
async def main():
"""
Run all demonstrations
"""
print("\n" + "=" * 70)
print(" 🚀 Crawl4AI v0.7.5 - Docker Hooks Complete Demonstration")
print("=" * 70)
# Check Docker service
print("\n🔍 Checking Docker service status...")
if not check_docker_service():
print("❌ Docker service is not running!")
print("\n📋 To start the Docker service:")
print(" docker run -p 11235:11235 unclecode/crawl4ai:latest")
print("\nPlease start the service and run this demo again.")
return
print("✅ Docker service is running!\n")
# Run all demos
demos = [
("String-Based Hooks (REST API)", demo_1_string_based_hooks, False),
("hooks_to_string() Utility", demo_2_hooks_to_string_utility, False),
("Docker Client Auto-Conversion", demo_3_docker_client_auto_conversion, True),
# ("Complete Hook Pipeline", demo_4_complete_hook_pipeline, True),
]
for i, (name, demo_func, is_async) in enumerate(demos, 1):
print(f"\n{'🔷' * 35}")
print(f"Starting Demo {i}/{len(demos)}: {name}")
print(f"{'🔷' * 35}\n")
try:
if is_async:
await demo_func()
else:
demo_func()
print(f"✅ Demo {i} completed successfully!")
# Pause between demos (except the last one)
if i < len(demos):
print("\n⏸️ Press Enter to continue to next demo...")
# input()
except KeyboardInterrupt:
print(f"\n⏹️ Demo interrupted by user")
break
except Exception as e:
print(f"\n❌ Demo {i} failed: {str(e)}")
import traceback
traceback.print_exc()
print("\nContinuing to next demo...\n")
continue
# Final summary
print("\n" + "=" * 70)
print(" 🎉 All Demonstrations Complete!")
print("=" * 70)
print("\n📊 Summary of v0.7.5 Docker Hooks System:")
print("\n🆕 COMPLETELY NEW FEATURE in v0.7.5:")
print(" The Docker Hooks System lets you customize the crawling pipeline")
print(" with user-provided Python functions at 8 strategic points.")
print("\n✨ Three Ways to Use Docker Hooks (All NEW!):")
print(" 1. String-based - Write hooks as strings for REST API")
print(" 2. hooks_to_string() - Convert Python functions to strings")
print(" 3. Docker Client - Automatic conversion (RECOMMENDED)")
print("\n💡 Key Benefits:")
print(" ✓ Full IDE support (autocomplete, syntax highlighting)")
print(" ✓ Type checking and linting")
print(" ✓ Easy to test and debug")
print(" ✓ Reusable across projects")
print(" ✓ Complete pipeline control")
print("\n🎯 8 Hook Points Available:")
print(" • on_browser_created, on_page_context_created")
print(" • on_user_agent_updated, before_goto, after_goto")
print(" • on_execution_started, before_retrieve_html, before_return_html")
print("\n📚 Resources:")
print(" • Docs: https://docs.crawl4ai.com")
print(" • GitHub: https://github.com/unclecode/crawl4ai")
print(" • Discord: https://discord.gg/jP8KfhDhyN")
print("\n" + "=" * 70)
print(" Happy Crawling with v0.7.5! 🕷️")
print("=" * 70 + "\n")
if __name__ == "__main__":
print("\n🎬 Starting Crawl4AI v0.7.5 Docker Hooks Demonstration...")
print("Press Ctrl+C anytime to exit\n")
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\n\n👋 Demo stopped by user. Thanks for exploring Crawl4AI v0.7.5!")
except Exception as e:
print(f"\n\n❌ Demo error: {str(e)}")
import traceback
traceback.print_exc()
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/demo_v0.7.8.py | docs/releases_review/demo_v0.7.8.py | #!/usr/bin/env python3
"""
Crawl4AI v0.7.8 Release Demo - Verification Tests
==================================================
This demo ACTUALLY RUNS and VERIFIES the bug fixes in v0.7.8.
Each test executes real code and validates the fix is working.
Bug Fixes Verified:
1. ProxyConfig JSON serialization (#1629)
2. Configurable backoff parameters (#1269)
3. LLM Strategy input_format support (#1178)
4. Raw HTML URL variable (#1116)
5. Relative URLs after redirects (#1268)
6. pypdf migration (#1412)
7. Pydantic v2 ConfigDict (#678)
8. Docker ContentRelevanceFilter (#1642) - requires Docker
9. Docker .cache permissions (#1638) - requires Docker
10. AdaptiveCrawler query expansion (#1621) - requires LLM API key
11. Import statement formatting (#1181)
Usage:
python docs/releases_review/demo_v0.7.8.py
For Docker tests:
docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.8
python docs/releases_review/demo_v0.7.8.py
"""
import asyncio
import json
import sys
import warnings
import os
import tempfile
from typing import Tuple, Optional
from dataclasses import dataclass
# Test results tracking
@dataclass
class TestResult:
name: str
issue: str
passed: bool
message: str
skipped: bool = False
results: list[TestResult] = []
def print_header(title: str):
print(f"\n{'=' * 70}")
print(f"{title}")
print(f"{'=' * 70}")
def print_test(name: str, issue: str):
print(f"\n[TEST] {name} ({issue})")
print("-" * 50)
def record_result(name: str, issue: str, passed: bool, message: str, skipped: bool = False):
results.append(TestResult(name, issue, passed, message, skipped))
if skipped:
print(f" SKIPPED: {message}")
elif passed:
print(f" PASSED: {message}")
else:
print(f" FAILED: {message}")
# =============================================================================
# TEST 1: ProxyConfig JSON Serialization (#1629)
# =============================================================================
async def test_proxy_config_serialization():
"""
Verify BrowserConfig.to_dict() properly serializes ProxyConfig to JSON.
BEFORE: ProxyConfig was included as object, causing JSON serialization to fail
AFTER: ProxyConfig.to_dict() is called, producing valid JSON
"""
print_test("ProxyConfig JSON Serialization", "#1629")
try:
from crawl4ai import BrowserConfig
from crawl4ai.async_configs import ProxyConfig
# Create config with ProxyConfig
proxy = ProxyConfig(
server="http://proxy.example.com:8080",
username="testuser",
password="testpass"
)
browser_config = BrowserConfig(headless=True, proxy_config=proxy)
# Test 1: to_dict() should return dict for proxy_config
config_dict = browser_config.to_dict()
proxy_dict = config_dict.get('proxy_config')
if not isinstance(proxy_dict, dict):
record_result("ProxyConfig Serialization", "#1629", False,
f"proxy_config is {type(proxy_dict)}, expected dict")
return
# Test 2: Should be JSON serializable
try:
json_str = json.dumps(config_dict)
json.loads(json_str) # Verify valid JSON
except (TypeError, json.JSONDecodeError) as e:
record_result("ProxyConfig Serialization", "#1629", False,
f"JSON serialization failed: {e}")
return
# Test 3: Verify proxy data is preserved
if proxy_dict.get('server') != "http://proxy.example.com:8080":
record_result("ProxyConfig Serialization", "#1629", False,
"Proxy server not preserved in serialization")
return
record_result("ProxyConfig Serialization", "#1629", True,
"BrowserConfig with ProxyConfig serializes to valid JSON")
except Exception as e:
record_result("ProxyConfig Serialization", "#1629", False, f"Exception: {e}")
# =============================================================================
# TEST 2: Configurable Backoff Parameters (#1269)
# =============================================================================
async def test_configurable_backoff():
"""
Verify LLMConfig accepts and stores backoff configuration parameters.
BEFORE: Backoff was hardcoded (delay=2, attempts=3, factor=2)
AFTER: LLMConfig accepts backoff_base_delay, backoff_max_attempts, backoff_exponential_factor
"""
print_test("Configurable Backoff Parameters", "#1269")
try:
from crawl4ai import LLMConfig
# Test 1: Default values
default_config = LLMConfig(provider="openai/gpt-4o-mini")
if default_config.backoff_base_delay != 2:
record_result("Configurable Backoff", "#1269", False,
f"Default base_delay is {default_config.backoff_base_delay}, expected 2")
return
if default_config.backoff_max_attempts != 3:
record_result("Configurable Backoff", "#1269", False,
f"Default max_attempts is {default_config.backoff_max_attempts}, expected 3")
return
if default_config.backoff_exponential_factor != 2:
record_result("Configurable Backoff", "#1269", False,
f"Default exponential_factor is {default_config.backoff_exponential_factor}, expected 2")
return
# Test 2: Custom values
custom_config = LLMConfig(
provider="openai/gpt-4o-mini",
backoff_base_delay=5,
backoff_max_attempts=10,
backoff_exponential_factor=3
)
if custom_config.backoff_base_delay != 5:
record_result("Configurable Backoff", "#1269", False,
f"Custom base_delay is {custom_config.backoff_base_delay}, expected 5")
return
if custom_config.backoff_max_attempts != 10:
record_result("Configurable Backoff", "#1269", False,
f"Custom max_attempts is {custom_config.backoff_max_attempts}, expected 10")
return
if custom_config.backoff_exponential_factor != 3:
record_result("Configurable Backoff", "#1269", False,
f"Custom exponential_factor is {custom_config.backoff_exponential_factor}, expected 3")
return
# Test 3: to_dict() includes backoff params
config_dict = custom_config.to_dict()
if 'backoff_base_delay' not in config_dict:
record_result("Configurable Backoff", "#1269", False,
"backoff_base_delay missing from to_dict()")
return
record_result("Configurable Backoff", "#1269", True,
"LLMConfig accepts and stores custom backoff parameters")
except Exception as e:
record_result("Configurable Backoff", "#1269", False, f"Exception: {e}")
# =============================================================================
# TEST 3: LLM Strategy Input Format (#1178)
# =============================================================================
async def test_llm_input_format():
"""
Verify LLMExtractionStrategy accepts input_format parameter.
BEFORE: Always used markdown input
AFTER: Supports "markdown", "html", "fit_markdown", "cleaned_html", "fit_html"
"""
print_test("LLM Strategy Input Format", "#1178")
try:
from crawl4ai import LLMExtractionStrategy, LLMConfig
llm_config = LLMConfig(provider="openai/gpt-4o-mini")
# Test 1: Default is markdown
default_strategy = LLMExtractionStrategy(
llm_config=llm_config,
instruction="Extract data"
)
if default_strategy.input_format != "markdown":
record_result("LLM Input Format", "#1178", False,
f"Default input_format is '{default_strategy.input_format}', expected 'markdown'")
return
# Test 2: Can set to html
html_strategy = LLMExtractionStrategy(
llm_config=llm_config,
instruction="Extract data",
input_format="html"
)
if html_strategy.input_format != "html":
record_result("LLM Input Format", "#1178", False,
f"HTML input_format is '{html_strategy.input_format}', expected 'html'")
return
# Test 3: Can set to fit_markdown
fit_strategy = LLMExtractionStrategy(
llm_config=llm_config,
instruction="Extract data",
input_format="fit_markdown"
)
if fit_strategy.input_format != "fit_markdown":
record_result("LLM Input Format", "#1178", False,
f"fit_markdown input_format is '{fit_strategy.input_format}'")
return
record_result("LLM Input Format", "#1178", True,
"LLMExtractionStrategy accepts all input_format options")
except Exception as e:
record_result("LLM Input Format", "#1178", False, f"Exception: {e}")
# =============================================================================
# TEST 4: Raw HTML URL Variable (#1116)
# =============================================================================
async def test_raw_html_url_variable():
"""
Verify that raw: prefix URLs pass "Raw HTML" to extraction strategy.
BEFORE: Entire HTML blob was passed as URL parameter
AFTER: "Raw HTML" string is passed as URL parameter
"""
print_test("Raw HTML URL Variable", "#1116")
try:
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.extraction_strategy import ExtractionStrategy
# Custom strategy to capture what URL is passed
class URLCapturingStrategy(ExtractionStrategy):
captured_url = None
def extract(self, url: str, html: str, *args, **kwargs):
URLCapturingStrategy.captured_url = url
return [{"content": "test"}]
html_content = "<html><body><h1>Test</h1></body></html>"
strategy = URLCapturingStrategy()
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=f"raw:{html_content}",
config=CrawlerRunConfig(
extraction_strategy=strategy
)
)
captured = URLCapturingStrategy.captured_url
if captured is None:
record_result("Raw HTML URL Variable", "#1116", False,
"Extraction strategy was not called")
return
if captured == html_content or captured.startswith("<html"):
record_result("Raw HTML URL Variable", "#1116", False,
f"URL contains HTML content instead of 'Raw HTML': {captured[:50]}...")
return
if captured != "Raw HTML":
record_result("Raw HTML URL Variable", "#1116", False,
f"URL is '{captured}', expected 'Raw HTML'")
return
record_result("Raw HTML URL Variable", "#1116", True,
"Extraction strategy receives 'Raw HTML' as URL for raw: prefix")
except Exception as e:
record_result("Raw HTML URL Variable", "#1116", False, f"Exception: {e}")
# =============================================================================
# TEST 5: Relative URLs After Redirects (#1268)
# =============================================================================
async def test_redirect_url_handling():
"""
Verify that redirected_url reflects the final URL after JS navigation.
BEFORE: redirected_url was the original URL, not the final URL
AFTER: redirected_url is captured after JS execution completes
"""
print_test("Relative URLs After Redirects", "#1268")
try:
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
# Test with a URL that we know the final state of
# We'll use httpbin which doesn't redirect, but verify the mechanism works
test_url = "https://httpbin.org/html"
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=test_url,
config=CrawlerRunConfig()
)
# Verify redirected_url is populated
if not result.redirected_url:
record_result("Redirect URL Handling", "#1268", False,
"redirected_url is empty")
return
# For non-redirecting URL, should match original or be the final URL
if not result.redirected_url.startswith("https://httpbin.org"):
record_result("Redirect URL Handling", "#1268", False,
f"redirected_url is unexpected: {result.redirected_url}")
return
# Verify links are present and resolved
if result.links:
# Check that internal links have full URLs
internal_links = result.links.get('internal', [])
external_links = result.links.get('external', [])
all_links = internal_links + external_links
for link in all_links[:5]: # Check first 5 links
href = link.get('href', '')
if href and not href.startswith(('http://', 'https://', 'mailto:', 'tel:', '#', 'javascript:')):
record_result("Redirect URL Handling", "#1268", False,
f"Link not resolved to absolute URL: {href}")
return
record_result("Redirect URL Handling", "#1268", True,
f"redirected_url correctly captured: {result.redirected_url}")
except Exception as e:
record_result("Redirect URL Handling", "#1268", False, f"Exception: {e}")
# =============================================================================
# TEST 6: pypdf Migration (#1412)
# =============================================================================
async def test_pypdf_migration():
"""
Verify pypdf is used instead of deprecated PyPDF2.
BEFORE: Used PyPDF2 (deprecated since 2022)
AFTER: Uses pypdf (actively maintained)
"""
print_test("pypdf Migration", "#1412")
try:
# Test 1: pypdf should be importable (if pdf extra is installed)
try:
import pypdf
pypdf_available = True
pypdf_version = pypdf.__version__
except ImportError:
pypdf_available = False
pypdf_version = None
# Test 2: PyPDF2 should NOT be imported by crawl4ai
# Check if the processor uses pypdf
try:
from crawl4ai.processors.pdf import processor
            with open(processor.__file__, "r", encoding="utf-8") as f:
                processor_source = f.read()
uses_pypdf = 'from pypdf' in processor_source or 'import pypdf' in processor_source
uses_pypdf2 = 'from PyPDF2' in processor_source or 'import PyPDF2' in processor_source
if uses_pypdf2 and not uses_pypdf:
record_result("pypdf Migration", "#1412", False,
"PDF processor still uses PyPDF2")
return
if uses_pypdf:
record_result("pypdf Migration", "#1412", True,
f"PDF processor uses pypdf{' v' + pypdf_version if pypdf_version else ''}")
return
else:
record_result("pypdf Migration", "#1412", True,
"PDF processor found, pypdf dependency updated", skipped=not pypdf_available)
return
except ImportError:
# PDF processor not available
if pypdf_available:
record_result("pypdf Migration", "#1412", True,
f"pypdf v{pypdf_version} is installed (PDF processor not loaded)")
else:
record_result("pypdf Migration", "#1412", True,
"PDF support not installed (optional feature)", skipped=True)
return
except Exception as e:
record_result("pypdf Migration", "#1412", False, f"Exception: {e}")
# =============================================================================
# TEST 7: Pydantic v2 ConfigDict (#678)
# =============================================================================
async def test_pydantic_configdict():
"""
Verify no Pydantic deprecation warnings for Config class.
BEFORE: Used deprecated 'class Config' syntax
AFTER: Uses ConfigDict for Pydantic v2 compatibility
"""
print_test("Pydantic v2 ConfigDict", "#678")
try:
import pydantic
from pydantic import __version__ as pydantic_version
# Capture warnings during import
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
# Import models that might have Config classes
from crawl4ai.models import CrawlResult, MarkdownGenerationResult
from crawl4ai.async_configs import CrawlerRunConfig, BrowserConfig
# Filter for Pydantic-related deprecation warnings
pydantic_warnings = [
warning for warning in w
if 'pydantic' in str(warning.message).lower()
or 'config' in str(warning.message).lower()
]
if pydantic_warnings:
            warning_msgs = [str(pw.message) for pw in pydantic_warnings[:3]]
record_result("Pydantic ConfigDict", "#678", False,
f"Deprecation warnings: {warning_msgs}")
return
# Verify models work correctly
try:
# Test that models can be instantiated without issues
config = CrawlerRunConfig()
browser = BrowserConfig()
record_result("Pydantic ConfigDict", "#678", True,
f"No deprecation warnings with Pydantic v{pydantic_version}")
except Exception as e:
record_result("Pydantic ConfigDict", "#678", False,
f"Model instantiation failed: {e}")
except Exception as e:
record_result("Pydantic ConfigDict", "#678", False, f"Exception: {e}")
# =============================================================================
# TEST 8: Docker ContentRelevanceFilter (#1642)
# =============================================================================
async def test_docker_content_filter():
"""
Verify ContentRelevanceFilter deserializes correctly in Docker API.
BEFORE: Docker API failed to import/instantiate ContentRelevanceFilter
AFTER: Filter is properly exported and deserializable
"""
print_test("Docker ContentRelevanceFilter", "#1642")
# First verify the fix in local code
try:
# Test 1: ContentRelevanceFilter should be importable from crawl4ai
from crawl4ai import ContentRelevanceFilter
# Test 2: Should be instantiable
filter_instance = ContentRelevanceFilter(
query="test query",
threshold=0.3
)
if not hasattr(filter_instance, 'query'):
record_result("Docker ContentRelevanceFilter", "#1642", False,
"ContentRelevanceFilter missing query attribute")
return
except ImportError as e:
record_result("Docker ContentRelevanceFilter", "#1642", False,
f"ContentRelevanceFilter not exported: {e}")
return
except Exception as e:
record_result("Docker ContentRelevanceFilter", "#1642", False,
f"ContentRelevanceFilter instantiation failed: {e}")
return
# Test Docker API if available
try:
import httpx
async with httpx.AsyncClient(timeout=5.0) as client:
response = await client.get("http://localhost:11235/health")
if response.status_code != 200:
raise Exception("Docker not available")
# Docker is running, test the API
async with httpx.AsyncClient(timeout=30.0) as client:
request = {
"urls": ["https://httpbin.org/html"],
"crawler_config": {
"deep_crawl_strategy": {
"type": "BFSDeepCrawlStrategy",
"max_depth": 1,
"filter_chain": [
{
"type": "ContentTypeFilter",
"allowed_types": ["text/html"]
}
]
}
}
}
response = await client.post(
"http://localhost:11235/crawl",
json=request
)
if response.status_code == 200:
record_result("Docker ContentRelevanceFilter", "#1642", True,
"Filter deserializes correctly in Docker API")
else:
record_result("Docker ContentRelevanceFilter", "#1642", False,
f"Docker API returned {response.status_code}: {response.text[:100]}")
except ImportError:
record_result("Docker ContentRelevanceFilter", "#1642", True,
"ContentRelevanceFilter exportable (Docker test skipped - httpx not installed)",
skipped=True)
except Exception as e:
record_result("Docker ContentRelevanceFilter", "#1642", True,
f"ContentRelevanceFilter exportable (Docker test skipped: {e})",
skipped=True)
# =============================================================================
# TEST 9: Docker Cache Permissions (#1638)
# =============================================================================
async def test_docker_cache_permissions():
"""
Verify Docker image has correct .cache folder permissions.
This test requires Docker container to be running.
"""
print_test("Docker Cache Permissions", "#1638")
try:
import httpx
async with httpx.AsyncClient(timeout=5.0) as client:
response = await client.get("http://localhost:11235/health")
if response.status_code != 200:
raise Exception("Docker not available")
# Test by making a crawl request with caching
async with httpx.AsyncClient(timeout=60.0) as client:
request = {
"urls": ["https://httpbin.org/html"],
"crawler_config": {
"cache_mode": "enabled"
}
}
response = await client.post(
"http://localhost:11235/crawl",
json=request
)
if response.status_code == 200:
result = response.json()
# Check if there were permission errors
if "permission" in str(result).lower() and "denied" in str(result).lower():
record_result("Docker Cache Permissions", "#1638", False,
"Permission denied error in response")
else:
record_result("Docker Cache Permissions", "#1638", True,
"Crawl with caching succeeded in Docker")
else:
error_text = response.text[:200]
if "permission" in error_text.lower():
record_result("Docker Cache Permissions", "#1638", False,
f"Permission error: {error_text}")
else:
record_result("Docker Cache Permissions", "#1638", False,
f"Request failed: {response.status_code}")
except ImportError:
record_result("Docker Cache Permissions", "#1638", True,
"Skipped - httpx not installed", skipped=True)
except Exception as e:
record_result("Docker Cache Permissions", "#1638", True,
f"Skipped - Docker not available: {e}", skipped=True)
# =============================================================================
# TEST 10: AdaptiveCrawler Query Expansion (#1621)
# =============================================================================
async def test_adaptive_crawler_embedding():
"""
Verify EmbeddingStrategy LLM code is uncommented and functional.
BEFORE: LLM call was commented out, using hardcoded mock data
AFTER: Actually calls LLM for query expansion
"""
print_test("AdaptiveCrawler Query Expansion", "#1621")
try:
# Read the source file to verify the fix
import crawl4ai.adaptive_crawler as adaptive_module
source_file = adaptive_module.__file__
with open(source_file, 'r') as f:
source_code = f.read()
# Check that the LLM call is NOT commented out
# Look for the perform_completion_with_backoff call
# Find the EmbeddingStrategy section
if 'class EmbeddingStrategy' not in source_code:
record_result("AdaptiveCrawler Query Expansion", "#1621", True,
"EmbeddingStrategy not in adaptive_crawler (may have moved)",
skipped=True)
return
# Check if the mock data line is commented out
# and the actual LLM call is NOT commented out
lines = source_code.split('\n')
in_embedding_strategy = False
found_llm_call = False
mock_data_commented = False
for i, line in enumerate(lines):
if 'class EmbeddingStrategy' in line:
in_embedding_strategy = True
elif in_embedding_strategy and line.strip().startswith('class '):
in_embedding_strategy = False
if in_embedding_strategy:
# Check for uncommented LLM call
if 'perform_completion_with_backoff' in line and not line.strip().startswith('#'):
found_llm_call = True
# Check for commented mock data
if "variations ={'queries'" in line or 'variations = {\'queries\'' in line:
if line.strip().startswith('#'):
mock_data_commented = True
if found_llm_call:
record_result("AdaptiveCrawler Query Expansion", "#1621", True,
"LLM call is active in EmbeddingStrategy")
else:
# Check if the entire embedding strategy exists but might be structured differently
if 'perform_completion_with_backoff' in source_code:
record_result("AdaptiveCrawler Query Expansion", "#1621", True,
"perform_completion_with_backoff found in module")
else:
record_result("AdaptiveCrawler Query Expansion", "#1621", False,
"LLM call not found or still commented out")
except Exception as e:
record_result("AdaptiveCrawler Query Expansion", "#1621", False, f"Exception: {e}")
# =============================================================================
# TEST 11: Import Statement Formatting (#1181)
# =============================================================================
async def test_import_formatting():
"""
Verify code extraction properly formats import statements.
BEFORE: Import statements were concatenated without newlines
AFTER: Import statements have proper newline separation
"""
print_test("Import Statement Formatting", "#1181")
try:
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
# Create HTML with code containing imports
html_with_code = """
<html>
<body>
<pre><code>
import os
import sys
from pathlib import Path
from typing import List, Dict
def main():
pass
</code></pre>
</body>
</html>
"""
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=f"raw:{html_with_code}",
config=CrawlerRunConfig()
)
markdown = result.markdown.raw_markdown if result.markdown else ""
# Check that imports are not concatenated on the same line
# Bad: "import osimport sys" (no newline between statements)
# This is the actual bug - statements getting merged on same line
bad_patterns = [
"import os import sys", # Space but no newline
"import osimport sys", # No space or newline
"import os from pathlib", # Space but no newline
"import osfrom pathlib", # No space or newline
]
markdown_single_line = markdown.replace('\n', ' ') # Convert newlines to spaces
for pattern in bad_patterns:
# Check if pattern exists without proper line separation
if pattern.replace(' ', '') in markdown_single_line.replace(' ', ''):
# Verify it's actually on same line (not just adjacent after newline removal)
lines = markdown.split('\n')
for line in lines:
if 'import' in line.lower():
# Count import statements on this line
import_count = line.lower().count('import ')
if import_count > 1:
record_result("Import Formatting", "#1181", False,
f"Multiple imports on same line: {line[:60]}...")
return
# Verify imports are present
if "import" in markdown.lower():
record_result("Import Formatting", "#1181", True,
"Import statements are properly line-separated")
else:
record_result("Import Formatting", "#1181", True,
"No import statements found to verify (test HTML may have changed)")
except Exception as e:
record_result("Import Formatting", "#1181", False, f"Exception: {e}")
# =============================================================================
# COMPREHENSIVE CRAWL TEST
# =============================================================================
async def test_comprehensive_crawl():
"""
Run a comprehensive crawl to verify overall stability.
"""
print_test("Comprehensive Crawl Test", "Overall")
try:
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig
async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
result = await crawler.arun(
url="https://httpbin.org/html",
config=CrawlerRunConfig()
)
# Verify result
checks = []
if result.success:
checks.append("success=True")
else:
record_result("Comprehensive Crawl", "Overall", False,
f"Crawl failed: {result.error_message}")
return
if result.html and len(result.html) > 100:
checks.append(f"html={len(result.html)} chars")
if result.markdown and result.markdown.raw_markdown:
checks.append(f"markdown={len(result.markdown.raw_markdown)} chars")
if result.redirected_url:
checks.append("redirected_url present")
record_result("Comprehensive Crawl", "Overall", True,
f"All checks passed: {', '.join(checks)}")
except Exception as e:
record_result("Comprehensive Crawl", "Overall", False, f"Exception: {e}")
# =============================================================================
# MAIN
# =============================================================================
def print_summary():
"""Print test results summary"""
print_header("TEST RESULTS SUMMARY")
passed = sum(1 for r in results if r.passed and not r.skipped)
failed = sum(1 for r in results if not r.passed and not r.skipped)
skipped = sum(1 for r in results if r.skipped)
print(f"\nTotal: {len(results)} tests")
print(f" Passed: {passed}")
print(f" Failed: {failed}")
print(f" Skipped: {skipped}")
if failed > 0:
print("\nFailed Tests:")
for r in results:
if not r.passed and not r.skipped:
print(f" - {r.name} ({r.issue}): {r.message}")
if skipped > 0:
print("\nSkipped Tests:")
for r in results:
if r.skipped:
print(f" - {r.name} ({r.issue}): {r.message}")
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | true |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/v0_4_24_walkthrough.py | docs/releases_review/v0_4_24_walkthrough.py | """
Crawl4AI v0.4.24 Feature Walkthrough
===================================
This script demonstrates the new features introduced in Crawl4AI v0.4.24.
Each section includes detailed examples and explanations of the new capabilities.
"""
import asyncio
import os
import json
import re
from typing import List
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
CacheMode,
LLMExtractionStrategy,
JsonCssExtractionStrategy,
)
from crawl4ai.content_filter_strategy import RelevantContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from bs4 import BeautifulSoup
# Sample HTML for demonstrations
SAMPLE_HTML = """
<div class="article-list">
<article class="post" data-category="tech" data-author="john">
<h2 class="title"><a href="/post-1">First Post</a></h2>
<div class="meta">
<a href="/author/john" class="author">John Doe</a>
<span class="date">2023-12-31</span>
</div>
<div class="content">
<p>First post content...</p>
<a href="/read-more-1" class="read-more">Read More</a>
</div>
</article>
<article class="post" data-category="science" data-author="jane">
<h2 class="title"><a href="/post-2">Second Post</a></h2>
<div class="meta">
<a href="/author/jane" class="author">Jane Smith</a>
<span class="date">2023-12-30</span>
</div>
<div class="content">
<p>Second post content...</p>
<a href="/read-more-2" class="read-more">Read More</a>
</div>
</article>
</div>
"""
async def demo_ssl_features():
"""
Enhanced SSL & Security Features Demo
-----------------------------------
This example demonstrates the new SSL certificate handling and security features:
1. Custom certificate paths
2. SSL verification options
3. HTTPS error handling
4. Certificate validation configurations
These features are particularly useful when:
- Working with self-signed certificates
- Dealing with corporate proxies
- Handling mixed content websites
- Managing different SSL security levels
"""
print("\n1. Enhanced SSL & Security Demo")
print("--------------------------------")
browser_config = BrowserConfig()
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
fetch_ssl_certificate=True, # Enable SSL certificate fetching
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url="https://example.com", config=run_config)
print(f"SSL Crawl Success: {result.success}")
result.ssl_certificate.to_json(
os.path.join(os.getcwd(), "ssl_certificate.json")
)
if not result.success:
print(f"SSL Error: {result.error_message}")
async def demo_content_filtering():
"""
Smart Content Filtering Demo
----------------------
Demonstrates advanced content filtering capabilities:
1. Custom filter to identify and extract specific content
2. Integration with markdown generation
3. Flexible pruning rules
"""
print("\n2. Smart Content Filtering Demo")
print("--------------------------------")
# Create a custom content filter
class CustomNewsFilter(RelevantContentFilter):
def __init__(self):
super().__init__()
# Add news-specific patterns
self.negative_patterns = re.compile(
r"nav|footer|header|sidebar|ads|comment|share|related|recommended|popular|trending",
re.I,
)
self.min_word_count = 30 # Higher threshold for news content
def filter_content(
self, html: str, min_word_threshold: int = None
) -> List[str]:
"""
Implements news-specific content filtering logic.
Args:
html (str): HTML content to be filtered
min_word_threshold (int, optional): Minimum word count threshold
Returns:
List[str]: List of filtered HTML content blocks
"""
if not html or not isinstance(html, str):
return []
soup = BeautifulSoup(html, "lxml")
if not soup.body:
soup = BeautifulSoup(f"<body>{html}</body>", "lxml")
body = soup.find("body")
# Extract chunks with metadata
chunks = self.extract_text_chunks(
body, min_word_threshold or self.min_word_count
)
# Filter chunks based on news-specific criteria
filtered_chunks = []
for _, text, tag_type, element in chunks:
# Skip if element has negative class/id
if self.is_excluded(element):
continue
# Headers are important in news articles
if tag_type == "header":
filtered_chunks.append(self.clean_element(element))
continue
# For content, check word count and link density
text = element.get_text(strip=True)
if len(text.split()) >= (min_word_threshold or self.min_word_count):
# Calculate link density
links_text = " ".join(
a.get_text(strip=True) for a in element.find_all("a")
)
link_density = len(links_text) / len(text) if text else 1
# Accept if link density is reasonable
if link_density < 0.5:
filtered_chunks.append(self.clean_element(element))
return filtered_chunks
# Create markdown generator with custom filter
markdown_gen = DefaultMarkdownGenerator(content_filter=CustomNewsFilter())
run_config = CrawlerRunConfig(
markdown_generator=markdown_gen, cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url="https://news.ycombinator.com", config=run_config
)
print("Filtered Content Sample:")
print(result.markdown[:500]) # Show first 500 chars
async def demo_json_extraction():
"""
Improved JSON Extraction Demo
---------------------------
Demonstrates the enhanced JSON extraction capabilities:
1. Base element attributes extraction
2. Complex nested structures
3. Multiple extraction patterns
Key features shown:
- Extracting attributes from base elements (href, data-* attributes)
- Processing repeated patterns
- Handling optional fields
"""
print("\n3. Improved JSON Extraction Demo")
print("--------------------------------")
# Define the extraction schema with base element attributes
json_strategy = JsonCssExtractionStrategy(
schema={
"name": "Blog Posts",
"baseSelector": "div.article-list",
"baseFields": [
{"name": "list_id", "type": "attribute", "attribute": "data-list-id"},
{"name": "category", "type": "attribute", "attribute": "data-category"},
],
"fields": [
{
"name": "posts",
"selector": "article.post",
"type": "nested_list",
"baseFields": [
{
"name": "post_id",
"type": "attribute",
"attribute": "data-post-id",
},
{
"name": "author_id",
"type": "attribute",
"attribute": "data-author",
},
],
"fields": [
{
"name": "title",
"selector": "h2.title a",
"type": "text",
"baseFields": [
{
"name": "url",
"type": "attribute",
"attribute": "href",
}
],
},
{
"name": "author",
"selector": "div.meta a.author",
"type": "text",
"baseFields": [
{
"name": "profile_url",
"type": "attribute",
"attribute": "href",
}
],
},
{"name": "date", "selector": "span.date", "type": "text"},
{
"name": "read_more",
"selector": "a.read-more",
"type": "nested",
"fields": [
{"name": "text", "type": "text"},
{
"name": "url",
"type": "attribute",
"attribute": "href",
},
],
},
],
}
],
}
)
# Demonstrate extraction from raw HTML
run_config = CrawlerRunConfig(
extraction_strategy=json_strategy, cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url="raw:" + SAMPLE_HTML, # Use raw: prefix for raw HTML
config=run_config,
)
print("Extracted Content:")
print(result.extracted_content)
async def demo_input_formats():
"""
Input Format Handling Demo
----------------------
Demonstrates how LLM extraction can work with different input formats:
1. Markdown (default) - Good for simple text extraction
2. HTML - Better when you need structure and attributes
This example shows how HTML input can be beneficial when:
- You need to understand the DOM structure
- You want to extract both visible text and HTML attributes
- The content has complex layouts like tables or forms
"""
print("\n4. Input Format Handling Demo")
print("---------------------------")
# Create a dummy HTML with rich structure
dummy_html = """
<div class="job-posting" data-post-id="12345">
<header class="job-header">
<h1 class="job-title">Senior AI/ML Engineer</h1>
<div class="job-meta">
<span class="department">AI Research Division</span>
<span class="location" data-remote="hybrid">San Francisco (Hybrid)</span>
</div>
<div class="salary-info" data-currency="USD">
<span class="range">$150,000 - $220,000</span>
<span class="period">per year</span>
</div>
</header>
<section class="requirements">
<div class="technical-skills">
<h3>Technical Requirements</h3>
<ul class="required-skills">
<li class="skill required" data-priority="must-have">
5+ years experience in Machine Learning
</li>
<li class="skill required" data-priority="must-have">
Proficiency in Python and PyTorch/TensorFlow
</li>
<li class="skill preferred" data-priority="nice-to-have">
Experience with distributed training systems
</li>
</ul>
</div>
<div class="soft-skills">
<h3>Professional Skills</h3>
<ul class="required-skills">
<li class="skill required" data-priority="must-have">
Strong problem-solving abilities
</li>
<li class="skill preferred" data-priority="nice-to-have">
Experience leading technical teams
</li>
</ul>
</div>
</section>
<section class="timeline">
<time class="deadline" datetime="2024-02-28">
Application Deadline: February 28, 2024
</time>
</section>
<footer class="contact-section">
<div class="hiring-manager">
<h4>Hiring Manager</h4>
<div class="contact-info">
<span class="name">Dr. Sarah Chen</span>
<span class="title">Director of AI Research</span>
<span class="email">ai.hiring@example.com</span>
</div>
</div>
<div class="team-info">
<p>Join our team of 50+ researchers working on cutting-edge AI applications</p>
</div>
</footer>
</div>
"""
# Use raw:// prefix to pass HTML content directly
url = f"raw://{dummy_html}"
from pydantic import BaseModel, Field
from typing import List, Optional
# Define our schema using Pydantic
class JobRequirement(BaseModel):
category: str = Field(
description="Category of the requirement (e.g., Technical, Soft Skills)"
)
items: List[str] = Field(
description="List of specific requirements in this category"
)
priority: str = Field(
description="Priority level (Required/Preferred) based on the HTML class or context"
)
class JobPosting(BaseModel):
title: str = Field(description="Job title")
department: str = Field(description="Department or team")
location: str = Field(description="Job location, including remote options")
salary_range: Optional[str] = Field(description="Salary range if specified")
requirements: List[JobRequirement] = Field(
description="Categorized job requirements"
)
application_deadline: Optional[str] = Field(
description="Application deadline if specified"
)
contact_info: Optional[dict] = Field(
description="Contact information from footer or contact section"
)
# First try with markdown (default)
markdown_strategy = LLMExtractionStrategy(
provider="openai/gpt-4o",
api_token=os.getenv("OPENAI_API_KEY"),
schema=JobPosting.model_json_schema(),
extraction_type="schema",
instruction="""
Extract job posting details into structured data. Focus on the visible text content
and organize requirements into categories.
""",
input_format="markdown", # default
)
# Then with HTML for better structure understanding
html_strategy = LLMExtractionStrategy(
provider="openai/gpt-4",
api_token=os.getenv("OPENAI_API_KEY"),
schema=JobPosting.model_json_schema(),
extraction_type="schema",
instruction="""
Extract job posting details, using HTML structure to:
1. Identify requirement priorities from CSS classes (e.g., 'required' vs 'preferred')
2. Extract contact info from the page footer or dedicated contact section
3. Parse salary information from specially formatted elements
4. Determine application deadline from timestamp or date elements
Use HTML attributes and classes to enhance extraction accuracy.
""",
input_format="html", # explicitly use HTML
)
async with AsyncWebCrawler() as crawler:
# Try with markdown first
markdown_config = CrawlerRunConfig(extraction_strategy=markdown_strategy)
markdown_result = await crawler.arun(url=url, config=markdown_config)
print("\nMarkdown-based Extraction Result:")
items = json.loads(markdown_result.extracted_content)
print(json.dumps(items, indent=2))
# Then with HTML for better structure understanding
html_config = CrawlerRunConfig(extraction_strategy=html_strategy)
html_result = await crawler.arun(url=url, config=html_config)
print("\nHTML-based Extraction Result:")
items = json.loads(html_result.extracted_content)
print(json.dumps(items, indent=2))
# Main execution
async def main():
print("Crawl4AI v0.4.24 Feature Walkthrough")
print("====================================")
# Run all demos
await demo_ssl_features()
await demo_content_filtering()
await demo_json_extraction()
# await demo_input_formats()
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/demo_v0.7.6.py | docs/releases_review/demo_v0.7.6.py | #!/usr/bin/env python3
"""
Crawl4AI v0.7.6 Release Demo
============================
This demo showcases the major feature in v0.7.6:
**Webhook Support for Docker Job Queue API**
Features Demonstrated:
1. Asynchronous job processing with webhook notifications
2. Webhook support for /crawl/job endpoint
3. Webhook support for /llm/job endpoint
4. Notification-only vs data-in-payload modes
5. Custom webhook headers for authentication
6. Structured extraction with JSON schemas
7. Exponential backoff retry for reliable delivery
Prerequisites:
- Crawl4AI Docker container running on localhost:11235
- Flask installed: pip install flask requests
- LLM API key configured (for LLM examples)
Usage:
python docs/releases_review/demo_v0.7.6.py
"""
import requests
import json
import time
from flask import Flask, request, jsonify
from threading import Thread
# Configuration
CRAWL4AI_BASE_URL = "http://localhost:11235"
WEBHOOK_BASE_URL = "http://localhost:8080"
# Flask app for webhook receiver
app = Flask(__name__)
received_webhooks = []
@app.route('/webhook', methods=['POST'])
def webhook_handler():
"""Universal webhook handler for both crawl and LLM extraction jobs."""
payload = request.json
task_id = payload['task_id']
task_type = payload['task_type']
status = payload['status']
print(f"\n{'='*70}")
print(f"📬 Webhook Received!")
print(f" Task ID: {task_id}")
print(f" Task Type: {task_type}")
print(f" Status: {status}")
print(f" Timestamp: {payload['timestamp']}")
if status == 'completed':
if 'data' in payload:
print(f" ✅ Data included in webhook")
if task_type == 'crawl':
results = payload['data'].get('results', [])
print(f" 📊 Crawled {len(results)} URL(s)")
elif task_type == 'llm_extraction':
extracted = payload['data'].get('extracted_content', {})
print(f" 🤖 Extracted: {json.dumps(extracted, indent=6)}")
else:
print(f" 📥 Notification only (fetch data separately)")
elif status == 'failed':
print(f" ❌ Error: {payload.get('error', 'Unknown')}")
print(f"{'='*70}\n")
received_webhooks.append(payload)
return jsonify({"status": "received"}), 200
def start_webhook_server():
"""Start Flask webhook server in background."""
app.run(host='0.0.0.0', port=8080, debug=False, use_reloader=False)
def demo_1_crawl_webhook_notification_only():
"""Demo 1: Crawl job with webhook notification (data fetched separately)."""
print("\n" + "="*70)
print("DEMO 1: Crawl Job - Webhook Notification Only")
print("="*70)
print("Submitting crawl job with webhook notification...")
payload = {
"urls": ["https://example.com"],
"browser_config": {"headless": True},
"crawler_config": {"cache_mode": "bypass"},
"webhook_config": {
"webhook_url": f"{WEBHOOK_BASE_URL}/webhook",
"webhook_data_in_payload": False,
"webhook_headers": {
"X-Demo": "v0.7.6",
"X-Type": "crawl"
}
}
}
response = requests.post(f"{CRAWL4AI_BASE_URL}/crawl/job", json=payload)
if response.ok:
task_id = response.json()['task_id']
print(f"✅ Job submitted: {task_id}")
print("⏳ Webhook will notify when complete...")
return task_id
else:
print(f"❌ Failed: {response.text}")
return None
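# Sketch of the "fetch data separately" step for notification-only webhooks.
# The status endpoint below is an assumption (a GET keyed by task_id, e.g.
# /crawl/job/{task_id}); verify it against your deployment's API docs before
# relying on it. Not called by main(); illustrative only.
def fetch_job_result_sketch(task_id: str):
    response = requests.get(f"{CRAWL4AI_BASE_URL}/crawl/job/{task_id}")  # assumed endpoint
    if response.ok:
        return response.json()
    print(f"❌ Could not fetch result for {task_id}: {response.status_code}")
    return None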
def demo_2_crawl_webhook_with_data():
"""Demo 2: Crawl job with full data in webhook payload."""
print("\n" + "="*70)
print("DEMO 2: Crawl Job - Webhook with Full Data")
print("="*70)
print("Submitting crawl job with data included in webhook...")
payload = {
"urls": ["https://www.python.org"],
"browser_config": {"headless": True},
"crawler_config": {"cache_mode": "bypass"},
"webhook_config": {
"webhook_url": f"{WEBHOOK_BASE_URL}/webhook",
"webhook_data_in_payload": True,
"webhook_headers": {
"X-Demo": "v0.7.6",
"X-Type": "crawl-with-data"
}
}
}
response = requests.post(f"{CRAWL4AI_BASE_URL}/crawl/job", json=payload)
if response.ok:
task_id = response.json()['task_id']
print(f"✅ Job submitted: {task_id}")
print("⏳ Webhook will include full results...")
return task_id
else:
print(f"❌ Failed: {response.text}")
return None
def demo_3_llm_webhook_notification_only():
"""Demo 3: LLM extraction with webhook notification (NEW in v0.7.6!)."""
print("\n" + "="*70)
print("DEMO 3: LLM Extraction - Webhook Notification Only (NEW!)")
print("="*70)
print("Submitting LLM extraction job with webhook notification...")
payload = {
"url": "https://www.example.com",
"q": "Extract the main heading and description from this page",
"provider": "openai/gpt-4o-mini",
"cache": False,
"webhook_config": {
"webhook_url": f"{WEBHOOK_BASE_URL}/webhook",
"webhook_data_in_payload": False,
"webhook_headers": {
"X-Demo": "v0.7.6",
"X-Type": "llm"
}
}
}
response = requests.post(f"{CRAWL4AI_BASE_URL}/llm/job", json=payload)
if response.ok:
task_id = response.json()['task_id']
print(f"✅ Job submitted: {task_id}")
print("⏳ Webhook will notify when LLM extraction completes...")
return task_id
else:
print(f"❌ Failed: {response.text}")
return None
def demo_4_llm_webhook_with_schema():
"""Demo 4: LLM extraction with JSON schema and data in webhook (NEW in v0.7.6!)."""
print("\n" + "="*70)
print("DEMO 4: LLM Extraction - Schema + Full Data in Webhook (NEW!)")
print("="*70)
print("Submitting LLM extraction with JSON schema...")
schema = {
"type": "object",
"properties": {
"title": {"type": "string", "description": "Page title"},
"description": {"type": "string", "description": "Page description"},
"main_topics": {
"type": "array",
"items": {"type": "string"},
"description": "Main topics covered"
}
},
"required": ["title"]
}
payload = {
"url": "https://www.python.org",
"q": "Extract the title, description, and main topics from this website",
"schema": json.dumps(schema),
"provider": "openai/gpt-4o-mini",
"cache": False,
"webhook_config": {
"webhook_url": f"{WEBHOOK_BASE_URL}/webhook",
"webhook_data_in_payload": True,
"webhook_headers": {
"X-Demo": "v0.7.6",
"X-Type": "llm-with-schema"
}
}
}
response = requests.post(f"{CRAWL4AI_BASE_URL}/llm/job", json=payload)
if response.ok:
task_id = response.json()['task_id']
print(f"✅ Job submitted: {task_id}")
print("⏳ Webhook will include structured extraction results...")
return task_id
else:
print(f"❌ Failed: {response.text}")
return None
def demo_5_global_webhook_config():
"""Demo 5: Using global webhook configuration from config.yml."""
print("\n" + "="*70)
print("DEMO 5: Global Webhook Configuration")
print("="*70)
print("💡 You can configure a default webhook URL in config.yml:")
print("""
webhooks:
enabled: true
default_url: "https://myapp.com/webhooks/default"
data_in_payload: false
retry:
max_attempts: 5
initial_delay_ms: 1000
max_delay_ms: 32000
timeout_ms: 30000
""")
print("Then submit jobs WITHOUT webhook_config - they'll use the default!")
print("This is useful for consistent webhook handling across all jobs.")
def demo_6_webhook_retry_logic():
"""Demo 6: Webhook retry mechanism with exponential backoff."""
print("\n" + "="*70)
print("DEMO 6: Webhook Retry Logic")
print("="*70)
print("🔄 Webhook delivery uses exponential backoff retry:")
print(" • Max attempts: 5")
print(" • Delays: 1s → 2s → 4s → 8s → 16s")
print(" • Timeout: 30s per attempt")
print(" • Retries on: 5xx errors, network errors, timeouts")
print(" • No retry on: 4xx client errors")
print("\nThis ensures reliable webhook delivery even with temporary failures!")
def print_summary():
"""Print demo summary and results."""
print("\n" + "="*70)
print("📊 DEMO SUMMARY")
print("="*70)
print(f"Total webhooks received: {len(received_webhooks)}")
crawl_webhooks = [w for w in received_webhooks if w['task_type'] == 'crawl']
llm_webhooks = [w for w in received_webhooks if w['task_type'] == 'llm_extraction']
print(f"\nBreakdown:")
print(f" 🕷️ Crawl jobs: {len(crawl_webhooks)}")
print(f" 🤖 LLM extraction jobs: {len(llm_webhooks)}")
print(f"\nDetails:")
for i, webhook in enumerate(received_webhooks, 1):
icon = "🕷️" if webhook['task_type'] == 'crawl' else "🤖"
print(f" {i}. {icon} {webhook['task_id']}: {webhook['status']}")
print("\n" + "="*70)
print("✨ v0.7.6 KEY FEATURES DEMONSTRATED:")
print("="*70)
print("✅ Webhook support for /crawl/job")
print("✅ Webhook support for /llm/job (NEW!)")
print("✅ Notification-only mode (fetch data separately)")
print("✅ Data-in-payload mode (get full results in webhook)")
print("✅ Custom headers for authentication")
print("✅ JSON schema for structured LLM extraction")
print("✅ Exponential backoff retry for reliable delivery")
print("✅ Global webhook configuration support")
print("✅ Universal webhook handler for both job types")
print("\n💡 Benefits:")
print(" • No more polling - get instant notifications")
print(" • Better resource utilization")
print(" • Reliable delivery with automatic retries")
print(" • Consistent API across crawl and LLM jobs")
print(" • Production-ready webhook infrastructure")
def main():
"""Run all demos."""
print("\n" + "="*70)
print("🚀 Crawl4AI v0.7.6 Release Demo")
print("="*70)
print("Feature: Webhook Support for Docker Job Queue API")
print("="*70)
# Check if server is running
try:
        health = requests.get(f"{CRAWL4AI_BASE_URL}/health", timeout=5)
        health.raise_for_status()
        print("✅ Crawl4AI server is running")
    except requests.RequestException:
print(f"❌ Cannot connect to Crawl4AI at {CRAWL4AI_BASE_URL}")
print("Please start Docker container:")
print(" docker run -d -p 11235:11235 --env-file .llm.env unclecode/crawl4ai:0.7.6")
return
# Start webhook server
print(f"\n🌐 Starting webhook server at {WEBHOOK_BASE_URL}...")
webhook_thread = Thread(target=start_webhook_server, daemon=True)
webhook_thread.start()
time.sleep(2)
# Run demos
demo_1_crawl_webhook_notification_only()
time.sleep(5)
demo_2_crawl_webhook_with_data()
time.sleep(5)
demo_3_llm_webhook_notification_only()
time.sleep(5)
demo_4_llm_webhook_with_schema()
time.sleep(5)
demo_5_global_webhook_config()
demo_6_webhook_retry_logic()
# Wait for webhooks
print("\n⏳ Waiting for all webhooks to arrive...")
time.sleep(30)
# Print summary
print_summary()
print("\n" + "="*70)
print("✅ Demo completed!")
print("="*70)
print("\n📚 Documentation:")
print(" • deploy/docker/WEBHOOK_EXAMPLES.md")
print(" • docs/examples/docker_webhook_example.py")
print("\n🔗 Upgrade:")
print(" docker pull unclecode/crawl4ai:0.7.6")
if __name__ == "__main__":
main()
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/crawl4ai_v0_7_0_showcase.py | docs/releases_review/crawl4ai_v0_7_0_showcase.py | """
🚀 Crawl4AI v0.7.0 Feature Showcase
=====================================
This demo showcases the major features introduced in v0.7.0:
1. Link Preview/Peek - Advanced link analysis with 3-layer scoring
2. Adaptive Crawling - Intelligent crawling with confidence tracking
3. Virtual Scroll - Capture content from modern infinite scroll pages
4. C4A Script - Domain-specific language for web automation
5. URL Seeder - Smart URL discovery and filtering
6. LLM Context Builder - 3D context for AI assistants
Let's explore each feature with practical examples!
"""
import asyncio
import json
import time
import re
from typing import List, Dict
from rich.console import Console
from rich.table import Table
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.panel import Panel
from rich.syntax import Syntax
from rich.layout import Layout
from rich.live import Live
from rich import box
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, AdaptiveCrawler, AdaptiveConfig, BrowserConfig, CacheMode
from crawl4ai import AsyncUrlSeeder, SeedingConfig
from crawl4ai import LinkPreviewConfig, VirtualScrollConfig
from crawl4ai import c4a_compile, CompilationResult
# Initialize Rich console for beautiful output
console = Console()
def print_banner(title: str, subtitle: str = ""):
"""Print a beautiful banner for each section"""
console.print(f"\n[bold cyan]{'=' * 80}[/bold cyan]")
console.print(f"[bold yellow]{title.center(80)}[/bold yellow]")
if subtitle:
console.print(f"[dim white]{subtitle.center(80)}[/dim white]")
console.print(f"[bold cyan]{'=' * 80}[/bold cyan]\n")
def create_score_bar(score: float, max_score: float = 10.0) -> str:
    """Create a visual progress bar for scores"""
    percentage = (score / max_score)
    filled = int(percentage * 20)
    bar = "█" * filled + "░" * (20 - filled)
    # Color by the score's fraction of max_score so 0-1 and 0-10 scales render consistently
    color = "green" if percentage >= 0.7 else "yellow" if percentage >= 0.4 else "red"
    return f"[{color}]{bar}[/] {score:.2f}/{max_score}"
async def link_preview_demo(auto_mode=False):
"""
🔗 Link Preview/Peek Demo
Showcases the 3-layer scoring system for intelligent link analysis
"""
print_banner(
"🔗 LINK PREVIEW & INTELLIGENT SCORING",
"Advanced link analysis with intrinsic, contextual, and total scoring"
)
# Explain the feature
console.print(Panel(
"[bold]What is Link Preview?[/bold]\n\n"
"Link Preview analyzes links on a page with a sophisticated 3-layer scoring system:\n\n"
"• [cyan]Intrinsic Score[/cyan]: Quality based on link text, position, and attributes (0-10)\n"
"• [magenta]Contextual Score[/magenta]: Relevance to your query using semantic analysis (0-1)\n"
"• [green]Total Score[/green]: Combined score for intelligent prioritization\n\n"
"This helps you find the most relevant and high-quality links automatically!",
title="Feature Overview",
border_style="blue"
))
await asyncio.sleep(2)
# Demo 1: Basic link analysis with visual scoring
console.print("\n[bold yellow]Demo 1: Analyzing Python Documentation Links[/bold yellow]\n")
query = "async await coroutines tutorial"
console.print(f"[cyan]🔍 Query:[/cyan] [bold]{query}[/bold]")
console.print("[dim]Looking for links related to asynchronous programming...[/dim]\n")
config = CrawlerRunConfig(
link_preview_config=LinkPreviewConfig(
include_internal=True,
include_external=False,
max_links=10,
concurrency=5,
query=query, # Our search context
verbose=False # We'll handle the display
),
score_links=True,
only_text=True
)
# Create a progress display
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
console=console
) as progress:
task = progress.add_task("[cyan]Crawling and analyzing links...", total=None)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun("https://docs.python.org/3/library/asyncio.html", config=config)
progress.remove_task(task)
if result.success:
# Extract links with scores
links = result.links.get("internal", [])
scored_links = [l for l in links if l.get("head_data") and l.get("total_score")]
# Sort by total score
scored_links.sort(key=lambda x: x.get("total_score", 0), reverse=True)
# Create a beautiful table for results
table = Table(
title="🎯 Top Scored Links",
box=box.ROUNDED,
show_lines=True,
title_style="bold magenta"
)
table.add_column("Rank", style="cyan", width=6)
table.add_column("Link Text", style="white", width=40)
table.add_column("Intrinsic Score", width=25)
table.add_column("Contextual Score", width=25)
table.add_column("Total Score", style="bold", width=15)
for i, link in enumerate(scored_links[:5], 1):
intrinsic = link.get('intrinsic_score', 0)
contextual = link.get('contextual_score', 0)
total = link.get('total_score', 0)
# Get link text and title
text = link.get('text', '')[:35] + "..." if len(link.get('text', '')) > 35 else link.get('text', '')
title = link.get('head_data', {}).get('title', 'No title')[:40]
table.add_row(
f"#{i}",
text or title,
create_score_bar(intrinsic, 10.0),
create_score_bar(contextual, 1.0),
f"[bold green]{total:.3f}[/bold green]"
)
console.print(table)
# Show what makes a high-scoring link
if scored_links:
best_link = scored_links[0]
console.print(f"\n[bold green]🏆 Best Match:[/bold green]")
console.print(f"URL: [link]{best_link['href']}[/link]")
console.print(f"Title: {best_link.get('head_data', {}).get('title', 'N/A')}")
desc = best_link.get('head_data', {}).get('meta', {}).get('description', '')
if desc:
console.print(f"Description: [dim]{desc[:100]}...[/dim]")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 2...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 2: Research Assistant Mode
console.print("\n[bold yellow]Demo 2: Research Assistant - Finding Machine Learning Resources[/bold yellow]\n")
# First query - will find no results
query1 = "deep learning neural networks beginners tutorial"
console.print(f"[cyan]🔍 Query 1:[/cyan] [bold]{query1}[/bold]")
console.print("[dim]Note: scikit-learn focuses on traditional ML, not deep learning[/dim]\n")
# Configure for research mode
research_config = CrawlerRunConfig(
link_preview_config=LinkPreviewConfig(
include_internal=True,
include_external=True,
query=query1,
max_links=20,
score_threshold=0.3, # Only high-relevance links
concurrency=10
),
score_links=True
)
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
console=console
) as progress:
task = progress.add_task("[cyan]Discovering learning resources...", total=None)
async with AsyncWebCrawler() as crawler:
result = await crawler.arun("https://scikit-learn.org/stable/", config=research_config)
progress.remove_task(task)
if result.success:
all_links = result.links.get("internal", []) + result.links.get("external", [])
# Filter for links with actual scores
relevant_links = [l for l in all_links if l.get("total_score") is not None and l.get("total_score") > 0.3]
relevant_links.sort(key=lambda x: x.get("total_score", 0), reverse=True)
console.print(f"[bold green]📚 Found {len(relevant_links)} highly relevant resources![/bold green]\n")
# Group by score ranges
excellent = [l for l in relevant_links if l.get("total_score", 0) > 0.7]
good = [l for l in relevant_links if 0.5 <= l.get("total_score", 0) <= 0.7]
fair = [l for l in relevant_links if 0.3 <= l.get("total_score", 0) < 0.5]
if excellent:
console.print("[bold green]⭐⭐⭐ Excellent Matches:[/bold green]")
for link in excellent[:3]:
title = link.get('head_data', {}).get('title', link.get('text', 'No title'))
console.print(f" • {title[:60]}... [dim]({link.get('total_score', 0):.2f})[/dim]")
if good:
console.print("\n[yellow]⭐⭐ Good Matches:[/yellow]")
for link in good[:3]:
title = link.get('head_data', {}).get('title', link.get('text', 'No title'))
console.print(f" • {title[:60]}... [dim]({link.get('total_score', 0):.2f})[/dim]")
# Second query - will find results
console.print("\n[bold cyan]Let's try a more relevant query for scikit-learn:[/bold cyan]\n")
query2 = "machine learning classification tutorial examples"
console.print(f"[cyan]🔍 Query 2:[/cyan] [bold]{query2}[/bold]")
console.print("[dim]This should find relevant content about traditional ML[/dim]\n")
research_config2 = CrawlerRunConfig(
link_preview_config=LinkPreviewConfig(
include_internal=True,
include_external=True,
query=query2,
max_links=15,
score_threshold=0.2, # Slightly lower threshold
concurrency=10
),
score_links=True
)
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
console=console
) as progress:
task = progress.add_task("[cyan]Finding ML tutorials...", total=None)
async with AsyncWebCrawler() as crawler:
result2 = await crawler.arun("https://scikit-learn.org/stable/", config=research_config2)
progress.remove_task(task)
if result2.success:
all_links2 = result2.links.get("internal", []) + result2.links.get("external", [])
relevant_links2 = [l for l in all_links2 if l.get("total_score") is not None and l.get("total_score") > 0.2]
relevant_links2.sort(key=lambda x: x.get("total_score", 0), reverse=True)
console.print(f"[bold green]📚 Now found {len(relevant_links2)} relevant resources![/bold green]\n")
if relevant_links2:
console.print("[bold]Top relevant links for ML tutorials:[/bold]")
for i, link in enumerate(relevant_links2[:5], 1):
title = link.get('head_data', {}).get('title', link.get('text', 'No title'))
score = link.get('total_score', 0)
console.print(f"{i}. [{score:.3f}] {title[:70]}...")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 3...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 3: Live scoring visualization
console.print("\n[bold yellow]Demo 3: Understanding the 3-Layer Scoring System[/bold yellow]\n")
demo_query = "async programming tutorial"
console.print(f"[cyan]🔍 Query:[/cyan] [bold]{demo_query}[/bold]")
console.print("[dim]Let's see how different link types score against this query[/dim]\n")
# Create a sample link analysis
sample_links = [
{
"text": "Complete Guide to Async Programming",
"intrinsic": 9.2,
"contextual": 0.95,
"factors": ["Strong keywords", "Title position", "Descriptive text"]
},
{
"text": "API Reference",
"intrinsic": 6.5,
"contextual": 0.15,
"factors": ["Common link text", "Navigation menu", "Low relevance"]
},
{
"text": "Click here",
"intrinsic": 2.1,
"contextual": 0.05,
"factors": ["Poor link text", "No context", "Generic anchor"]
}
]
for link in sample_links:
total = (link["intrinsic"] / 10 * 0.4) + (link["contextual"] * 0.6)
panel_content = (
f"[bold]Link Text:[/bold] {link['text']}\n\n"
f"[cyan]Intrinsic Score:[/cyan] {create_score_bar(link['intrinsic'], 10.0)}\n"
f"[magenta]Contextual Score:[/magenta] {create_score_bar(link['contextual'], 1.0)}\n"
f"[green]Total Score:[/green] {total:.3f}\n\n"
f"[dim]Factors: {', '.join(link['factors'])}[/dim]"
)
console.print(Panel(
panel_content,
title=f"Link Analysis",
border_style="blue" if total > 0.7 else "yellow" if total > 0.3 else "red"
))
await asyncio.sleep(1)
# Summary
console.print("\n[bold green]✨ Link Preview Benefits:[/bold green]")
console.print("• Automatically finds the most relevant links for your research")
console.print("• Saves time by prioritizing high-quality content")
console.print("• Provides semantic understanding beyond simple keyword matching")
console.print("• Enables intelligent crawling decisions\n")
async def adaptive_crawling_demo(auto_mode=False):
"""
🎯 Adaptive Crawling Demo
Shows intelligent crawling that knows when to stop
"""
print_banner(
"🎯 ADAPTIVE CRAWLING",
"Intelligent crawling that knows when it has enough information"
)
# Explain the feature
console.print(Panel(
"[bold]What is Adaptive Crawling?[/bold]\n\n"
"Adaptive Crawling intelligently determines when sufficient information has been gathered:\n\n"
"• [cyan]Confidence Tracking[/cyan]: Monitors how well we understand the topic (0-100%)\n"
"• [magenta]Smart Exploration[/magenta]: Follows most promising links based on relevance\n"
"• [green]Early Stopping[/green]: Stops when confidence threshold is reached\n"
"• [yellow]Two Strategies[/yellow]: Statistical (fast) vs Embedding (semantic)\n\n"
"Perfect for research tasks where you need 'just enough' information!",
title="Feature Overview",
border_style="blue"
))
await asyncio.sleep(2)
# Demo 1: Basic adaptive crawling with confidence visualization
console.print("\n[bold yellow]Demo 1: Statistical Strategy - Fast Topic Understanding[/bold yellow]\n")
query = "Python async web scraping best practices"
console.print(f"[cyan]🔍 Research Query:[/cyan] [bold]{query}[/bold]")
console.print(f"[cyan]🎯 Goal:[/cyan] Gather enough information to understand the topic")
console.print(f"[cyan]📊 Strategy:[/cyan] Statistical (keyword-based, fast)\n")
# Configure adaptive crawler
config = AdaptiveConfig(
strategy="statistical",
max_pages=3, # Limit to 3 pages for demo
confidence_threshold=0.7, # Stop at 70% confidence
top_k_links=2, # Follow top 2 links per page
min_gain_threshold=0.05 # Need 5% information gain to continue
)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive = AdaptiveCrawler(crawler, config)
# Create progress tracking
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
console=console
) as progress:
# Track crawling progress
crawl_task = progress.add_task("[cyan]Starting adaptive crawl...", total=None)
# Start crawling
start_time = time.time()
result = await adaptive.digest(
start_url="https://docs.python.org/3/library/asyncio.html",
query=query
)
elapsed = time.time() - start_time
progress.remove_task(crawl_task)
# Display results with visual confidence meter
console.print(f"\n[bold green]✅ Crawling Complete in {elapsed:.1f} seconds![/bold green]\n")
# Create confidence visualization
confidence = adaptive.confidence
conf_percentage = int(confidence * 100)
conf_bar = "█" * (conf_percentage // 5) + "░" * (20 - conf_percentage // 5)
console.print(f"[bold]Confidence Level:[/bold] [{('green' if confidence >= 0.7 else 'yellow' if confidence >= 0.5 else 'red')}]{conf_bar}[/] {conf_percentage}%")
# Show crawl statistics
stats_table = Table(
title="📊 Crawl Statistics",
box=box.ROUNDED,
show_lines=True
)
stats_table.add_column("Metric", style="cyan", width=25)
stats_table.add_column("Value", style="white", width=20)
stats_table.add_row("Pages Crawled", str(len(result.crawled_urls)))
stats_table.add_row("Knowledge Base Size", f"{len(adaptive.state.knowledge_base)} documents")
# Calculate total content from CrawlResult objects
total_content = 0
for doc in adaptive.state.knowledge_base:
if hasattr(doc, 'markdown') and doc.markdown and hasattr(doc.markdown, 'raw_markdown'):
total_content += len(doc.markdown.raw_markdown)
stats_table.add_row("Total Content", f"{total_content:,} chars")
stats_table.add_row("Time per Page", f"{elapsed / len(result.crawled_urls):.2f}s")
console.print(stats_table)
# Show top relevant pages
console.print("\n[bold]🏆 Most Relevant Pages Found:[/bold]")
relevant_pages = adaptive.get_relevant_content(top_k=3)
for i, page in enumerate(relevant_pages, 1):
console.print(f"\n{i}. [bold]{page['url']}[/bold]")
console.print(f" Relevance: {page['score']:.2%}")
# Show key information extracted
content = page['content'] or ""
if content:
# Find most relevant sentence
sentences = [s.strip() for s in content.split('.') if s.strip()]
if sentences:
console.print(f" [dim]Key insight: {sentences[0]}...[/dim]")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 2...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 2: Early Stopping Demonstration
console.print("\n[bold yellow]Demo 2: Early Stopping - Stop When We Know Enough[/bold yellow]\n")
query2 = "Python requests library tutorial"
console.print(f"[cyan]🔍 Research Query:[/cyan] [bold]{query2}[/bold]")
console.print(f"[cyan]🎯 Goal:[/cyan] Stop as soon as we reach 60% confidence")
console.print("[dim]Watch how adaptive crawling stops early when it has enough info[/dim]\n")
# Configure for early stopping
early_stop_config = AdaptiveConfig(
strategy="statistical",
max_pages=10, # Allow up to 10, but will stop early
confidence_threshold=0.6, # Lower threshold for demo
top_k_links=2
)
async with AsyncWebCrawler(verbose=False) as crawler:
adaptive_early = AdaptiveCrawler(crawler, early_stop_config)
# Track progress
console.print("[cyan]Starting crawl with early stopping enabled...[/cyan]")
start_time = time.time()
result = await adaptive_early.digest(
start_url="https://docs.python-requests.org/en/latest/",
query=query2
)
elapsed = time.time() - start_time
# Show results
console.print(f"\n[bold green]✅ Stopped early at {int(adaptive_early.confidence * 100)}% confidence![/bold green]")
console.print(f"• Crawled only {len(result.crawled_urls)} pages (max was 10)")
console.print(f"• Saved time: ~{elapsed:.1f}s total")
console.print(f"• Efficiency: {elapsed / len(result.crawled_urls):.1f}s per page\n")
# Show why it stopped
if adaptive_early.confidence >= 0.6:
console.print("[green]✓ Reached confidence threshold - no need to crawl more![/green]")
else:
console.print("[yellow]⚠ Hit max pages limit before reaching threshold[/yellow]")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 3...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 3: Knowledge Base Export/Import
console.print("\n[bold yellow]Demo 3: Knowledge Base Export & Import[/bold yellow]\n")
query3 = "Python decorators tutorial"
console.print(f"[cyan]🔍 Research Query:[/cyan] [bold]{query3}[/bold]")
console.print("[dim]Build knowledge base, export it, then import for continued research[/dim]\n")
# First crawl - build knowledge base
export_config = AdaptiveConfig(
strategy="statistical",
max_pages=2, # Small for demo
confidence_threshold=0.5
)
async with AsyncWebCrawler(verbose=False) as crawler:
# Phase 1: Initial research
console.print("[bold]Phase 1: Initial Research[/bold]")
adaptive1 = AdaptiveCrawler(crawler, export_config)
result1 = await adaptive1.digest(
start_url="https://realpython.com/",
query=query3
)
console.print(f"✓ Built knowledge base with {len(adaptive1.state.knowledge_base)} documents")
console.print(f"✓ Confidence: {int(adaptive1.confidence * 100)}%\n")
# Export knowledge base
console.print("[bold]💾 Exporting Knowledge Base:[/bold]")
kb_export = adaptive1.export_knowledge_base()
export_stats = {
"documents": len(kb_export['documents']),
"urls": len(kb_export['visited_urls']),
"size": len(json.dumps(kb_export)),
"confidence": kb_export['confidence']
}
for key, value in export_stats.items():
console.print(f"• {key.capitalize()}: {value:,}" if isinstance(value, int) else f"• {key.capitalize()}: {value:.2%}")
# Phase 2: Import and continue
console.print("\n[bold]Phase 2: Import & Continue Research[/bold]")
adaptive2 = AdaptiveCrawler(crawler, export_config)
# Import the knowledge base
adaptive2.import_knowledge_base(kb_export)
console.print(f"✓ Imported {len(adaptive2.state.knowledge_base)} documents")
console.print(f"✓ Starting confidence: {int(adaptive2.confidence * 100)}%")
# Continue research from a different starting point
console.print("\n[cyan]Continuing research from a different angle...[/cyan]")
result2 = await adaptive2.digest(
start_url="https://docs.python.org/3/glossary.html#term-decorator",
query=query3
)
console.print(f"\n[bold green]✅ Research Complete![/bold green]")
console.print(f"• Total documents: {len(adaptive2.state.knowledge_base)}")
console.print(f"• Final confidence: {int(adaptive2.confidence * 100)}%")
console.print(f"• Knowledge preserved across sessions!")
# Summary
console.print("\n[bold green]✨ Adaptive Crawling Benefits:[/bold green]")
console.print("• Automatically stops when enough information is gathered")
console.print("• Follows most promising links based on relevance")
console.print("• Saves time and resources with intelligent exploration")
console.print("• Export/import knowledge bases for continued research")
console.print("• Choose strategy based on needs: speed vs semantic understanding\n")
async def virtual_scroll_demo(auto_mode=False):
"""
📜 Virtual Scroll Demo
Shows how to capture content from modern infinite scroll pages
"""
import os
import http.server
import socketserver
import threading
from pathlib import Path
print_banner(
"📜 VIRTUAL SCROLL SUPPORT",
"Capture all content from pages with DOM recycling"
)
# Explain the feature
console.print(Panel(
"[bold]What is Virtual Scroll?[/bold]\n\n"
"Virtual Scroll handles modern web pages that use DOM recycling techniques:\n\n"
"• [cyan]Twitter/X-like feeds[/cyan]: Content replaced as you scroll\n"
"• [magenta]Instagram grids[/magenta]: Visual content with virtualization\n"
"• [green]News feeds[/green]: Mixed content with different behaviors\n"
"• [yellow]Infinite scroll[/yellow]: Captures everything, not just visible\n\n"
"Without this, you'd only get the initially visible content!",
title="Feature Overview",
border_style="blue"
))
await asyncio.sleep(2)
# Start test server with HTML examples
ASSETS_DIR = Path(__file__).parent / "assets"
class TestServer:
"""Simple HTTP server to serve our test HTML files"""
def __init__(self, port=8080):
self.port = port
self.httpd = None
self.server_thread = None
async def start(self):
"""Start the test server"""
Handler = http.server.SimpleHTTPRequestHandler
# Save current directory and change to assets directory
self.original_cwd = os.getcwd()
os.chdir(ASSETS_DIR)
# Try to find an available port
for _ in range(10):
try:
self.httpd = socketserver.TCPServer(("", self.port), Handler)
break
except OSError:
self.port += 1
if self.httpd is None:
raise RuntimeError("Could not find available port")
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
# Give server time to start
await asyncio.sleep(0.5)
console.print(f"[green]Test server started on http://localhost:{self.port}[/green]")
return self.port
def stop(self):
"""Stop the test server"""
if self.httpd:
self.httpd.shutdown()
# Restore original directory
if hasattr(self, 'original_cwd'):
os.chdir(self.original_cwd)
server = TestServer()
port = await server.start()
try:
# Demo 1: Twitter-like virtual scroll (content REPLACED)
console.print("\n[bold yellow]Demo 1: Twitter-like Virtual Scroll - Content Replaced[/bold yellow]\n")
console.print("[cyan]This simulates Twitter/X where only visible tweets exist in DOM[/cyan]\n")
url = f"http://localhost:{port}/virtual_scroll_twitter_like.html"
# First, crawl WITHOUT virtual scroll
console.print("[red]WITHOUT Virtual Scroll:[/red]")
config_normal = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
browser_config = BrowserConfig(
headless=False if not auto_mode else True,
viewport={"width": 1280, "height": 800}
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result_normal = await crawler.arun(url=url, config=config_normal)
# Count tweets
tweets_normal = len(set(re.findall(r'data-tweet-id="(\d+)"', result_normal.html)))
console.print(f"• Captured only {tweets_normal} tweets (initial visible)")
console.print(f"• HTML size: {len(result_normal.html):,} bytes\n")
# Then, crawl WITH virtual scroll
console.print("[green]WITH Virtual Scroll:[/green]")
virtual_config = VirtualScrollConfig(
container_selector="#timeline",
scroll_count=50,
scroll_by="container_height",
wait_after_scroll=0.2
)
config_virtual = CrawlerRunConfig(
virtual_scroll_config=virtual_config,
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result_virtual = await crawler.arun(url=url, config=config_virtual)
tweets_virtual = len(set(re.findall(r'data-tweet-id="(\d+)"', result_virtual.html)))
console.print(f"• Captured {tweets_virtual} tweets (all content)")
console.print(f"• HTML size: {len(result_virtual.html):,} bytes")
console.print(f"• [bold]{tweets_virtual / tweets_normal if tweets_normal > 0 else 'N/A':.1f}x more content![/bold]\n")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 2...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 2: Instagram Grid Example
console.print("\n[bold yellow]Demo 2: Instagram Grid - Visual Grid Layout[/bold yellow]\n")
console.print("[cyan]This shows how virtual scroll works with grid layouts[/cyan]\n")
url2 = f"http://localhost:{port}/virtual_scroll_instagram_grid.html"
# Configure for grid layout
grid_config = VirtualScrollConfig(
container_selector=".feed-container",
scroll_count=100, # Many scrolls for 999 posts
scroll_by="container_height",
wait_after_scroll=0.1 if auto_mode else 0.3
)
config = CrawlerRunConfig(
virtual_scroll_config=grid_config,
cache_mode=CacheMode.BYPASS,
screenshot=True # Take a screenshot
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url=url2, config=config)
# Count posts in grid
posts = re.findall(r'data-post-id="(\d+)"', result.html)
unique_posts = sorted(set(int(id) for id in posts))
console.print(f"[green]✅ Results:[/green]")
console.print(f"• Posts captured: {len(unique_posts)} unique posts")
if unique_posts:
console.print(f"• Post IDs range: {min(unique_posts)} to {max(unique_posts)}")
console.print(f"• Expected: 0 to 998 (999 posts total)")
if len(unique_posts) >= 900:
console.print(f"• [bold green]SUCCESS! Captured {len(unique_posts)/999*100:.1f}% of all posts[/bold green]")
if not auto_mode:
console.print("\n[dim]Press Enter to continue to Demo 3...[/dim]")
input()
else:
await asyncio.sleep(1)
# Demo 3: Show the actual code
console.print("\n[bold yellow]Demo 3: The Code - How It Works[/bold yellow]\n")
# Show the actual implementation
code = '''# Example: Crawling Twitter-like feed with virtual scroll
url = "http://localhost:8080/virtual_scroll_twitter_like.html"
# Configure virtual scroll
virtual_config = VirtualScrollConfig(
container_selector="#timeline", # The scrollable container
scroll_count=50, # Max number of scrolls
scroll_by="container_height", # Scroll by container height
wait_after_scroll=0.3 # Wait 300ms after each scroll
)
config = CrawlerRunConfig(
virtual_scroll_config=virtual_config,
cache_mode=CacheMode.BYPASS
)
# Use headless=False to watch it work!
browser_config = BrowserConfig(
headless=False,
viewport={"width": 1280, "height": 800}
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(url=url, config=config)
# Extract all tweets
tweets = re.findall(r\'data-tweet-id="(\\d+)"\', result.html)
unique_tweets = set(tweets)
print(f"Captured {len(unique_tweets)} unique tweets!")
print(f"Without virtual scroll: only ~10 tweets")
print(f"With virtual scroll: all 500 tweets!")'''
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | true |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/demo_v0.7.7.py | docs/releases_review/demo_v0.7.7.py | #!/usr/bin/env python3
"""
Crawl4AI v0.7.7 Release Demo
============================
This demo showcases the major feature in v0.7.7:
**Self-Hosting with Real-time Monitoring Dashboard**
Features Demonstrated:
1. System health monitoring with live metrics
2. Real-time request tracking (active & completed)
3. Browser pool management (permanent/hot/cold pools)
4. Monitor API endpoints for programmatic access
5. WebSocket streaming for real-time updates
6. Control actions (kill browser, cleanup, restart)
7. Production metrics (efficiency, reuse rates, memory)
Prerequisites:
- Crawl4AI Docker container running on localhost:11235
- Python packages: pip install httpx websockets
Usage:
python docs/releases_review/demo_v0.7.7.py
"""
import asyncio
import httpx
import json
import time
from datetime import datetime
from typing import Dict, Any
# Configuration
CRAWL4AI_BASE_URL = "http://localhost:11235"
MONITOR_DASHBOARD_URL = f"{CRAWL4AI_BASE_URL}/dashboard"
def print_section(title: str, description: str = ""):
"""Print a formatted section header"""
print(f"\n{'=' * 70}")
print(f"📊 {title}")
if description:
print(f"{description}")
print(f"{'=' * 70}\n")
def print_subsection(title: str):
"""Print a formatted subsection header"""
print(f"\n{'-' * 70}")
print(f"{title}")
print(f"{'-' * 70}")
async def check_server_health():
"""Check if Crawl4AI server is running"""
try:
async with httpx.AsyncClient(timeout=5.0) as client:
response = await client.get(f"{CRAWL4AI_BASE_URL}/health")
return response.status_code == 200
    except httpx.HTTPError:
        return False
async def demo_1_system_health_overview():
"""Demo 1: System Health Overview - Live metrics and pool status"""
print_section(
"Demo 1: System Health Overview",
"Real-time monitoring of system resources and browser pool"
)
async with httpx.AsyncClient(timeout=30.0) as client:
print("🔍 Fetching system health metrics...")
try:
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/health")
health = response.json()
print("\n✅ System Health Report:")
print(f"\n🖥️ Container Metrics:")
print(f" • CPU Usage: {health['container']['cpu_percent']:.1f}%")
print(f" • Memory Usage: {health['container']['memory_percent']:.1f}% "
f"({health['container']['memory_mb']:.0f} MB)")
print(f" • Network RX: {health['container']['network_rx_mb']:.2f} MB")
print(f" • Network TX: {health['container']['network_tx_mb']:.2f} MB")
print(f" • Uptime: {health['container']['uptime_seconds']:.0f}s")
print(f"\n🌐 Browser Pool Status:")
print(f" Permanent Browser:")
print(f" • Active: {health['pool']['permanent']['active']}")
print(f" • Total Requests: {health['pool']['permanent']['total_requests']}")
print(f" Hot Pool (Frequently Used Configs):")
print(f" • Count: {health['pool']['hot']['count']}")
print(f" • Total Requests: {health['pool']['hot']['total_requests']}")
print(f" Cold Pool (On-Demand Configs):")
print(f" • Count: {health['pool']['cold']['count']}")
print(f" • Total Requests: {health['pool']['cold']['total_requests']}")
print(f"\n📈 Overall Statistics:")
print(f" • Total Requests: {health['stats']['total_requests']}")
print(f" • Success Rate: {health['stats']['success_rate_percent']:.1f}%")
print(f" • Avg Latency: {health['stats']['avg_latency_ms']:.0f}ms")
print(f"\n💡 Dashboard URL: {MONITOR_DASHBOARD_URL}")
except Exception as e:
print(f"❌ Error fetching health: {e}")
async def demo_2_request_tracking():
"""Demo 2: Real-time Request Tracking - Generate and monitor requests"""
print_section(
"Demo 2: Real-time Request Tracking",
"Submit crawl jobs and watch them in real-time"
)
async with httpx.AsyncClient(timeout=60.0) as client:
print("🚀 Submitting crawl requests...")
# Submit multiple requests
urls_to_crawl = [
"https://httpbin.org/html",
"https://httpbin.org/json",
"https://example.com"
]
tasks = []
for url in urls_to_crawl:
task = client.post(
f"{CRAWL4AI_BASE_URL}/crawl",
json={"urls": [url], "crawler_config": {}}
)
tasks.append(task)
print(f" • Submitting {len(urls_to_crawl)} requests in parallel...")
results = await asyncio.gather(*tasks, return_exceptions=True)
successful = sum(1 for r in results if not isinstance(r, Exception) and r.status_code == 200)
print(f" ✅ {successful}/{len(urls_to_crawl)} requests submitted")
# Check request tracking
print("\n📊 Checking request tracking...")
await asyncio.sleep(2) # Wait for requests to process
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/requests")
requests_data = response.json()
print(f"\n📋 Request Status:")
print(f" • Active Requests: {len(requests_data['active'])}")
print(f" • Completed Requests: {len(requests_data['completed'])}")
if requests_data['completed']:
print(f"\n📝 Recent Completed Requests:")
for req in requests_data['completed'][:3]:
status_icon = "✅" if req['success'] else "❌"
print(f" {status_icon} {req['endpoint']} - {req['latency_ms']:.0f}ms")
async def demo_3_browser_pool_management():
"""Demo 3: Browser Pool Management - 3-tier architecture in action"""
print_section(
"Demo 3: Browser Pool Management",
"Understanding permanent, hot, and cold browser pools"
)
async with httpx.AsyncClient(timeout=60.0) as client:
print("🌊 Testing browser pool with different configurations...")
# Test 1: Default config (permanent browser)
print("\n🔥 Test 1: Default Config → Permanent Browser")
for i in range(3):
await client.post(
f"{CRAWL4AI_BASE_URL}/crawl",
json={"urls": [f"https://httpbin.org/html?req={i}"], "crawler_config": {}}
)
print(f" • Request {i+1}/3 sent (should use permanent browser)")
await asyncio.sleep(2)
# Test 2: Custom viewport (cold → hot promotion after 3 uses)
print("\n♨️ Test 2: Custom Viewport → Cold Pool (promoting to Hot)")
viewport_config = {"viewport": {"width": 1280, "height": 720}}
for i in range(4):
await client.post(
f"{CRAWL4AI_BASE_URL}/crawl",
json={
"urls": [f"https://httpbin.org/json?viewport={i}"],
"browser_config": viewport_config,
"crawler_config": {}
}
)
print(f" • Request {i+1}/4 sent (cold→hot promotion after 3rd use)")
await asyncio.sleep(2)
# Check browser pool status
print("\n📊 Browser Pool Report:")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/browsers")
browsers = response.json()
print(f"\n🎯 Pool Summary:")
print(f" • Total Browsers: {browsers['summary']['total_count']}")
print(f" • Total Memory: {browsers['summary']['total_memory_mb']} MB")
print(f" • Reuse Rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
print(f"\n📋 Browser Pool Details:")
if browsers['permanent']:
for browser in browsers['permanent']:
print(f" 🔥 Permanent: {browser['browser_id'][:8]}... | "
f"Requests: {browser['request_count']} | "
f"Memory: {browser['memory_mb']:.0f} MB")
if browsers['hot']:
for browser in browsers['hot']:
print(f" ♨️ Hot: {browser['browser_id'][:8]}... | "
f"Requests: {browser['request_count']} | "
f"Memory: {browser['memory_mb']:.0f} MB")
if browsers['cold']:
for browser in browsers['cold']:
print(f" ❄️ Cold: {browser['browser_id'][:8]}... | "
f"Requests: {browser['request_count']} | "
f"Memory: {browser['memory_mb']:.0f} MB")
async def demo_4_monitor_api_endpoints():
"""Demo 4: Monitor API Endpoints - Complete API surface"""
print_section(
"Demo 4: Monitor API Endpoints",
"Programmatic access to all monitoring data"
)
async with httpx.AsyncClient(timeout=30.0) as client:
print("🔌 Testing Monitor API endpoints...")
# Endpoint performance statistics
print_subsection("Endpoint Performance Statistics")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/endpoints/stats")
endpoint_stats = response.json()
print("\n📊 Per-Endpoint Analytics:")
for endpoint, stats in endpoint_stats.items():
print(f" {endpoint}:")
print(f" • Requests: {stats['count']}")
print(f" • Avg Latency: {stats['avg_latency_ms']:.0f}ms")
print(f" • Success Rate: {stats['success_rate_percent']:.1f}%")
# Timeline data for charts
print_subsection("Timeline Data (for Charts)")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/timeline?minutes=5")
timeline = response.json()
print(f"\n📈 Timeline Metrics (last 5 minutes):")
print(f" • Data Points: {len(timeline['memory'])}")
if timeline['memory']:
latest = timeline['memory'][-1]
print(f" • Latest Memory: {latest['value']:.1f}%")
print(f" • Timestamp: {latest['timestamp']}")
# Janitor logs
print_subsection("Janitor Cleanup Events")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/logs/janitor?limit=3")
janitor_logs = response.json()
print(f"\n🧹 Recent Cleanup Activities:")
if janitor_logs:
for log in janitor_logs[:3]:
print(f" • {log['timestamp']}: {log['message']}")
else:
print(" (No cleanup events yet - janitor runs periodically)")
# Error logs
print_subsection("Error Monitoring")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/logs/errors?limit=3")
error_logs = response.json()
print(f"\n❌ Recent Errors:")
if error_logs:
for log in error_logs[:3]:
print(f" • {log['timestamp']}: {log['error_type']}")
print(f" {log['message'][:100]}...")
else:
print(" ✅ No recent errors!")
async def demo_5_websocket_streaming():
"""Demo 5: WebSocket Streaming - Real-time updates"""
print_section(
"Demo 5: WebSocket Streaming",
"Live monitoring with 2-second update intervals"
)
print("⚡ WebSocket Streaming Demo")
print("\n💡 The monitoring dashboard uses WebSocket for real-time updates")
print(f" • Connection: ws://localhost:11235/monitor/ws")
print(f" • Update Interval: 2 seconds")
print(f" • Data: Health, requests, browsers, memory, errors")
print("\n📝 Sample WebSocket Integration Code:")
print("""
import websockets
import json
async def monitor_realtime():
uri = "ws://localhost:11235/monitor/ws"
async with websockets.connect(uri) as websocket:
while True:
data = await websocket.recv()
update = json.loads(data)
print(f"Memory: {update['health']['container']['memory_percent']:.1f}%")
print(f"Active Requests: {len(update['requests']['active'])}")
print(f"Browser Pool: {update['health']['pool']['permanent']['active']}")
""")
print("\n🌐 Open the dashboard to see WebSocket in action:")
print(f" {MONITOR_DASHBOARD_URL}")
async def demo_6_control_actions():
"""Demo 6: Control Actions - Manual browser management"""
print_section(
"Demo 6: Control Actions",
"Manual control over browser pool and cleanup"
)
async with httpx.AsyncClient(timeout=30.0) as client:
print("🎮 Testing control actions...")
# Force cleanup
print_subsection("Force Immediate Cleanup")
print("🧹 Triggering manual cleanup...")
try:
response = await client.post(f"{CRAWL4AI_BASE_URL}/monitor/actions/cleanup")
if response.status_code == 200:
result = response.json()
print(f" ✅ Cleanup completed")
print(f" • Browsers cleaned: {result.get('cleaned_count', 0)}")
print(f" • Memory freed: {result.get('memory_freed_mb', 0):.1f} MB")
else:
print(f" ⚠️ Response: {response.status_code}")
except Exception as e:
print(f" ℹ️ Cleanup action: {e}")
# Get browser list for potential kill/restart
print_subsection("Browser Management")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/browsers")
browsers = response.json()
cold_browsers = browsers.get('cold', [])
if cold_browsers:
browser_id = cold_browsers[0]['browser_id']
print(f"\n🎯 Example: Kill specific browser")
print(f" POST /monitor/actions/kill_browser")
print(f" JSON: {{'browser_id': '{browser_id[:16]}...'}}")
print(f" → Kills the browser and frees resources")
print(f"\n🔄 Example: Restart browser")
print(f" POST /monitor/actions/restart_browser")
print(f" JSON: {{'browser_id': 'browser_id_here'}}")
print(f" → Restart a specific browser instance")
# Reset statistics
print_subsection("Reset Statistics")
print("📊 Statistics can be reset for fresh monitoring:")
print(f" POST /monitor/stats/reset")
print(f" → Clears all accumulated statistics")
async def demo_7_production_metrics():
"""Demo 7: Production Metrics - Key indicators for operations"""
print_section(
"Demo 7: Production Metrics",
"Critical metrics for production monitoring"
)
async with httpx.AsyncClient(timeout=30.0) as client:
print("📊 Key Production Metrics:")
# Overall health
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/health")
health = response.json()
# Browser efficiency
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/browsers")
browsers = response.json()
print("\n🎯 Critical Metrics to Track:")
print(f"\n1️⃣ Memory Usage Trends")
print(f" • Current: {health['container']['memory_percent']:.1f}%")
print(f" • Alert if: >80%")
print(f" • Action: Trigger cleanup or scale")
print(f"\n2️⃣ Request Success Rate")
print(f" • Current: {health['stats']['success_rate_percent']:.1f}%")
print(f" • Target: >95%")
print(f" • Alert if: <90%")
print(f"\n3️⃣ Average Latency")
print(f" • Current: {health['stats']['avg_latency_ms']:.0f}ms")
print(f" • Target: <2000ms")
print(f" • Alert if: >5000ms")
print(f"\n4️⃣ Browser Pool Efficiency")
print(f" • Reuse Rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
print(f" • Target: >80%")
print(f" • Indicates: Effective browser pooling")
print(f"\n5️⃣ Total Browsers")
print(f" • Current: {browsers['summary']['total_count']}")
print(f" • Alert if: >20 (possible leak)")
print(f" • Check: Janitor is running correctly")
print(f"\n6️⃣ Error Frequency")
response = await client.get(f"{CRAWL4AI_BASE_URL}/monitor/logs/errors?limit=10")
errors = response.json()
print(f" • Recent Errors: {len(errors)}")
print(f" • Alert if: >10 in last hour")
print(f" • Action: Review error patterns")
print("\n💡 Integration Examples:")
print(" • Prometheus: Scrape /monitor/health")
print(" • Alerting: Monitor memory, success rate, latency")
print(" • Dashboards: WebSocket streaming to custom UI")
print(" • Log Aggregation: Collect /monitor/logs/* endpoints")
async def demo_8_self_hosting_value():
"""Demo 8: Self-Hosting Value Proposition"""
print_section(
"Demo 8: Why Self-Host Crawl4AI?",
"The value proposition of owning your infrastructure"
)
print("🎯 Self-Hosting Benefits:\n")
print("🔒 Data Privacy & Security")
print(" • Your data never leaves your infrastructure")
print(" • No third-party access to crawled content")
print(" • Keep sensitive workflows behind your firewall")
print("\n💰 Cost Control")
print(" • No per-request pricing or rate limits")
print(" • Predictable infrastructure costs")
print(" • Scale based on your actual needs")
print("\n🎯 Full Customization")
print(" • Complete control over browser configs")
print(" • Custom hooks and strategies")
print(" • Tailored monitoring and alerting")
print("\n📊 Complete Transparency")
print(" • Real-time monitoring dashboard")
print(" • Full visibility into system performance")
print(" • Detailed request and error tracking")
print("\n⚡ Performance & Flexibility")
print(" • Direct access, no network overhead")
print(" • Integrate with existing infrastructure")
print(" • Custom resource allocation")
print("\n🛡️ Enterprise-Grade Operations")
print(" • Prometheus integration ready")
print(" • WebSocket for real-time dashboards")
print(" • Full API for automation")
print(" • Manual controls for troubleshooting")
print(f"\n🌐 Get Started:")
print(f" docker pull unclecode/crawl4ai:0.7.7")
print(f" docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.7")
print(f" # Visit: {MONITOR_DASHBOARD_URL}")
def print_summary():
"""Print comprehensive demo summary"""
print("\n" + "=" * 70)
print("📊 DEMO SUMMARY - Crawl4AI v0.7.7")
print("=" * 70)
print("\n✨ Features Demonstrated:")
print("=" * 70)
print("✅ System Health Overview")
print(" → Real-time CPU, memory, network, and uptime monitoring")
print("\n✅ Request Tracking")
print(" → Active and completed request monitoring with full details")
print("\n✅ Browser Pool Management")
print(" → 3-tier architecture: Permanent, Hot, and Cold pools")
print(" → Automatic promotion and cleanup")
print("\n✅ Monitor API Endpoints")
print(" → Complete REST API for programmatic access")
print(" → Health, requests, browsers, timeline, logs, errors")
print("\n✅ WebSocket Streaming")
print(" → Real-time updates every 2 seconds")
print(" → Build custom dashboards with live data")
print("\n✅ Control Actions")
print(" → Manual browser management (kill, restart)")
print(" → Force cleanup and statistics reset")
print("\n✅ Production Metrics")
print(" → 6 critical metrics for operational excellence")
print(" → Prometheus integration patterns")
print("\n✅ Self-Hosting Value")
print(" → Data privacy, cost control, full customization")
print(" → Enterprise-grade transparency and control")
print("\n" + "=" * 70)
print("🎯 What's New in v0.7.7?")
print("=" * 70)
print("• 📊 Complete Real-time Monitoring System")
print("• 🌐 Interactive Web Dashboard (/dashboard)")
print("• 🔌 Comprehensive Monitor API")
print("• ⚡ WebSocket Streaming (2-second updates)")
print("• 🎮 Manual Control Actions")
print("• 📈 Production Integration Examples")
print("• 🏭 Prometheus, Alerting, Log Aggregation")
print("• 🔥 Smart Browser Pool (Permanent/Hot/Cold)")
print("• 🧹 Automatic Janitor Cleanup")
print("• 📋 Full Request & Error Tracking")
print("\n" + "=" * 70)
print("💡 Why This Matters")
print("=" * 70)
print("Before v0.7.7: Docker was just a containerized crawler")
print("After v0.7.7: Complete self-hosting platform with enterprise monitoring")
print("\nYou now have:")
print(" • Full visibility into what's happening inside")
print(" • Real-time operational dashboards")
print(" • Complete control over browser resources")
print(" • Production-ready observability")
print(" • Zero external dependencies")
print("\n" + "=" * 70)
print("📚 Next Steps")
print("=" * 70)
print(f"1. Open the dashboard: {MONITOR_DASHBOARD_URL}")
print("2. Read the docs: https://docs.crawl4ai.com/basic/self-hosting/")
print("3. Try the Monitor API endpoints yourself")
print("4. Set up Prometheus integration for production")
print("5. Build custom dashboards with WebSocket streaming")
print("\n" + "=" * 70)
print("🔗 Resources")
print("=" * 70)
print(f"• Dashboard: {MONITOR_DASHBOARD_URL}")
print(f"• Health API: {CRAWL4AI_BASE_URL}/monitor/health")
print(f"• Documentation: https://docs.crawl4ai.com/")
print(f"• GitHub: https://github.com/unclecode/crawl4ai")
print("\n" + "=" * 70)
print("🎉 You're now in control of your web crawling destiny!")
print("=" * 70)
async def main():
"""Run all demos"""
print("\n" + "=" * 70)
print("🚀 Crawl4AI v0.7.7 Release Demo")
print("=" * 70)
print("Feature: Self-Hosting with Real-time Monitoring Dashboard")
print("=" * 70)
# Check if server is running
print("\n🔍 Checking Crawl4AI server...")
server_running = await check_server_health()
if not server_running:
print(f"❌ Cannot connect to Crawl4AI at {CRAWL4AI_BASE_URL}")
print("\nPlease start the Docker container:")
print(" docker pull unclecode/crawl4ai:0.7.7")
print(" docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.7")
print("\nThen re-run this demo.")
return
print(f"✅ Crawl4AI server is running!")
print(f"📊 Dashboard available at: {MONITOR_DASHBOARD_URL}")
# Run all demos
demos = [
demo_1_system_health_overview,
demo_2_request_tracking,
demo_3_browser_pool_management,
demo_4_monitor_api_endpoints,
demo_5_websocket_streaming,
demo_6_control_actions,
demo_7_production_metrics,
demo_8_self_hosting_value,
]
for i, demo_func in enumerate(demos, 1):
try:
await demo_func()
if i < len(demos):
await asyncio.sleep(2) # Brief pause between demos
except KeyboardInterrupt:
print(f"\n\n⚠️ Demo interrupted by user")
return
except Exception as e:
print(f"\n❌ Demo {i} error: {e}")
print("Continuing to next demo...\n")
continue
# Print comprehensive summary
print_summary()
print("\n" + "=" * 70)
print("✅ Demo completed!")
print("=" * 70)
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\n\n👋 Demo stopped by user. Thanks for trying Crawl4AI v0.7.7!")
except Exception as e:
print(f"\n\n❌ Demo failed: {e}")
print("Make sure the Docker container is running:")
print(" docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.7")
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
unclecode/crawl4ai | https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/releases_review/v0_4_3b2_features_demo.py | docs/releases_review/v0_4_3b2_features_demo.py | """
Crawl4ai v0.4.3b2 Features Demo
============================
This demonstration showcases three major categories of new features in Crawl4ai v0.4.3:
1. Efficiency & Speed:
- Memory-efficient dispatcher strategies
- New scraping algorithm
- Streaming support for batch crawling
2. LLM Integration:
- Automatic schema generation
- LLM-powered content filtering
- Smart markdown generation
3. Core Improvements:
- Robots.txt compliance
- Proxy rotation
- Enhanced URL handling
- Shared data among hooks
   - Page route support
Each demo function can be run independently or as part of the full suite.
"""
import asyncio
import os
import json
import re
import random
from typing import Optional, Dict
from dotenv import load_dotenv
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
CacheMode,
DisplayMode,
MemoryAdaptiveDispatcher,
CrawlerMonitor,
DefaultMarkdownGenerator,
LXMLWebScrapingStrategy,
JsonCssExtractionStrategy,
LLMContentFilter
)
load_dotenv()
async def demo_memory_dispatcher():
"""Demonstrates the new memory-efficient dispatcher system.
Key Features:
- Adaptive memory management
- Real-time performance monitoring
- Concurrent session control
"""
print("\n=== Memory Dispatcher Demo ===")
try:
# Configuration
browser_config = BrowserConfig(headless=True, verbose=False)
crawler_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
markdown_generator=DefaultMarkdownGenerator()
)
# Test URLs
urls = ["http://example.com", "http://example.org", "http://example.net"] * 3
print("\n📈 Initializing crawler with memory monitoring...")
async with AsyncWebCrawler(config=browser_config) as crawler:
monitor = CrawlerMonitor(
max_visible_rows=10,
display_mode=DisplayMode.DETAILED
)
dispatcher = MemoryAdaptiveDispatcher(
memory_threshold_percent=80.0,
check_interval=0.5,
max_session_permit=5,
monitor=monitor
)
print("\n🚀 Starting batch crawl...")
results = await crawler.arun_many(
urls=urls,
config=crawler_config,
dispatcher=dispatcher
)
print(f"\n✅ Completed {len(results)} URLs successfully")
except Exception as e:
print(f"\n❌ Error in memory dispatcher demo: {str(e)}")
async def demo_streaming_support():
"""
2. Streaming Support Demo
======================
Shows how to process URLs as they complete using streaming
"""
print("\n=== 2. Streaming Support Demo ===")
browser_config = BrowserConfig(headless=True, verbose=False)
crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, stream=True)
# Test URLs
urls = ["http://example.com", "http://example.org", "http://example.net"] * 2
async with AsyncWebCrawler(config=browser_config) as crawler:
# Initialize dispatcher for streaming
dispatcher = MemoryAdaptiveDispatcher(max_session_permit=3, check_interval=0.5)
print("Starting streaming crawl...")
async for result in await crawler.arun_many(
urls=urls,
config=crawler_config,
dispatcher=dispatcher
):
# Process each result as it arrives
print(
f"Received result for {result.url} - Success: {result.success}"
)
if result.success:
print(f"Content length: {len(result.markdown)}")
async def demo_content_scraping():
"""
3. Content Scraping Strategy Demo
==============================
Demonstrates the new LXMLWebScrapingStrategy for faster content scraping.
"""
print("\n=== 3. Content Scraping Strategy Demo ===")
crawler = AsyncWebCrawler()
url = "https://example.com/article"
# Configure with the new LXML strategy
config = CrawlerRunConfig(
scraping_strategy=LXMLWebScrapingStrategy(),
verbose=True
)
print("Scraping content with LXML strategy...")
async with crawler:
result = await crawler.arun(url, config=config)
if result.success:
print("Successfully scraped content using LXML strategy")
async def demo_llm_markdown():
"""
4. LLM-Powered Markdown Generation Demo
===================================
Shows how to use the new LLM-powered content filtering and markdown generation.
"""
print("\n=== 4. LLM-Powered Markdown Generation Demo ===")
crawler = AsyncWebCrawler()
url = "https://docs.python.org/3/tutorial/classes.html"
content_filter = LLMContentFilter(
provider="openai/gpt-4o",
api_token=os.getenv("OPENAI_API_KEY"),
instruction="""
Focus on extracting the core educational content about Python classes.
Include:
- Key concepts and their explanations
- Important code examples
- Essential technical details
Exclude:
- Navigation elements
- Sidebars
- Footer content
- Version information
- Any non-essential UI elements
Format the output as clean markdown with proper code blocks and headers.
""",
verbose=True,
)
# Configure LLM-powered markdown generation
config = CrawlerRunConfig(
markdown_generator=DefaultMarkdownGenerator(
content_filter=content_filter
),
cache_mode = CacheMode.BYPASS,
verbose=True
)
print("Generating focused markdown with LLM...")
async with crawler:
result = await crawler.arun(url, config=config)
if result.success and result.markdown_v2:
print("Successfully generated LLM-filtered markdown")
print("First 500 chars of filtered content:")
print(result.markdown_v2.fit_markdown[:500])
print("Successfully generated LLM-filtered markdown")
async def demo_robots_compliance():
"""
5. Robots.txt Compliance Demo
==========================
Demonstrates the new robots.txt compliance feature with SQLite caching.
"""
print("\n=== 5. Robots.txt Compliance Demo ===")
crawler = AsyncWebCrawler()
urls = ["https://example.com", "https://facebook.com", "https://twitter.com"]
# Enable robots.txt checking
config = CrawlerRunConfig(check_robots_txt=True, verbose=True)
print("Crawling with robots.txt compliance...")
async with crawler:
results = await crawler.arun_many(urls, config=config)
for result in results:
if result.status_code == 403:
print(f"Access blocked by robots.txt: {result.url}")
elif result.success:
print(f"Successfully crawled: {result.url}")
async def demo_json_schema_generation():
"""
7. LLM-Powered Schema Generation Demo
=================================
Demonstrates automatic CSS and XPath schema generation using LLM models.
"""
print("\n=== 7. LLM-Powered Schema Generation Demo ===")
# Example HTML content for a job listing
html_content = """
<div class="job-listing">
<h1 class="job-title">Senior Software Engineer</h1>
<div class="job-details">
<span class="location">San Francisco, CA</span>
<span class="salary">$150,000 - $200,000</span>
<div class="requirements">
<h2>Requirements</h2>
<ul>
<li>5+ years Python experience</li>
<li>Strong background in web crawling</li>
</ul>
</div>
</div>
</div>
"""
print("Generating CSS selectors schema...")
# Generate CSS selectors with a specific query
css_schema = JsonCssExtractionStrategy.generate_schema(
html_content,
schema_type="CSS",
query="Extract job title, location, and salary information",
provider="openai/gpt-4o", # or use other providers like "ollama"
)
print("\nGenerated CSS Schema:")
print(css_schema)
# Example of using the generated schema with crawler
crawler = AsyncWebCrawler()
url = "https://example.com/job-listing"
# Create an extraction strategy with the generated schema
extraction_strategy = JsonCssExtractionStrategy(schema=css_schema)
config = CrawlerRunConfig(extraction_strategy=extraction_strategy, verbose=True)
print("\nTesting generated schema with crawler...")
async with crawler:
result = await crawler.arun(url, config=config)
if result.success:
print(json.dumps(result.extracted_content, indent=2) if result.extracted_content else None)
print("Successfully used generated schema for crawling")
async def demo_proxy_rotation():
"""
8. Proxy Rotation Demo
===================
Demonstrates how to rotate proxies for each request using Crawl4ai.
"""
print("\n=== 8. Proxy Rotation Demo ===")
async def get_next_proxy(proxy_file: str = "proxies.txt") -> Optional[Dict]:
"""Get next proxy from local file"""
try:
proxies = os.getenv("PROXIES", "").split(",")
ip, port, username, password = random.choice(proxies).split(":")
return {
"server": f"http://{ip}:{port}",
"username": username,
"password": password,
"ip": ip # Store original IP for verification
}
except Exception as e:
print(f"Error loading proxy: {e}")
return None
# Create 10 test requests to httpbin
urls = ["https://httpbin.org/ip"] * 2
browser_config = BrowserConfig(headless=True, verbose=False)
run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
async with AsyncWebCrawler(config=browser_config) as crawler:
for url in urls:
proxy = await get_next_proxy()
if not proxy:
print("No proxy available, skipping...")
continue
# Create new config with proxy
current_config = run_config.clone(proxy_config=proxy, user_agent="")
result = await crawler.arun(url=url, config=current_config)
if result.success:
ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
print(f"Proxy {proxy['ip']} -> Response IP: {ip_match.group(0) if ip_match else 'Not found'}")
verified = ip_match.group(0) == proxy['ip']
if verified:
print(f"✅ Proxy working! IP matches: {proxy['ip']}")
else:
print("❌ Proxy failed or IP mismatch!")
else:
print(f"Failed with proxy {proxy['ip']}")
async def main():
"""Run all feature demonstrations."""
print("\n📊 Running Crawl4ai v0.4.3 Feature Demos\n")
# Efficiency & Speed Demos
print("\n🚀 EFFICIENCY & SPEED DEMOS")
await demo_memory_dispatcher()
await demo_streaming_support()
await demo_content_scraping()
# # LLM Integration Demos
print("\n🤖 LLM INTEGRATION DEMOS")
await demo_json_schema_generation()
await demo_llm_markdown()
# # Core Improvements
print("\n🔧 CORE IMPROVEMENT DEMOS")
await demo_robots_compliance()
await demo_proxy_rotation()
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | c85f56b085a1d5b62779774d48887345e566869b | 2026-01-04T14:38:51.943025Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'face_recognition_models>=0.3.0',
'Click>=6.0',
'dlib>=19.7',
'numpy',
'Pillow'
]
test_requirements = [
'tox',
'flake8'
]
setup(
name='face_recognition',
version='1.4.0',
description="Recognize faces from Python or from the command line",
long_description=readme + '\n\n' + history,
author="Adam Geitgey",
author_email='ageitgey@gmail.com',
url='https://github.com/ageitgey/face_recognition',
packages=[
'face_recognition',
],
package_dir={'face_recognition': 'face_recognition'},
package_data={
'face_recognition': ['models/*.dat']
},
entry_points={
'console_scripts': [
'face_recognition=face_recognition.face_recognition_cli:main',
'face_detection=face_recognition.face_detection_cli:main'
]
},
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='face_recognition',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
test_suite='tests',
tests_require=test_requirements
)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/tests/test_face_recognition.py | tests/test_face_recognition.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_face_recognition
----------------------------------
Tests for `face_recognition` module.
"""
import unittest
import os
import numpy as np
from click.testing import CliRunner
from face_recognition import api
from face_recognition import face_recognition_cli
from face_recognition import face_detection_cli
class Test_face_recognition(unittest.TestCase):
def test_load_image_file(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
self.assertEqual(img.shape, (1137, 910, 3))
def test_load_image_file_32bit(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
self.assertEqual(img.shape, (1200, 626, 3))
def test_raw_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api._raw_face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].top(), 142)
self.assertEqual(detected_faces[0].bottom(), 409)
def test_cnn_raw_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api._raw_face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0].rect.top(), 144, delta=25)
self.assertAlmostEqual(detected_faces[0].rect.bottom(), 389, delta=25)
def test_raw_face_locations_32bit_image(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
detected_faces = api._raw_face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].top(), 290)
self.assertEqual(detected_faces[0].bottom(), 558)
def test_cnn_raw_face_locations_32bit_image(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
detected_faces = api._raw_face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0].rect.top(), 259, delta=25)
self.assertAlmostEqual(detected_faces[0].rect.bottom(), 552, delta=25)
def test_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 617, 409, 349))
def test_cnn_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api.face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0][0], 144, delta=25)
self.assertAlmostEqual(detected_faces[0][1], 608, delta=25)
self.assertAlmostEqual(detected_faces[0][2], 389, delta=25)
self.assertAlmostEqual(detected_faces[0][3], 363, delta=25)
def test_partial_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 191, 365, 0))
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face2.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 551, 409, 349))
def test_raw_face_locations_batched(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
images = [img, img, img]
batched_detected_faces = api._raw_face_locations_batched(images, number_of_times_to_upsample=0)
for detected_faces in batched_detected_faces:
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].rect.top(), 154)
self.assertEqual(detected_faces[0].rect.bottom(), 390)
def test_batched_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
images = [img, img, img]
batched_detected_faces = api.batch_face_locations(images, number_of_times_to_upsample=0)
for detected_faces in batched_detected_faces:
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (154, 611, 390, 375))
def test_raw_face_landmarks(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api._raw_face_landmarks(img)
example_landmark = face_landmarks[0].parts()[10]
self.assertEqual(len(face_landmarks), 1)
self.assertEqual(face_landmarks[0].num_parts, 68)
self.assertEqual((example_landmark.x, example_landmark.y), (552, 399))
def test_face_landmarks(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api.face_landmarks(img)
self.assertEqual(
set(face_landmarks[0].keys()),
set(['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
'nose_tip', 'left_eye', 'right_eye', 'top_lip',
'bottom_lip']))
self.assertEqual(
face_landmarks[0]['chin'],
[(369, 220), (372, 254), (378, 289), (384, 322), (395, 353),
(414, 382), (437, 407), (464, 424), (495, 428), (527, 420),
(552, 399), (576, 372), (594, 344), (604, 314), (610, 282),
(613, 250), (615, 219)])
def test_face_landmarks_small_model(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api.face_landmarks(img, model="small")
self.assertEqual(
set(face_landmarks[0].keys()),
set(['nose_tip', 'left_eye', 'right_eye']))
self.assertEqual(face_landmarks[0]['nose_tip'], [(496, 295)])
def test_face_encodings(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
encodings = api.face_encodings(img)
self.assertEqual(len(encodings), 1)
self.assertEqual(len(encodings[0]), 128)
def test_face_encodings_large_model(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
encodings = api.face_encodings(img, model='large')
self.assertEqual(len(encodings), 1)
self.assertEqual(len(encodings[0]), 128)
def test_face_distance(self):
img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama2.jpg'))
img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg'))
img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding_a1 = api.face_encodings(img_a1)[0]
face_encoding_a2 = api.face_encodings(img_a2)[0]
face_encoding_a3 = api.face_encodings(img_a3)[0]
face_encoding_b1 = api.face_encodings(img_b1)[0]
faces_to_compare = [
face_encoding_a2,
face_encoding_a3,
face_encoding_b1]
distance_results = api.face_distance(faces_to_compare, face_encoding_a1)
# 0.6 is the default face distance match threshold. So we'll spot-check that the numbers returned
# are above or below that based on if they should match (since the exact numbers could vary).
self.assertEqual(type(distance_results), np.ndarray)
self.assertLessEqual(distance_results[0], 0.6)
self.assertLessEqual(distance_results[1], 0.6)
self.assertGreater(distance_results[2], 0.6)
def test_face_distance_empty_lists(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding = api.face_encodings(img)[0]
# empty python list
faces_to_compare = []
distance_results = api.face_distance(faces_to_compare, face_encoding)
self.assertEqual(type(distance_results), np.ndarray)
self.assertEqual(len(distance_results), 0)
# empty numpy list
faces_to_compare = np.array([])
distance_results = api.face_distance(faces_to_compare, face_encoding)
self.assertEqual(type(distance_results), np.ndarray)
self.assertEqual(len(distance_results), 0)
def test_compare_faces(self):
img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama2.jpg'))
img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg'))
img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding_a1 = api.face_encodings(img_a1)[0]
face_encoding_a2 = api.face_encodings(img_a2)[0]
face_encoding_a3 = api.face_encodings(img_a3)[0]
face_encoding_b1 = api.face_encodings(img_b1)[0]
faces_to_compare = [
face_encoding_a2,
face_encoding_a3,
face_encoding_b1]
match_results = api.compare_faces(faces_to_compare, face_encoding_a1)
self.assertEqual(type(match_results), list)
self.assertTrue(match_results[0])
self.assertTrue(match_results[1])
self.assertFalse(match_results[2])
def test_compare_faces_empty_lists(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding = api.face_encodings(img)[0]
# empty python list
faces_to_compare = []
match_results = api.compare_faces(faces_to_compare, face_encoding)
self.assertEqual(type(match_results), list)
self.assertListEqual(match_results, [])
# empty numpy list
faces_to_compare = np.array([])
match_results = api.compare_faces(faces_to_compare, face_encoding)
self.assertEqual(type(match_results), list)
self.assertListEqual(match_results, [])
def test_command_line_interface_options(self):
target_string = 'Show this message and exit.'
runner = CliRunner()
help_result = runner.invoke(face_recognition_cli.main, ['--help'])
self.assertEqual(help_result.exit_code, 0)
self.assertTrue(target_string in help_result.output)
def test_command_line_interface(self):
target_string = 'obama.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_big_image(self):
target_string = 'obama3.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_tolerance(self):
target_string = 'obama.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file, "--tolerance", "0.55"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_show_distance(self):
target_string = 'obama.jpg,obama,0.0'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file, "--show-distance", "1"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_fd_command_line_interface_options(self):
target_string = 'Show this message and exit.'
runner = CliRunner()
help_result = runner.invoke(face_detection_cli.main, ['--help'])
self.assertEqual(help_result.exit_code, 0)
self.assertTrue(target_string in help_result.output)
def test_fd_command_line_interface(self):
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file])
self.assertEqual(result.exit_code, 0)
parts = result.output.split(",")
self.assertTrue("obama.jpg" in parts[0])
self.assertEqual(len(parts), 5)
def test_fd_command_line_interface_folder(self):
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images')
result = runner.invoke(face_detection_cli.main, args=[image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue("obama_partial_face2.jpg" in result.output)
self.assertTrue("obama.jpg" in result.output)
self.assertTrue("obama2.jpg" in result.output)
self.assertTrue("obama3.jpg" in result.output)
self.assertTrue("biden.jpg" in result.output)
def test_fd_command_line_interface_hog_model(self):
target_string = 'obama.jpg'
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file, "--model", "hog"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_fd_command_line_interface_cnn_model(self):
target_string = 'obama.jpg'
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file, "--model", "cnn"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/tests/__init__.py | tests/__init__.py | # -*- coding: utf-8 -*-
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/face_recognition/face_recognition_cli.py | face_recognition/face_recognition_cli.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import click
import os
import re
import face_recognition.api as face_recognition
import multiprocessing
import itertools
import sys
import PIL.Image
import numpy as np
def scan_known_people(known_people_folder):
known_names = []
known_face_encodings = []
for file in image_files_in_folder(known_people_folder):
basename = os.path.splitext(os.path.basename(file))[0]
img = face_recognition.load_image_file(file)
encodings = face_recognition.face_encodings(img)
if len(encodings) > 1:
click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))
if len(encodings) == 0:
click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
else:
known_names.append(basename)
known_face_encodings.append(encodings[0])
return known_names, known_face_encodings
def print_result(filename, name, distance, show_distance=False):
if show_distance:
print("{},{},{}".format(filename, name, distance))
else:
print("{},{}".format(filename, name))
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
unknown_image = face_recognition.load_image_file(image_to_check)
# Scale down image if it's giant so things run a little faster
if max(unknown_image.shape) > 1600:
pil_img = PIL.Image.fromarray(unknown_image)
pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
unknown_image = np.array(pil_img)
unknown_encodings = face_recognition.face_encodings(unknown_image)
for unknown_encoding in unknown_encodings:
distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
result = list(distances <= tolerance)
if True in result:
[print_result(image_to_check, name, distance, show_distance) for is_match, name, distance in zip(result, known_names, distances) if is_match]
else:
print_result(image_to_check, "unknown_person", None, show_distance)
if not unknown_encodings:
# print out fact that no faces were found in image
print_result(image_to_check, "no_persons_found", None, show_distance)
def image_files_in_folder(folder):
return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
def process_images_in_process_pool(images_to_check, known_names, known_face_encodings, number_of_cpus, tolerance, show_distance):
if number_of_cpus == -1:
processes = None
else:
processes = number_of_cpus
# macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
context = multiprocessing
if "forkserver" in multiprocessing.get_all_start_methods():
context = multiprocessing.get_context("forkserver")
pool = context.Pool(processes=processes)
function_parameters = zip(
images_to_check,
itertools.repeat(known_names),
itertools.repeat(known_face_encodings),
itertools.repeat(tolerance),
itertools.repeat(show_distance)
)
pool.starmap(test_image, function_parameters)
@click.command()
@click.argument('known_people_folder')
@click.argument('image_to_check')
@click.option('--cpus', default=1, help='number of CPU cores to use in parallel (can speed up processing lots of images). -1 means "use all in system"')
@click.option('--tolerance', default=0.6, help='Tolerance for face comparisons. Default is 0.6. Lower this if you get multiple matches for the same person.')
@click.option('--show-distance', default=False, type=bool, help='Output face distance. Useful for tweaking tolerance setting.')
def main(known_people_folder, image_to_check, cpus, tolerance, show_distance):
known_names, known_face_encodings = scan_known_people(known_people_folder)
# Multi-core processing only supported on Python 3.4 or greater
if (sys.version_info < (3, 4)) and cpus != 1:
click.echo("WARNING: Multi-processing support requires Python 3.4 or greater. Falling back to single-threaded processing!")
cpus = 1
if os.path.isdir(image_to_check):
if cpus == 1:
[test_image(image_file, known_names, known_face_encodings, tolerance, show_distance) for image_file in image_files_in_folder(image_to_check)]
else:
process_images_in_process_pool(image_files_in_folder(image_to_check), known_names, known_face_encodings, cpus, tolerance, show_distance)
else:
test_image(image_to_check, known_names, known_face_encodings, tolerance, show_distance)
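# Example invocation of the console script installed by setup.py (paths are illustrative):
#   face_recognition ./known_people/ ./unknown_pictures/ --cpus 4 --tolerance 0.54 --show-distance true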
if __name__ == "__main__":
main()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/face_recognition/api.py | face_recognition/api.py | # -*- coding: utf-8 -*-
import PIL.Image
import dlib
import numpy as np
from PIL import ImageFile
try:
import face_recognition_models
except Exception:
print("Please install `face_recognition_models` with this command before using `face_recognition`:\n")
print("pip install git+https://github.com/ageitgey/face_recognition_models")
quit()
ImageFile.LOAD_TRUNCATED_IMAGES = True
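# Load dlib's built-in HOG-based frontal face detector along with the bundled models:
# the 68-point and 5-point shape predictors, the CNN face detector, and the
# ResNet-based face recognition model that produces the 128-dimensional encodings.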
face_detector = dlib.get_frontal_face_detector()
predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)
face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
def _rect_to_css(rect):
"""
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
:param rect: a dlib 'rect' object
:return: a plain tuple representation of the rect in (top, right, bottom, left) order
"""
return rect.top(), rect.right(), rect.bottom(), rect.left()
def _css_to_rect(css):
"""
Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object
:param css: plain tuple representation of the rect in (top, right, bottom, left) order
:return: a dlib `rect` object
"""
return dlib.rectangle(css[3], css[0], css[1], css[2])
def _trim_css_to_bounds(css, image_shape):
"""
Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image.
:param css: plain tuple representation of the rect in (top, right, bottom, left) order
:param image_shape: numpy shape of the image array
:return: a trimmed plain tuple representation of the rect in (top, right, bottom, left) order
"""
return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
def face_distance(face_encodings, face_to_compare):
"""
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
:param face_encodings: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
"""
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
def load_image_file(file, mode='RGB'):
"""
Loads an image file (.jpg, .png, etc) into a numpy array
:param file: image file name or file object to load
:param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported.
:return: image contents as numpy array
"""
im = PIL.Image.open(file)
if mode:
im = im.convert(mode)
return np.array(im)
def _raw_face_locations(img, number_of_times_to_upsample=1, model="hog"):
"""
    Returns an array of bounding boxes of human faces in an image
:param img: An image (as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate
deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog".
:return: A list of dlib 'rect' objects of found face locations
"""
if model == "cnn":
return cnn_face_detector(img, number_of_times_to_upsample)
else:
return face_detector(img, number_of_times_to_upsample)
def face_locations(img, number_of_times_to_upsample=1, model="hog"):
"""
    Returns an array of bounding boxes of human faces in an image
:param img: An image (as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate
deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog".
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
"""
if model == "cnn":
return [_trim_css_to_bounds(_rect_to_css(face.rect), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, "cnn")]
else:
return [_trim_css_to_bounds(_rect_to_css(face), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, model)]
def _raw_face_locations_batched(images, number_of_times_to_upsample=1, batch_size=128):
"""
    Returns a 2d array of dlib rects of human faces in an image using the cnn face detector
:param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:return: A list of dlib 'rect' objects of found face locations
"""
return cnn_face_detector(images, number_of_times_to_upsample, batch_size=batch_size)
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):
"""
    Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
If you are using a GPU, this can give you much faster results since the GPU
can process batches of images at once. If you aren't using a GPU, you don't need this function.
:param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param batch_size: How many images to include in each GPU processing batch.
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
"""
def convert_cnn_detections_to_css(detections):
return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections]
raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)
return list(map(convert_cnn_detections_to_css, raw_detections_batched))
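# Internal helper: run the selected dlib shape predictor on each detected (or supplied)
# face location and return the raw dlib landmark objects.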
def _raw_face_landmarks(face_image, face_locations=None, model="large"):
if face_locations is None:
face_locations = _raw_face_locations(face_image)
else:
face_locations = [_css_to_rect(face_location) for face_location in face_locations]
pose_predictor = pose_predictor_68_point
if model == "small":
pose_predictor = pose_predictor_5_point
return [pose_predictor(face_image, face_location) for face_location in face_locations]
def face_landmarks(face_image, face_locations=None, model="large"):
"""
Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image
:param face_image: image to search
:param face_locations: Optionally provide a list of face locations to check.
:param model: Optional - which model to use. "large" (default) or "small" which only returns 5 points but is faster.
:return: A list of dicts of face feature locations (eyes, nose, etc)
"""
landmarks = _raw_face_landmarks(face_image, face_locations, model)
landmarks_as_tuples = [[(p.x, p.y) for p in landmark.parts()] for landmark in landmarks]
# For a definition of each point index, see https://cdn-images-1.medium.com/max/1600/1*AbEg31EgkbXSQehuNJBlWg.png
if model == 'large':
return [{
"chin": points[0:17],
"left_eyebrow": points[17:22],
"right_eyebrow": points[22:27],
"nose_bridge": points[27:31],
"nose_tip": points[31:36],
"left_eye": points[36:42],
"right_eye": points[42:48],
"top_lip": points[48:55] + [points[64]] + [points[63]] + [points[62]] + [points[61]] + [points[60]],
"bottom_lip": points[54:60] + [points[48]] + [points[60]] + [points[67]] + [points[66]] + [points[65]] + [points[64]]
} for points in landmarks_as_tuples]
elif model == 'small':
return [{
"nose_tip": [points[4]],
"left_eye": points[2:4],
"right_eye": points[0:2],
} for points in landmarks_as_tuples]
else:
raise ValueError("Invalid landmarks model type. Supported models are ['small', 'large'].")
def face_encodings(face_image, known_face_locations=None, num_jitters=1, model="small"):
"""
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
    :param model: Optional - which landmark model to use when aligning the face. "large" (68 points) or "small" (default), which uses only 5 points and is faster.
:return: A list of 128-dimensional face encodings (one for each face in the image)
"""
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model)
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
def compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):
"""
Compare a list of face encodings against a candidate encoding to see if they match.
:param known_face_encodings: A list of known face encodings
:param face_encoding_to_check: A single face encoding to compare against the list
:param tolerance: How much distance between faces to consider it a match. Lower is more strict. 0.6 is typical best performance.
:return: A list of True/False values indicating which known_face_encodings match the face encoding to check
"""
return list(face_distance(known_face_encodings, face_encoding_to_check) <= tolerance)
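# Typical end-to-end usage (a minimal sketch; the file names are illustrative):
#   known = face_encodings(load_image_file("known.jpg"))[0]
#   unknown = face_encodings(load_image_file("unknown.jpg"))[0]
#   compare_faces([known], unknown)   # -> [True] when the faces match within tolerance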
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/face_recognition/face_detection_cli.py | face_recognition/face_detection_cli.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import click
import os
import re
import face_recognition.api as face_recognition
import multiprocessing
import sys
import itertools
def print_result(filename, location):
top, right, bottom, left = location
print("{},{},{},{},{}".format(filename, top, right, bottom, left))
def test_image(image_to_check, model, upsample):
unknown_image = face_recognition.load_image_file(image_to_check)
face_locations = face_recognition.face_locations(unknown_image, number_of_times_to_upsample=upsample, model=model)
for face_location in face_locations:
print_result(image_to_check, face_location)
def image_files_in_folder(folder):
return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
def process_images_in_process_pool(images_to_check, number_of_cpus, model, upsample):
if number_of_cpus == -1:
processes = None
else:
processes = number_of_cpus
# macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
context = multiprocessing
if "forkserver" in multiprocessing.get_all_start_methods():
context = multiprocessing.get_context("forkserver")
pool = context.Pool(processes=processes)
function_parameters = zip(
images_to_check,
itertools.repeat(model),
itertools.repeat(upsample),
)
pool.starmap(test_image, function_parameters)
@click.command()
@click.argument('image_to_check')
@click.option('--cpus', default=1, help='number of CPU cores to use in parallel. -1 means "use all in system"')
@click.option('--model', default="hog", help='Which face detection model to use. Options are "hog" or "cnn".')
@click.option('--upsample', default=0, help='How many times to upsample the image looking for faces. Higher numbers find smaller faces.')
def main(image_to_check, cpus, model, upsample):
# Multi-core processing only supported on Python 3.4 or greater
if (sys.version_info < (3, 4)) and cpus != 1:
click.echo("WARNING: Multi-processing support requires Python 3.4 or greater. Falling back to single-threaded processing!")
cpus = 1
if os.path.isdir(image_to_check):
if cpus == 1:
[test_image(image_file, model, upsample) for image_file in image_files_in_folder(image_to_check)]
else:
process_images_in_process_pool(image_files_in_folder(image_to_check), cpus, model, upsample)
else:
test_image(image_to_check, model, upsample)
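# Example invocation of the console script installed by setup.py (the path is illustrative):
#   face_detection ./unknown_pictures/ --model cnn --upsample 1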
if __name__ == "__main__":
main()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/face_recognition/__init__.py | face_recognition/__init__.py | # -*- coding: utf-8 -*-
__author__ = """Adam Geitgey"""
__email__ = 'ageitgey@gmail.com'
__version__ = '1.4.0'
from .api import load_image_file, face_locations, batch_face_locations, face_landmarks, face_encodings, compare_faces, face_distance
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/docs/conf.py | docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# face_recognition documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['face_recognition_models', 'Click', 'dlib', 'numpy', 'PIL']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import face_recognition
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Face Recognition'
copyright = u"2017, Adam Geitgey"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = face_recognition.__version__
# The full version, including alpha/beta/rc tags.
release = face_recognition.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'face_recognitiondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'face_recognition.tex',
u'Face Recognition Documentation',
u'Adam Geitgey', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'face_recognition',
u'Face Recognition Documentation',
[u'Adam Geitgey'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'face_recognition',
u'Face Recognition Documentation',
u'Adam Geitgey',
'face_recognition',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/find_faces_in_picture_cnn.py | examples/find_faces_in_picture_cnn.py | from PIL import Image
import face_recognition
# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")
# Find all the faces in the image using a pre-trained convolutional neural network.
# This method is more accurate than the default HOG model, but it's slower
# unless you have an nvidia GPU and dlib compiled with CUDA extensions. But if you do,
# this will use GPU acceleration and perform well.
# See also: find_faces_in_picture.py
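# A note on the call below: number_of_times_to_upsample=0 skips upsampling the image before
# detection, which is faster; higher values upsample the image to find smaller faces at the
# cost of speed.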
face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model="cnn")
print("I found {} face(s) in this photograph.".format(len(face_locations)))
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# You can access the actual face itself like this:
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.show()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/blur_faces_on_webcam.py | examples/blur_faces_on_webcam.py | import face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
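        # (99, 99) is the Gaussian kernel size (the values must be odd) and 30 is sigmaX;
        # a larger kernel/sigma gives a stronger blur.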
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/web_service_example_Simplified_Chinese.py | examples/web_service_example_Simplified_Chinese.py | # This is a very simple example of a web service that runs face recognition on an uploaded image. The backend checks whether the picture is of Obama and returns the result as JSON key/value pairs.
# For example, running:
# $ curl -XPOST -F "file=@obama2.jpg" http://127.0.0.1:5001
# Returns:
# {
# "face_found_in_image": true,
# "is_picture_of_obama": true
# }
#
# This example is based on the Flask file upload example: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
# NOTE: Running this example requires Flask to be installed. You can install it with pip:
# $ pip3 install flask
import face_recognition
from flask import Flask, jsonify, request, redirect
# Only allow image files with these extensions to be uploaded
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_image():
    # Check if a valid image file was uploaded
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file and allowed_file(file.filename):
            # The image file seems valid! Detect faces and return the result.
return detect_faces_in_image(file)
    # If no valid image file was uploaded, show the file upload form below:
return '''
<!doctype html>
<title>Is this a picture of Obama?</title>
<h1>Upload a picture and see if it's a picture of Obama!</h1>
<form method="POST" enctype="multipart/form-data">
<input type="file" name="file">
<input type="submit" value="Upload">
</form>
'''
def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama, generated with face_recognition.face_encodings(img)
known_face_encoding = [-0.09634063, 0.12095481, -0.00436332, -0.07643753, 0.0080383,
0.01902981, -0.07184699, -0.09383309, 0.18518871, -0.09588896,
0.23951106, 0.0986533 , -0.22114635, -0.1363683 , 0.04405268,
0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
0.03416885, -0.00267565, 0.09203379, 0.04713435, -0.12731361,
-0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
-0.06910533, -0.00503746, -0.18466514, -0.09851682, 0.02903969,
-0.02174894, 0.02261871, 0.0032102 , 0.20312519, 0.02999607,
-0.11646006, 0.09432904, 0.02774341, 0.22102901, 0.26725179,
0.06896867, -0.00490024, -0.09441824, 0.11115381, -0.22592428,
0.06230862, 0.16559327, 0.06232892, 0.03458837, 0.09459756,
-0.18777156, 0.00654241, 0.08582542, -0.13578284, 0.0150229 ,
0.00670836, -0.08195844, -0.04346499, 0.03347827, 0.20310158,
0.09987706, -0.12370517, -0.06683611, 0.12704916, -0.02160804,
0.00984683, 0.00766284, -0.18980607, -0.19641446, -0.22800779,
0.09010898, 0.39178532, 0.18818057, -0.20875394, 0.03097027,
-0.21300618, 0.02532415, 0.07938635, 0.01000703, -0.07719778,
-0.12651891, -0.04318593, 0.06219772, 0.09163868, 0.05039065,
-0.04922386, 0.21839413, -0.02394437, 0.06173781, 0.0292527 ,
0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
0.01428208, -0.03637431, 0.03971229, 0.13983178, -0.23006812,
0.04999552, 0.0108454 , -0.03970895, 0.02501768, 0.08157793,
-0.03224047, -0.04502571, 0.0556995 , -0.24374914, 0.25514284,
0.24795187, 0.04060191, 0.17597422, 0.07966681, 0.01920104,
-0.01194376, -0.02300822, -0.17204897, -0.0596558 , 0.05307484,
0.07417042, 0.07126575, 0.00209804]
    # Load the image file uploaded by the user
img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
unknown_face_encodings = face_recognition.face_encodings(img)
face_found = False
is_obama = False
if len(unknown_face_encodings) > 0:
face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
if match_results[0]:
is_obama = True
    # Return the recognition result as JSON key/value pairs
result = {
"face_found_in_image": face_found,
"is_picture_of_obama": is_obama
}
return jsonify(result)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/web_service_example.py | examples/web_service_example.py | # This is a _very simple_ example of a web service that recognizes faces in uploaded images.
# Upload an image file and it will check if the image contains a picture of Barack Obama.
# The result is returned as json. For example:
#
# $ curl -XPOST -F "file=@obama2.jpg" http://127.0.0.1:5001
#
# Returns:
#
# {
# "face_found_in_image": true,
# "is_picture_of_obama": true
# }
#
# This example is based on the Flask file upload example: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
# NOTE: This example requires flask to be installed! You can install it with pip:
# $ pip3 install flask
import face_recognition
from flask import Flask, jsonify, request, redirect
# Only allow image files with these extensions to be uploaded
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_image():
# Check if a valid image file was uploaded
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file and allowed_file(file.filename):
# The image file seems valid! Detect faces and return the result.
return detect_faces_in_image(file)
# If no valid image file was uploaded, show the file upload form:
return '''
<!doctype html>
<title>Is this a picture of Obama?</title>
<h1>Upload a picture and see if it's a picture of Obama!</h1>
<form method="POST" enctype="multipart/form-data">
<input type="file" name="file">
<input type="submit" value="Upload">
</form>
'''
def detect_faces_in_image(file_stream):
# Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
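    # (face_recognition.face_encodings returns one 128-dimensional vector per detected face;
    # the hard-coded list below is one such vector.)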
known_face_encoding = [-0.09634063, 0.12095481, -0.00436332, -0.07643753, 0.0080383,
0.01902981, -0.07184699, -0.09383309, 0.18518871, -0.09588896,
0.23951106, 0.0986533 , -0.22114635, -0.1363683 , 0.04405268,
0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
0.03416885, -0.00267565, 0.09203379, 0.04713435, -0.12731361,
-0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
-0.06910533, -0.00503746, -0.18466514, -0.09851682, 0.02903969,
-0.02174894, 0.02261871, 0.0032102 , 0.20312519, 0.02999607,
-0.11646006, 0.09432904, 0.02774341, 0.22102901, 0.26725179,
0.06896867, -0.00490024, -0.09441824, 0.11115381, -0.22592428,
0.06230862, 0.16559327, 0.06232892, 0.03458837, 0.09459756,
-0.18777156, 0.00654241, 0.08582542, -0.13578284, 0.0150229 ,
0.00670836, -0.08195844, -0.04346499, 0.03347827, 0.20310158,
0.09987706, -0.12370517, -0.06683611, 0.12704916, -0.02160804,
0.00984683, 0.00766284, -0.18980607, -0.19641446, -0.22800779,
0.09010898, 0.39178532, 0.18818057, -0.20875394, 0.03097027,
-0.21300618, 0.02532415, 0.07938635, 0.01000703, -0.07719778,
-0.12651891, -0.04318593, 0.06219772, 0.09163868, 0.05039065,
-0.04922386, 0.21839413, -0.02394437, 0.06173781, 0.0292527 ,
0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
0.01428208, -0.03637431, 0.03971229, 0.13983178, -0.23006812,
0.04999552, 0.0108454 , -0.03970895, 0.02501768, 0.08157793,
-0.03224047, -0.04502571, 0.0556995 , -0.24374914, 0.25514284,
0.24795187, 0.04060191, 0.17597422, 0.07966681, 0.01920104,
-0.01194376, -0.02300822, -0.17204897, -0.0596558 , 0.05307484,
0.07417042, 0.07126575, 0.00209804]
# Load the uploaded image file
img = face_recognition.load_image_file(file_stream)
# Get face encodings for any faces in the uploaded image
unknown_face_encodings = face_recognition.face_encodings(img)
face_found = False
is_obama = False
if len(unknown_face_encodings) > 0:
face_found = True
# See if the first face in the uploaded image matches the known face of Obama
match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
if match_results[0]:
is_obama = True
# Return the result as json
result = {
"face_found_in_image": face_found,
"is_picture_of_obama": is_obama
}
return jsonify(result)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/find_faces_in_batches.py | examples/find_faces_in_batches.py | import face_recognition
import cv2
# This code finds all faces in a list of images using the CNN model.
#
# This demo is for the _special case_ when you need to find faces in LOTS of images very quickly and all the images
# are the exact same size. This is common in video processing applications where you have lots of video frames
# to process.
#
# If you are processing a lot of images and using a GPU with CUDA, batch processing can be ~3x faster than processing
# single images at a time. But if you aren't using a GPU, then batch processing isn't going to be very helpful.
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read the video file.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
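# A minimal sketch (parameter name from the batch_face_locations signature): if 128 frames at a time
# don't fit in your GPU memory, you can pass a smaller batch explicitly, e.g.
#   face_recognition.batch_face_locations(frames, number_of_times_to_upsample=0, batch_size=64)
# and shrink the "len(frames) == 128" check below to match.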
# Open video file
video_capture = cv2.VideoCapture("short_hamilton_clip.mp4")
frames = []
frame_count = 0
while video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
# Bail out when the video file ends
if not ret:
break
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
frame = frame[:, :, ::-1]
# Save each frame of the video to a list
frame_count += 1
frames.append(frame)
# Every 128 frames (the default batch size), batch process the list of frames to find faces
if len(frames) == 128:
batch_of_face_locations = face_recognition.batch_face_locations(frames, number_of_times_to_upsample=0)
# Now let's list all the faces we found in all 128 frames
for frame_number_in_batch, face_locations in enumerate(batch_of_face_locations):
number_of_faces_in_frame = len(face_locations)
frame_number = frame_count - 128 + frame_number_in_batch
print("I found {} face(s) in frame #{}.".format(number_of_faces_in_frame, frame_number))
for face_location in face_locations:
# Print the location of each face in this frame
top, right, bottom, left = face_location
print(" - A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# Clear the frames array to start the next batch
frames = []
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/recognize_faces_in_pictures.py | examples/recognize_faces_in_pictures.py | import face_recognition
# Load the jpg files into numpy arrays
biden_image = face_recognition.load_image_file("biden.jpg")
obama_image = face_recognition.load_image_file("obama.jpg")
unknown_image = face_recognition.load_image_file("obama2.jpg")
# Get the face encodings for each face in each image file
# Since there could be more than one face in each image, it returns a list of encodings.
# But since I know each image only has one face, I only care about the first encoding in each image, so I grab index 0.
try:
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]
except IndexError:
print("I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting...")
quit()
known_faces = [
biden_face_encoding,
obama_face_encoding
]
# results is an array of True/False telling if the unknown face matched anyone in the known_faces array
results = face_recognition.compare_faces(known_faces, unknown_face_encoding)
print("Is the unknown face a picture of Biden? {}".format(results[0]))
print("Is the unknown face a picture of Obama? {}".format(results[1]))
print("Is the unknown face a new person that we've never seen before? {}".format(not True in results))
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/find_faces_in_picture.py | examples/find_faces_in_picture.py | from PIL import Image
import face_recognition
# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")
# Find all the faces in the image using the default HOG-based model.
# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
# See also: find_faces_in_picture_cnn.py
face_locations = face_recognition.face_locations(image)
print("I found {} face(s) in this photograph.".format(len(face_locations)))
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# You can access the actual face itself like this:
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.show()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/facerec_on_raspberry_pi.py | examples/facerec_on_raspberry_pi.py | # This is a demo of running face recognition on a Raspberry Pi.
# This program will print out the names of anyone it recognizes to the console.
# To run this, you need a Raspberry Pi 2 (or greater) with face_recognition and
# the picamera[array] module installed.
# You can follow this installation instructions to get your RPi set up:
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65
import face_recognition
import picamera
import numpy as np
# Get a reference to the Raspberry Pi camera.
# If this fails, make sure you have a camera connected to the RPi and that you
# enabled your camera in raspi-config and rebooted first.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)
# Load a sample picture and learn how to recognize it.
print("Loading known face image(s)")
obama_image = face_recognition.load_image_file("obama_small.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Initialize some variables
face_locations = []
face_encodings = []
while True:
print("Capturing image.")
# Grab a single frame of video from the RPi camera as a numpy array
camera.capture(output, format="rgb")
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(output)
print("Found {} faces in image.".format(len(face_locations)))
face_encodings = face_recognition.face_encodings(output, face_locations)
# Loop over each face found in the frame to see if it's someone we know.
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
name = "<Unknown Person>"
if match[0]:
name = "Barack Obama"
print("I see someone named {}!".format(name))
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/face_distance.py | examples/face_distance.py | import face_recognition
# Often instead of just checking if two faces match or not (True or False), it's helpful to see how similar they are.
# You can do that by using the face_distance function.
# The model was trained in a way that faces with a distance of 0.6 or less should be a match. But if you want to
# be more strict, you can look for a smaller face distance. For example, using a 0.55 cutoff would reduce false
# positive matches at the risk of more false negatives.
# Note: This isn't exactly the same as a "percent match". The scale isn't linear. But you can assume that images with a
# smaller distance are more similar to each other than ones with a larger distance.
# Load some images to compare against
known_obama_image = face_recognition.load_image_file("obama.jpg")
known_biden_image = face_recognition.load_image_file("biden.jpg")
# Get the face encodings for the known images
obama_face_encoding = face_recognition.face_encodings(known_obama_image)[0]
biden_face_encoding = face_recognition.face_encodings(known_biden_image)[0]
known_encodings = [
obama_face_encoding,
biden_face_encoding
]
# Load a test image and get encodings for it
image_to_test = face_recognition.load_image_file("obama2.jpg")
image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0]
# See how far apart the test image is from the known faces
face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding)
for i, face_distance in enumerate(face_distances):
print("The test image has a distance of {:.2} from known image #{}".format(face_distance, i))
print("- With a normal cutoff of 0.6, would the test image match the known image? {}".format(face_distance < 0.6))
print("- With a very strict cutoff of 0.5, would the test image match the known image? {}".format(face_distance < 0.5))
print()
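# A minimal sketch of the same idea using the optional `tolerance` argument of compare_faces
# (tolerance is just a distance cutoff, so 0.55 here mirrors the strict cutoff shown above):
strict_matches = face_recognition.compare_faces(known_encodings, image_to_test_encoding, tolerance=0.55)
print("Strict (tolerance=0.55) matches against the known encodings: {}".format(strict_matches))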
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/facerec_from_webcam_multiprocessing.py | examples/facerec_from_webcam_multiprocessing.py | import face_recognition
import cv2
from multiprocessing import Process, Manager, cpu_count, set_start_method
import time
import numpy
import threading
import platform
# This is a little bit complicated (but fast) example of running face recognition on live video from your webcam.
# This example is using multiprocess.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
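# How the pieces below fit together (summarized from the code): one capture thread writes webcam
# frames into read_frame_list, and worker_num worker processes take turns reading a frame, running
# face recognition on it, and writing the annotated frame into write_frame_list for the main loop
# to display. The turn-taking is coordinated by Global.read_num / Global.write_num, which cycle
# through worker ids 1..worker_num via next_id() and prev_id().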
# Get next worker's id
def next_id(current_id, worker_num):
if current_id == worker_num:
return 1
else:
return current_id + 1
# Get previous worker's id
def prev_id(current_id, worker_num):
if current_id == 1:
return worker_num
else:
return current_id - 1
# A subprocess used to capture frames.
def capture(read_frame_list, Global, worker_num):
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# video_capture.set(3, 640) # Width of the frames in the video stream.
# video_capture.set(4, 480) # Height of the frames in the video stream.
# video_capture.set(5, 30) # Frame rate.
print("Width: %d, Height: %d, FPS: %d" % (video_capture.get(3), video_capture.get(4), video_capture.get(5)))
while not Global.is_exit:
# If it's time to read a frame
if Global.buff_num != next_id(Global.read_num, worker_num):
# Grab a single frame of video
ret, frame = video_capture.read()
read_frame_list[Global.buff_num] = frame
Global.buff_num = next_id(Global.buff_num, worker_num)
else:
time.sleep(0.01)
# Release webcam
video_capture.release()
# Worker subprocesses used to process frames.
def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):
known_face_encodings = Global.known_face_encodings
known_face_names = Global.known_face_names
while not Global.is_exit:
# Wait to read
while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):
# If the user has requested to end the app, then stop waiting for webcam frames
if Global.is_exit:
break
time.sleep(0.01)
# Delay to make the video look smoother
time.sleep(Global.frame_delay)
# Read a single frame from frame list
frame_process = read_frame_list[worker_id]
# Expect next worker to read frame
Global.read_num = next_id(Global.read_num, worker_num)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_frame = frame_process[:, :, ::-1]
# Find all the faces and face encodings in the frame of video, cost most time
face_locations = face_recognition.face_locations(rgb_frame)
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
# Draw a box around the face
cv2.rectangle(frame_process, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame_process, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame_process, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Wait to write
while Global.write_num != worker_id:
time.sleep(0.01)
# Send frame to global
write_frame_list[worker_id] = frame_process
# Expect next worker to write frame
Global.write_num = next_id(Global.write_num, worker_num)
if __name__ == '__main__':
# Fix Bug on MacOS
if platform.system() == 'Darwin':
set_start_method('forkserver')
# Global variables
Global = Manager().Namespace()
Global.buff_num = 1
Global.read_num = 1
Global.write_num = 1
Global.frame_delay = 0
Global.is_exit = False
read_frame_list = Manager().dict()
write_frame_list = Manager().dict()
# Number of workers (subprocess use to process frames)
if cpu_count() > 2:
worker_num = cpu_count() - 1 # 1 for capturing frames
else:
worker_num = 2
# Subprocess list
p = []
    # Create a thread to capture frames (using a subprocess instead would crash on Mac)
p.append(threading.Thread(target=capture, args=(read_frame_list, Global, worker_num,)))
p[0].start()
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
Global.known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
Global.known_face_names = [
"Barack Obama",
"Joe Biden"
]
# Create workers
for worker_id in range(1, worker_num + 1):
p.append(Process(target=process, args=(worker_id, read_frame_list, write_frame_list, Global, worker_num,)))
p[worker_id].start()
# Start to show video
last_num = 1
fps_list = []
tmp_time = time.time()
while not Global.is_exit:
while Global.write_num != last_num:
last_num = int(Global.write_num)
# Calculate fps
delay = time.time() - tmp_time
tmp_time = time.time()
fps_list.append(delay)
if len(fps_list) > 5 * worker_num:
fps_list.pop(0)
fps = len(fps_list) / numpy.sum(fps_list)
print("fps: %.2f" % fps)
# Calculate frame delay, in order to make the video look smoother.
            # When fps is higher, use a smaller ratio, or fps will be capped at a lower value.
            # A larger ratio makes the video look smoother, but makes it harder to reach a higher fps.
            # A smaller ratio allows a higher fps, but the video will look less smooth.
            # The ratios below were chosen after repeated testing.
if fps < 6:
Global.frame_delay = (1 / fps) * 0.75
elif fps < 20:
Global.frame_delay = (1 / fps) * 0.5
elif fps < 30:
Global.frame_delay = (1 / fps) * 0.25
else:
Global.frame_delay = 0
# Display the resulting image
cv2.imshow('Video', write_frame_list[prev_id(Global.write_num, worker_num)])
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
Global.is_exit = True
break
time.sleep(0.01)
# Quit
cv2.destroyAllWindows()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/face_recognition_knn.py | examples/face_recognition_knn.py | """
This is an example of using the k-nearest-neighbors (KNN) algorithm for face recognition.
When should I use this example?
This example is useful when you wish to recognize a large set of known people,
and make a prediction for an unknown person in a feasible computation time.
Algorithm Description:
The knn classifier is first trained on a set of labeled (known) faces and can then predict the person
in an unknown image by finding the k most similar faces (images with the closest face features under euclidean distance)
in its training set, and performing a majority vote (possibly weighted) on their label.
For example, if k=3, and the three closest face images to the given image in the training set are one image of Biden
and two images of Obama, the result would be 'Obama'.
* This implementation uses a weighted vote, such that the votes of closer neighbors are weighted more heavily.
Usage:
1. Prepare a set of images of the known people you want to recognize. Organize the images in a single directory
with a sub-directory for each known person.
2. Then, call the 'train' function with the appropriate parameters. Make sure to pass in the 'model_save_path' if you
want to save the model to disk so you can re-use the model without having to re-train it.
3. Call 'predict' and pass in your trained model to recognize the people in an unknown image.
NOTE: This example requires scikit-learn to be installed! You can install it with pip:
$ pip3 install scikit-learn
"""
import math
from sklearn import neighbors
import os
import os.path
import pickle
from PIL import Image, ImageDraw
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. If not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. If not specified, knn_clf must be specified.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
    # Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
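    # (knn_clf.predict returns the best-guess name for every face; are_matches then downgrades any
    # face whose nearest neighbor is farther away than distance_threshold to "unknown".)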
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
def show_prediction_labels_on_image(img_path, predictions):
"""
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
"""
pil_image = Image.open(img_path).convert("RGB")
draw = ImageDraw.Draw(pil_image)
for name, (top, right, bottom, left) in predictions:
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# There's a bug in Pillow where it blows up with non-UTF-8 text
# when using the default bitmap font
name = name.encode("UTF-8")
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
pil_image.show()
if __name__ == "__main__":
# STEP 1: Train the KNN classifier and save it to disk
# Once the model is trained and saved, you can skip this step next time.
print("Training KNN classifier...")
classifier = train("knn_examples/train", model_save_path="trained_knn_model.clf", n_neighbors=2)
print("Training complete!")
# STEP 2: Using the trained classifier, make predictions for unknown images
for image_file in os.listdir("knn_examples/test"):
full_file_path = os.path.join("knn_examples/test", image_file)
print("Looking for faces in {}".format(image_file))
# Find all people in the image using a trained classifier model
# Note: You can pass in either a classifier file name or a classifier model instance
predictions = predict(full_file_path, model_path="trained_knn_model.clf")
# Print results on the console
for name, (top, right, bottom, left) in predictions:
print("- Found {} at ({}, {})".format(name, left, top))
# Display results overlaid on an image
show_prediction_labels_on_image(os.path.join("knn_examples/test", image_file), predictions)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/facerec_from_webcam.py | examples/facerec_from_webcam.py | import face_recognition
import cv2
import numpy as np
# This is a super simple (but slow) example of running face recognition on live video from your webcam.
# There's a second example that's a little more complicated but runs faster.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Barack Obama",
"Joe Biden"
]
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_frame = frame[:, :, ::-1]
    # Find all the faces and face encodings in the frame of video
face_locations = face_recognition.face_locations(rgb_frame)
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/digital_makeup.py | examples/digital_makeup.py | from PIL import Image, ImageDraw
import face_recognition
# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(image)
pil_image = Image.fromarray(image)
for face_landmarks in face_landmarks_list:
d = ImageDraw.Draw(pil_image, 'RGBA')
# Make the eyebrows into a nightmare
d.polygon(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128))
d.polygon(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 128))
d.line(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 150), width=5)
d.line(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 150), width=5)
# Gloss the lips
d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
d.line(face_landmarks['top_lip'], fill=(150, 0, 0, 64), width=8)
d.line(face_landmarks['bottom_lip'], fill=(150, 0, 0, 64), width=8)
# Sparkle the eyes
d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30))
d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))
# Apply some eyeliner
d.line(face_landmarks['left_eye'] + [face_landmarks['left_eye'][0]], fill=(0, 0, 0, 110), width=6)
d.line(face_landmarks['right_eye'] + [face_landmarks['right_eye'][0]], fill=(0, 0, 0, 110), width=6)
pil_image.show()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/benchmark.py | examples/benchmark.py | import timeit
# Note: This example is only tested with Python 3 (not Python 2)
# This is a very simple benchmark to give you an idea of how fast each step of face recognition will run on your system.
# Notice that face detection gets very slow at large image sizes. So you might consider running face detection on a
# scaled down version of your image and then running face encodings on the full size image.
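# A rough sketch of that scale-down idea (not part of the benchmark below; it just illustrates the tip):
#
#   small = image[::2, ::2]                                   # cheap 2x downscale of the numpy array
#   small_locations = face_recognition.face_locations(small)  # detect faces on the small copy
#   full_locations = [(t * 2, r * 2, b * 2, l * 2) for (t, r, b, l) in small_locations]
#   encodings = face_recognition.face_encodings(image, known_face_locations=full_locations)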
TEST_IMAGES = [
"obama-240p.jpg",
"obama-480p.jpg",
"obama-720p.jpg",
"obama-1080p.jpg"
]
def run_test(setup, test, iterations_per_test=5, tests_to_run=10):
fastest_execution = min(timeit.Timer(test, setup=setup).repeat(tests_to_run, iterations_per_test))
execution_time = fastest_execution / iterations_per_test
fps = 1.0 / execution_time
return execution_time, fps
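# (run_test keeps the fastest of several repeats, as the timeit docs suggest, to filter out noise
# from other processes; dividing by iterations_per_test gives the per-call time, and fps is its inverse.)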
setup_locate_faces = """
import face_recognition
image = face_recognition.load_image_file("{}")
"""
test_locate_faces = """
face_locations = face_recognition.face_locations(image)
"""
setup_face_landmarks = """
import face_recognition
image = face_recognition.load_image_file("{}")
face_locations = face_recognition.face_locations(image)
"""
test_face_landmarks = """
landmarks = face_recognition.face_landmarks(image, face_locations=face_locations)[0]
"""
setup_encode_face = """
import face_recognition
image = face_recognition.load_image_file("{}")
face_locations = face_recognition.face_locations(image)
"""
test_encode_face = """
encoding = face_recognition.face_encodings(image, known_face_locations=face_locations)[0]
"""
setup_end_to_end = """
import face_recognition
image = face_recognition.load_image_file("{}")
"""
test_end_to_end = """
encoding = face_recognition.face_encodings(image)[0]
"""
print("Benchmarks (Note: All benchmarks are only using a single CPU core)")
print()
for image in TEST_IMAGES:
size = image.split("-")[1].split(".")[0]
print("Timings at {}:".format(size))
print(" - Face locations: {:.4f}s ({:.2f} fps)".format(*run_test(setup_locate_faces.format(image), test_locate_faces)))
print(" - Face landmarks: {:.4f}s ({:.2f} fps)".format(*run_test(setup_face_landmarks.format(image), test_face_landmarks)))
print(" - Encode face (inc. landmarks): {:.4f}s ({:.2f} fps)".format(*run_test(setup_encode_face.format(image), test_encode_face)))
print(" - End-to-end: {:.4f}s ({:.2f} fps)".format(*run_test(setup_end_to_end.format(image), test_end_to_end)))
print()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/find_facial_features_in_picture.py | examples/find_facial_features_in_picture.py | from PIL import Image, ImageDraw
import face_recognition
# Load the jpg file into a numpy array
image = face_recognition.load_image_file("two_people.jpg")
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(image)
print("I found {} face(s) in this photograph.".format(len(face_landmarks_list)))
# Create a PIL imagedraw object so we can draw on the picture
pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
for face_landmarks in face_landmarks_list:
# Print the location of each facial feature in this image
for facial_feature in face_landmarks.keys():
print("The {} in this face has the following points: {}".format(facial_feature, face_landmarks[facial_feature]))
# Let's trace out each facial feature in the image with a line!
for facial_feature in face_landmarks.keys():
d.line(face_landmarks[facial_feature], width=5)
# Show the picture
pil_image.show()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/facerec_on_raspberry_pi_Simplified_Chinese.py | examples/facerec_on_raspberry_pi_Simplified_Chinese.py | # This is an example of running face recognition on a Raspberry Pi.
# It prints the number of detected faces and their identities to the console.
# You need a Raspberry Pi 2 (or newer) with face_recognition installed and a picamera camera connected,
# and make sure the picamera module itself is installed (it usually ships with the Raspberry Pi OS image).
# You can follow this tutorial to set up your Raspberry Pi:
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65
import face_recognition
import picamera
import numpy as np
# You need to enable the camera in sudo raspi-config first.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)
# Load a sample picture (Obama) and learn how to recognize it.
print("Loading known face image(s)")
obama_image = face_recognition.load_image_file("obama_small.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Initialize some variables
face_locations = []
face_encodings = []
while True:
print("Capturing image.")
    # Grab a single frame of video from the RPi camera as a numpy array
camera.capture(output, format="rgb")
    # Find all the face locations and face encodings in the current frame
face_locations = face_recognition.face_locations(output)
print("Found {} faces in image.".format(len(face_locations)))
face_encodings = face_recognition.face_encodings(output, face_locations)
    # Compare each face found in the frame against the known sample image(s)
for face_encoding in face_encodings:
        # See if the face is a match for the known face (Obama)
match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
name = "<Unknown Person>"
if match[0]:
name = "Barack Obama"
print("I see someone named {}!".format(name))
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/facerec_from_webcam_faster.py | examples/facerec_from_webcam_faster.py | import face_recognition
import cv2
import numpy as np
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Barack Obama",
"Joe Biden"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Only process every other frame of video to save time
if process_this_frame:
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/blink_detection.py | examples/blink_detection.py | #!/usr/bin/env python3
# This is a demo of detecting eye status from the user's camera. If the user's eyes are closed for EYES_CLOSED_SECONDS, the system will start printing out "EYES CLOSED"
# to the terminal until the user presses and holds the spacebar to acknowledge it.
# The spacebar is read through cv2.waitKey, so no extra privileges or keyboard module are needed for this demo.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# imports
import face_recognition
import cv2
import time
from scipy.spatial import distance as dist
EYES_CLOSED_SECONDS = 5
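# Note: the main loop below compares this value against a per-frame counter, so it is
# effectively "consecutive processed frames with closed eyes" rather than wall-clock seconds.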
def main():
closed_count = 0
video_capture = cv2.VideoCapture(0)
    ret, frame = video_capture.read()
# cv2.VideoCapture.release()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
face_landmarks_list = face_recognition.face_landmarks(rgb_small_frame)
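    # face_landmarks() returns one dict per detected face, mapping feature names such as
    # 'left_eye' and 'right_eye' to lists of (x, y) points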
process = True
while True:
        ret, frame = video_capture.read()
# get it into the correct format
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
# get the correct face landmarks
if process:
face_landmarks_list = face_recognition.face_landmarks(rgb_small_frame)
# get eyes
for face_landmark in face_landmarks_list:
left_eye = face_landmark['left_eye']
right_eye = face_landmark['right_eye']
color = (255,0,0)
thickness = 2
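                # draw a rough box spanning from the first left-eye landmark to the
                # last right-eye landmark, just to visualise where the eyes were found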
cv2.rectangle(small_frame, left_eye[0], right_eye[-1], color, thickness)
cv2.imshow('Video', small_frame)
ear_left = get_ear(left_eye)
ear_right = get_ear(right_eye)
closed = ear_left < 0.2 and ear_right < 0.2
if (closed):
closed_count += 1
else:
closed_count = 0
if (closed_count >= EYES_CLOSED_SECONDS):
asleep = True
while (asleep): #continue this loop until they wake up and acknowledge music
print("EYES CLOSED")
if cv2.waitKey(1) == 32: #Wait for space key
asleep = False
print("EYES OPENED")
closed_count = 0
process = not process
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
def get_ear(eye):
# compute the euclidean distances between the two sets of
# vertical eye landmarks (x, y)-coordinates
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
# compute the euclidean distance between the horizontal
# eye landmark (x, y)-coordinates
C = dist.euclidean(eye[0], eye[3])
# compute the eye aspect ratio
ear = (A + B) / (2.0 * C)
# return the eye aspect ratio
return ear
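# Worked example with illustrative numbers (not taken from a real frame): if the two
# vertical eye distances are A = 4 and B = 4 pixels and the horizontal distance is
# C = 20 pixels, then ear = (4 + 4) / (2.0 * 20) = 0.2, which is exactly the
# "eyes closed" threshold used in main() above.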
if __name__ == "__main__":
main()
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/face_recognition_svm.py | examples/face_recognition_svm.py | # Train multiple images per person
# Find and recognize faces in an image using an SVC with scikit-learn
"""
Structure:
<test_image>.jpg
<train_dir>/
<person_1>/
<person_1_face-1>.jpg
<person_1_face-2>.jpg
.
.
<person_1_face-n>.jpg
<person_2>/
<person_2_face-1>.jpg
<person_2_face-2>.jpg
.
.
<person_2_face-n>.jpg
.
.
<person_n>/
<person_n_face-1>.jpg
<person_n_face-2>.jpg
.
.
<person_n_face-n>.jpg
"""
import face_recognition
from sklearn import svm
import os
# Training the SVC classifier
# The training data is all the face encodings from the known images, and the labels are their names
encodings = []
names = []
# Training directory
train_dir = os.listdir('/train_dir/')
# Loop through each person in the training directory
for person in train_dir:
pix = os.listdir("/train_dir/" + person)
# Loop through each training image for the current person
for person_img in pix:
# Get the face encodings for the face in each image file
face = face_recognition.load_image_file("/train_dir/" + person + "/" + person_img)
face_bounding_boxes = face_recognition.face_locations(face)
        # If the training image contains exactly one face
if len(face_bounding_boxes) == 1:
face_enc = face_recognition.face_encodings(face)[0]
# Add face encoding for current image with corresponding label (name) to the training data
encodings.append(face_enc)
names.append(person)
else:
print(person + "/" + person_img + " was skipped and can't be used for training")
# Create and train the SVC classifier
clf = svm.SVC(gamma='scale')
clf.fit(encodings,names)
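# Optional (not part of the original example): the trained classifier could be
# persisted for reuse with joblib, which is installed alongside scikit-learn, e.g.
#   from joblib import dump, load
#   dump(clf, "face_clf.joblib")   # hypothetical file name
#   clf = load("face_clf.joblib")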
# Load the test image with unknown faces into a numpy array
test_image = face_recognition.load_image_file('test_image.jpg')
# Find all the faces in the test image using the default HOG-based model
face_locations = face_recognition.face_locations(test_image)
no = len(face_locations)
print("Number of faces detected: ", no)
# Predict all the faces in the test image using the trained classifier
print("Found:")
# Compute the encodings for all detected faces once (instead of re-running the
# encoder on every iteration), then classify each face with the trained SVC
test_image_encs = face_recognition.face_encodings(test_image, face_locations)
for test_image_enc in test_image_encs:
    name = clf.predict([test_image_enc])
    print(*name)
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |
ageitgey/face_recognition | https://github.com/ageitgey/face_recognition/blob/2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1/examples/identify_and_draw_boxes_on_faces.py | examples/identify_and_draw_boxes_on_faces.py | import face_recognition
from PIL import Image, ImageDraw
import numpy as np
# This is an example of running face recognition on a single image
# and drawing a box around each person that was identified.
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Barack Obama",
"Joe Biden"
]
# Load an image with an unknown face
unknown_image = face_recognition.load_image_file("two_people.jpg")
# Find all the faces and face encodings in the unknown image
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)
# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library
# See http://pillow.readthedocs.io/ for more about PIL/Pillow
pil_image = Image.fromarray(unknown_image)
# Create a Pillow ImageDraw Draw instance to draw with
draw = ImageDraw.Draw(pil_image)
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# Draw a label with a name below the face
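    # Note: ImageDraw.textsize() was removed in newer Pillow releases; if the call
    # below fails, draw.textbbox((left, bottom), name) can be used to measure the text instead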
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
pil_image.show()
# You can also save a copy of the new image to disk if you want by uncommenting this line
# pil_image.save("image_with_boxes.jpg")
| python | MIT | 2e2dccea9dd0ce730c8d464d0f67c6eebb40c9d1 | 2026-01-04T14:39:09.099486Z | false |