repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/config.py
crawl4ai/config.py
import os from dotenv import load_dotenv load_dotenv() # Load environment variables from .env file # Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy DEFAULT_PROVIDER = "openai/gpt-4o" DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY" MODEL_REPO_BRANCH = "new-release-0.0.2" # Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy PROVIDER_MODELS = { "ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token "groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"), "groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"), "openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"), "openai/gpt-4o": os.getenv("OPENAI_API_KEY"), "openai/o1-mini": os.getenv("OPENAI_API_KEY"), "openai/o1-preview": os.getenv("OPENAI_API_KEY"), "openai/o3-mini": os.getenv("OPENAI_API_KEY"), "openai/o3-mini-high": os.getenv("OPENAI_API_KEY"), "anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"), "anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"), "anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"), "anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"), "gemini/gemini-pro": os.getenv("GEMINI_API_KEY"), 'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"), 'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"), 'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"), 'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"), "deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"), } PROVIDER_MODELS_PREFIXES = { "ollama": "no-token-needed", # Any model from Ollama no need for API token "groq": os.getenv("GROQ_API_KEY"), "openai": os.getenv("OPENAI_API_KEY"), "anthropic": os.getenv("ANTHROPIC_API_KEY"), "gemini": os.getenv("GEMINI_API_KEY"), "deepseek": os.getenv("DEEPSEEK_API_KEY"), } # Chunk token threshold CHUNK_TOKEN_THRESHOLD = 2**11 # 2048 tokens OVERLAP_RATE = 0.1 WORD_TOKEN_RATE = 1.3 # Threshold for the minimum number of word in a HTML tag to 
be considered MIN_WORD_THRESHOLD = 1 IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1 IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"] ONLY_TEXT_ELIGIBLE_TAGS = [ "b", "i", "u", "span", "del", "ins", "sub", "sup", "strong", "em", "code", "kbd", "var", "s", "q", "abbr", "cite", "dfn", "time", "small", "mark", ] SOCIAL_MEDIA_DOMAINS = [ "facebook.com", "twitter.com", "x.com", "linkedin.com", "instagram.com", "pinterest.com", "tiktok.com", "snapchat.com", "reddit.com", ] # Threshold for the Image extraction - Range is 1 to 6 # Images are scored based on point based system, to filter based on usefulness. Points are assigned # to each image based on the following aspects. # If either height or width exceeds 150px # If image size is greater than 10Kb # If alt property is set # If image format is in jpg, png or webp # If image is in the first half of the total images extracted from the page IMAGE_SCORE_THRESHOLD = 2 MAX_METRICS_HISTORY = 1000 NEED_MIGRATION = True URL_LOG_SHORTEN_LENGTH = 30 SHOW_DEPRECATION_WARNINGS = True SCREENSHOT_HEIGHT_TRESHOLD = 10000 PAGE_TIMEOUT = 60000 DOWNLOAD_PAGE_TIMEOUT = 60000 # Global user settings with descriptions and default values USER_SETTINGS = { "DEFAULT_LLM_PROVIDER": { "default": "openai/gpt-4o", "description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')", "type": "string" }, "DEFAULT_LLM_PROVIDER_TOKEN": { "default": "", "description": "API token for the default LLM provider", "type": "string", "secret": True }, "VERBOSE": { "default": False, "description": "Enable verbose output for all commands", "type": "boolean" }, "BROWSER_HEADLESS": { "default": True, "description": "Run browser in headless mode by default", "type": "boolean" }, "BROWSER_TYPE": { "default": "chromium", "description": "Default browser type (chromium or firefox)", "type": "string", "options": ["chromium", "firefox"] }, "CACHE_MODE": { "default": "bypass", "description": "Default cache mode 
(bypass, use, or refresh)", "type": "string", "options": ["bypass", "use", "refresh"] }, "USER_AGENT_MODE": { "default": "default", "description": "Default user agent mode (default, random, or mobile)", "type": "string", "options": ["default", "random", "mobile"] } }
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/async_url_seeder.py
crawl4ai/async_url_seeder.py
""" async_url_seeder.py Fast async URL discovery for Crawl4AI Features -------- * Common-Crawl streaming via httpx.AsyncClient (HTTP/2, keep-alive) * robots.txt β†’ sitemap chain (.gz + nested indexes) via async httpx * Per-domain CDX result cache on disk (~/.crawl4ai/<index>_<domain>_<hash>.jsonl) * Optional HEAD-only liveness check * Optional partial <head> download + meta parsing * Global hits-per-second rate-limit via asyncio.Semaphore * Concurrency in the thousands β€” fine on a single event-loop """ from __future__ import annotations import aiofiles import asyncio import gzip import hashlib import io import json import os import pathlib import re import time from datetime import timedelta from pathlib import Path from typing import Any, Dict, Iterable, List, Optional, Sequence, Union from urllib.parse import quote, urljoin import httpx import fnmatch try: from lxml import html as lxml_html from lxml import etree LXML = True except ImportError: LXML = False try: import brotli HAS_BROTLI = True except ImportError: HAS_BROTLI = False try: import rank_bm25 HAS_BM25 = True except ImportError: HAS_BM25 = False # Import AsyncLoggerBase from crawl4ai's logger module # Assuming crawl4ai/async_logger.py defines AsyncLoggerBase # You might need to adjust this import based on your exact file structure # Import AsyncLogger for default if needed from .async_logger import AsyncLoggerBase, AsyncLogger # Import SeedingConfig for type hints from typing import TYPE_CHECKING if TYPE_CHECKING: from .async_configs import SeedingConfig # ────────────────────────────────────────────────────────────────────────── consts COLLINFO_URL = "https://index.commoncrawl.org/collinfo.json" # CACHE_DIR = pathlib.Path("~/.crawl4ai").expanduser() # REMOVED: now managed by __init__ # CACHE_DIR.mkdir(exist_ok=True) # REMOVED: now managed by __init__ # INDEX_CACHE = CACHE_DIR / "latest_cc_index.txt" # REMOVED: now managed by __init__ TTL = timedelta(days=7) # Keeping this constant as it's a 
seeder-specific TTL _meta_rx = re.compile( r'<meta\s+(?:[^>]*?(?:name|property|http-equiv)\s*=\s*["\']?([^"\' >]+)[^>]*?content\s*=\s*["\']?([^"\' >]+)[^>]*?)\/?>', re.I) _charset_rx = re.compile(r'<meta\s+[^>]*charset=["\']?([^"\' >]+)', re.I) _title_rx = re.compile(r'<title>(.*?)</title>', re.I | re.S) _link_rx = re.compile( r'<link\s+[^>]*rel=["\']?([^"\' >]+)[^>]*href=["\']?([^"\' >]+)', re.I) # ────────────────────────────────────────────────────────────────────────── helpers def _match(url: str, pattern: str) -> bool: if fnmatch.fnmatch(url, pattern): return True canon = url.split("://", 1)[-1] return (fnmatch.fnmatch(canon, pattern) or (canon.startswith("www.") and fnmatch.fnmatch(canon[4:], pattern))) def _parse_head(src: str) -> Dict[str, Any]: if LXML: try: if isinstance(src, str): # strip Unicode, let lxml decode src = src.encode("utf-8", "replace") doc = lxml_html.fromstring(src) except (ValueError, etree.ParserError): return {} # malformed, bail gracefully info: Dict[str, Any] = { "title": (doc.find(".//title").text or "").strip() if doc.find(".//title") is not None else None, "charset": None, "meta": {}, "link": {}, "jsonld": [] } for el in doc.xpath(".//meta"): k = el.attrib.get("name") or el.attrib.get( "property") or el.attrib.get("http-equiv") if k: info["meta"][k.lower()] = el.attrib.get("content", "") elif "charset" in el.attrib: info["charset"] = el.attrib["charset"].lower() for el in doc.xpath(".//link"): rel_attr = el.attrib.get("rel", "") if not rel_attr: continue # Handle multiple space-separated rel values rel_values = rel_attr.lower().split() entry = {a: el.attrib[a] for a in ( "href", "as", "type", "hreflang") if a in el.attrib} # Add entry for each rel value for rel in rel_values: info["link"].setdefault(rel, []).append(entry) # Extract JSON-LD structured data for script in doc.xpath('.//script[@type="application/ld+json"]'): if script.text: try: jsonld_data = json.loads(script.text.strip()) info["jsonld"].append(jsonld_data) except 
json.JSONDecodeError: pass # Extract html lang attribute html_elem = doc.find(".//html") if html_elem is not None: info["lang"] = html_elem.attrib.get("lang", "") return info # regex fallback info: Dict[str, Any] = {"title": None, "charset": None, "meta": {}, "link": {}, "jsonld": [], "lang": ""} m = _title_rx.search(src) info["title"] = m.group(1).strip() if m else None for k, v in _meta_rx.findall(src): info["meta"][k.lower()] = v m = _charset_rx.search(src) info["charset"] = m.group(1).lower() if m else None for rel, href in _link_rx.findall(src): info["link"].setdefault(rel.lower(), []).append({"href": href}) # Try to extract JSON-LD with regex jsonld_pattern = re.compile( r'<script[^>]*type=["\']application/ld\+json["\'][^>]*>(.*?)</script>', re.I | re.S) for match in jsonld_pattern.findall(src): try: jsonld_data = json.loads(match.strip()) info["jsonld"].append(jsonld_data) except json.JSONDecodeError: pass # Try to extract lang attribute lang_match = re.search(r'<html[^>]*lang=["\']?([^"\' >]+)', src, re.I) if lang_match: info["lang"] = lang_match.group(1) return info # ────────────────────────────────────────────────────────────────────────── class class AsyncUrlSeeder: """ Async version of UrlSeeder. Call pattern is await/async for / async with. Public coroutines ----------------- await seed.urls(...) returns List[Dict[str,Any]] (url, status, head_data) await seed.many_urls(...) 
returns Dict[str, List[Dict[str,Any]]] await seed.close() closes the HTTP client if owned by seeder Usage examples -------------- # Manual cleanup: seeder = AsyncUrlSeeder() try: urls = await seeder.urls("example.com", config) finally: await seeder.close() # Using async context manager (recommended): async with AsyncUrlSeeder() as seeder: urls = await seeder.urls("example.com", config) # Reusing existing client: client = httpx.AsyncClient() seeder = AsyncUrlSeeder(client=client) urls = await seeder.urls("example.com", config) # No need to close seeder, as it doesn't own the client """ def __init__( self, ttl: timedelta = TTL, client: Optional[httpx.AsyncClient] = None, logger: Optional[AsyncLoggerBase] = None, # NEW: Add logger parameter # NEW: Add base_directory base_directory: Optional[Union[str, pathlib.Path]] = None, cache_root: Optional[Union[str, Path]] = None, ): self.ttl = ttl self._owns_client = client is None # Track if we created the client self.client = client or httpx.AsyncClient(http2=True, timeout=20, headers={ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) +AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36" }) self.logger = logger # Store the logger instance self.base_directory = pathlib.Path(base_directory or os.getenv( "CRAWL4_AI_BASE_DIRECTORY", Path.home())) # Resolve base_directory self.cache_dir = self.base_directory / ".crawl4ai" / \ "seeder_cache" # NEW: Specific cache dir for seeder self.cache_dir.mkdir(parents=True, exist_ok=True) # Ensure it exists self.index_cache_path = self.cache_dir / \ "latest_cc_index.txt" # NEW: Index cache path # defer – grabbing the index inside an active loop blows up self.index_id: Optional[str] = None self._rate_sem: Optional[asyncio.Semaphore] = None # ───────── cache dirs ───────── self.cache_root = Path(os.path.expanduser( cache_root or "~/.cache/url_seeder")) (self.cache_root / "live").mkdir(parents=True, exist_ok=True) (self.cache_root / "head").mkdir(exist_ok=True) def 
_log(self, level: str, message: str, tag: str = "URL_SEED", **kwargs: Any): """Helper to log messages using the provided logger, if available.""" if self.logger: log_method = getattr(self.logger, level, None) if log_method: log_method(message=message, tag=tag, params=kwargs.get('params', {})) # else: # Fallback for unknown level, should not happen with AsyncLoggerBase # print(f"[{tag}] {level.upper()}: {message.format(**kwargs)}") # ───────── cache helpers ───────── def _cache_path(self, kind: str, url: str) -> Path: h = hashlib.sha1(url.encode()).hexdigest() return self.cache_root / kind / f"{h}.json" async def _cache_get(self, kind: str, url: str) -> Optional[Dict[str, Any]]: p = self._cache_path(kind, url) if not p.exists(): return None if time.time()-p.stat().st_mtime > self.ttl.total_seconds(): return None try: async with aiofiles.open(p, "r") as f: return json.loads(await f.read()) except Exception: return None async def _cache_set(self, kind: str, url: str, data: Dict[str, Any]) -> None: try: async with aiofiles.open(self._cache_path(kind, url), "w") as f: await f.write(json.dumps(data, separators=(",", ":"))) except Exception: pass # ─────────────────────────────── discovery entry async def urls(self, domain: str, config: "SeedingConfig", ) -> List[Dict[str, Any]]: """ Fetch URLs for a domain using configuration from SeedingConfig. 
Parameters ---------- domain : str The domain to fetch URLs for (e.g., "example.com") config : SeedingConfig Configuration object containing all seeding parameters """ # Extract parameters from config pattern = config.pattern or "*" source = config.source live_check = config.live_check extract_head = config.extract_head concurrency = config.concurrency head_timeout = 5 # Default timeout for HEAD requests hits_per_sec = config.hits_per_sec self.force = config.force # Store force flag as instance attribute force = config.force verbose = config.verbose if config.verbose is not None else ( self.logger.verbose if self.logger else False) max_urls = config.max_urls if config.max_urls is not None else -1 query = config.query score_threshold = config.score_threshold scoring_method = config.scoring_method # Ensure seeder's logger verbose matches the config's verbose if it's set if self.logger and hasattr(self.logger, 'verbose') and config.verbose is not None: self.logger.verbose = config.verbose # ensure we have the latest CC collection id if self.index_id is None: self.index_id = await self._latest_index() # Parse source parameter - split by '+' to get list of sources sources = source.split('+') valid_sources = {"cc", "sitemap"} for s in sources: if s not in valid_sources: raise ValueError( f"Invalid source '{s}'. Valid sources are: {', '.join(valid_sources)}") if hits_per_sec: if hits_per_sec <= 0: self._log( "warning", "hits_per_sec must be positive. 
Disabling rate limiting.", tag="URL_SEED") self._rate_sem = None else: self._rate_sem = asyncio.Semaphore(hits_per_sec) else: self._rate_sem = None # Ensure it's None if no rate limiting self._log("info", "Starting URL seeding for {domain} with source={source}", params={"domain": domain, "source": source}, tag="URL_SEED") # choose stream async def gen(): if "sitemap" in sources: self._log("debug", "Fetching from sitemaps...", tag="URL_SEED") async for u in self._from_sitemaps(domain, pattern, force): yield u if "cc" in sources: self._log("debug", "Fetching from Common Crawl...", tag="URL_SEED") async for u in self._from_cc(domain, pattern, force): yield u # Use bounded queue to prevent RAM spikes with large domains queue_size = min(10000, max(1000, concurrency * 100)) # Dynamic size based on concurrency queue = asyncio.Queue(maxsize=queue_size) producer_done = asyncio.Event() stop_event = asyncio.Event() seen: set[str] = set() filter_nonsense = config.filter_nonsense_urls # Extract this for passing to workers async def producer(): try: async for u in gen(): if u in seen: self._log("debug", "Skipping duplicate URL: {url}", params={"url": u}, tag="URL_SEED") continue if stop_event.is_set(): self._log( "info", "Producer stopping due to max_urls limit.", tag="URL_SEED") break seen.add(u) await queue.put(u) # Will block if queue is full, providing backpressure except Exception as e: self._log("error", "Producer encountered an error: {error}", params={ "error": str(e)}, tag="URL_SEED") finally: producer_done.set() self._log("debug", "Producer finished.", tag="URL_SEED") async def worker(res_list: List[Dict[str, Any]]): while True: if queue.empty() and producer_done.is_set(): # self._log("debug", "Worker exiting: queue empty and producer done.", tag="URL_SEED") break try: # Increased timeout slightly url = await asyncio.wait_for(queue.get(), 5) except asyncio.TimeoutError: continue # Keep checking queue and producer_done status except Exception as e: self._log("error", 
"Worker failed to get URL from queue: {error}", params={ "error": str(e)}, tag="URL_SEED") continue if max_urls > 0 and len(res_list) >= max_urls: self._log( "info", "Worker stopping due to max_urls limit.", tag="URL_SEED", ) stop_event.set() # mark the current item done queue.task_done() # flush whatever is still sitting in the queue so # queue.join() can finish cleanly while not queue.empty(): try: queue.get_nowait() queue.task_done() except asyncio.QueueEmpty: break break if self._rate_sem: # global QPS control async with self._rate_sem: await self._validate(url, res_list, live_check, extract_head, head_timeout, verbose, query, score_threshold, scoring_method, filter_nonsense) else: await self._validate(url, res_list, live_check, extract_head, head_timeout, verbose, query, score_threshold, scoring_method, filter_nonsense) queue.task_done() # Mark task as done for queue.join() if ever used # launch results: List[Dict[str, Any]] = [] prod_task = asyncio.create_task(producer()) workers = [asyncio.create_task(worker(results)) for _ in range(concurrency)] # Wait for all workers to finish await asyncio.gather(prod_task, *workers) await queue.join() # Ensure all queued items are processed self._log("info", "Finished URL seeding for {domain}. 
Total URLs: {count}", params={"domain": domain, "count": len(results)}, tag="URL_SEED") # Apply BM25 scoring if query was provided if query and extract_head and scoring_method == "bm25": # Apply collective BM25 scoring across all documents results = await self._apply_bm25_scoring(results, config) # Filter by score threshold if specified if score_threshold is not None: original_count = len(results) results = [r for r in results if r.get("relevance_score", 0) >= score_threshold] if original_count > len(results): self._log("info", "Filtered {filtered} URLs below score threshold {threshold}", params={"filtered": original_count - len(results), "threshold": score_threshold}, tag="URL_SEED") # Sort by relevance score results.sort(key=lambda x: x.get("relevance_score", 0.0), reverse=True) self._log("info", "Sorted {count} URLs by relevance score for query: '{query}'", params={"count": len(results), "query": query}, tag="URL_SEED") elif query and not extract_head: self._log( "warning", "Query provided but extract_head is False. Enable extract_head for relevance scoring.", tag="URL_SEED") return results[:max_urls] if max_urls > 0 else results async def many_urls( self, domains: Sequence[str], config: "SeedingConfig", ) -> Dict[str, List[Dict[str, Any]]]: """ Fetch URLs for many domains in parallel. Parameters ---------- domains : Sequence[str] List of domains to fetch URLs for config : SeedingConfig Configuration object containing all seeding parameters Returns a {domain: urls-list} dict. 
""" self._log("info", "Starting URL seeding for {count} domains...", params={"count": len(domains)}, tag="URL_SEED") # Ensure seeder's logger verbose matches the config's verbose if it's set if self.logger and hasattr(self.logger, 'verbose') and config.verbose is not None: self.logger.verbose = config.verbose tasks = [ self.urls(domain, config) for domain in domains ] results = await asyncio.gather(*tasks) final_results = dict(zip(domains, results)) self._log( "info", "Finished URL seeding for multiple domains.", tag="URL_SEED") return final_results async def extract_head_for_urls( self, urls: List[str], config: Optional["SeedingConfig"] = None, concurrency: int = 10, timeout: int = 5 ) -> List[Dict[str, Any]]: """ Extract head content for a custom list of URLs using URLSeeder's parallel processing. This method reuses URLSeeder's efficient parallel processing, caching, and head extraction logic to process a custom list of URLs rather than discovering URLs from sources. Parameters ---------- urls : List[str] List of URLs to extract head content from config : SeedingConfig, optional Configuration object. 
If None, uses default settings for head extraction concurrency : int, default=10 Number of concurrent requests timeout : int, default=5 Timeout for each request in seconds Returns ------- List[Dict[str, Any]] List of dictionaries containing url, status, head_data, and optional relevance_score """ # Create default config if none provided if config is None: # Import here to avoid circular imports from .async_configs import SeedingConfig config = SeedingConfig( extract_head=True, concurrency=concurrency, verbose=False ) # Override concurrency and ensure head extraction is enabled config.concurrency = concurrency config.extract_head = True self._log("info", "Starting head extraction for {count} custom URLs", params={"count": len(urls)}, tag="URL_SEED") # Setup rate limiting if specified in config if config.hits_per_sec: if config.hits_per_sec <= 0: self._log("warning", "hits_per_sec must be positive. Disabling rate limiting.", tag="URL_SEED") self._rate_sem = None else: self._rate_sem = asyncio.Semaphore(config.hits_per_sec) else: self._rate_sem = None # Use bounded queue to prevent memory issues with large URL lists queue_size = min(10000, max(1000, concurrency * 100)) queue = asyncio.Queue(maxsize=queue_size) producer_done = asyncio.Event() stop_event = asyncio.Event() seen: set[str] = set() # Results collection results: List[Dict[str, Any]] = [] async def producer(): """Producer to feed URLs into the queue.""" try: for url in urls: if url in seen: self._log("debug", "Skipping duplicate URL: {url}", params={"url": url}, tag="URL_SEED") continue if stop_event.is_set(): break seen.add(url) await queue.put(url) finally: producer_done.set() async def worker(res_list: List[Dict[str, Any]]): """Worker to process URLs from the queue.""" while True: try: # Wait for URL or producer completion url = await asyncio.wait_for(queue.get(), timeout=1.0) except asyncio.TimeoutError: if producer_done.is_set() and queue.empty(): break continue try: # Use existing _validate method which 
handles head extraction, caching, etc. await self._validate( url, res_list, live=False, # We're not doing live checks, just head extraction extract=True, # Always extract head content timeout=timeout, verbose=config.verbose or False, query=config.query, score_threshold=config.score_threshold, scoring_method=config.scoring_method or "bm25", filter_nonsense=config.filter_nonsense_urls ) except Exception as e: self._log("error", "Failed to process URL {url}: {error}", params={"url": url, "error": str(e)}, tag="URL_SEED") # Add failed entry to results res_list.append({ "url": url, "status": "failed", "head_data": {}, "error": str(e) }) finally: queue.task_done() # Start producer producer_task = asyncio.create_task(producer()) # Start workers worker_tasks = [] for _ in range(concurrency): worker_task = asyncio.create_task(worker(results)) worker_tasks.append(worker_task) # Wait for producer to finish await producer_task # Wait for all items to be processed await queue.join() # Cancel workers for task in worker_tasks: task.cancel() # Wait for workers to finish canceling await asyncio.gather(*worker_tasks, return_exceptions=True) # Apply BM25 scoring if query is provided if config.query and config.scoring_method == "bm25": results = await self._apply_bm25_scoring(results, config) # Apply score threshold filtering if config.score_threshold is not None: results = [r for r in results if r.get("relevance_score", 0) >= config.score_threshold] # Sort by relevance score if available if any("relevance_score" in r for r in results): results.sort(key=lambda x: x.get("relevance_score", 0), reverse=True) self._log("info", "Completed head extraction for {count} URLs, {success} successful", params={ "count": len(urls), "success": len([r for r in results if r.get("status") == "valid"]) }, tag="URL_SEED") return results async def _apply_bm25_scoring(self, results: List[Dict[str, Any]], config: "SeedingConfig") -> List[Dict[str, Any]]: """Apply BM25 scoring to results that have 
head_data.""" if not HAS_BM25: self._log("warning", "BM25 scoring requested but rank_bm25 not available", tag="URL_SEED") return results # Extract text contexts from head data text_contexts = [] valid_results = [] for result in results: if result.get("status") == "valid" and result.get("head_data"): text_context = self._extract_text_context(result["head_data"]) if text_context: text_contexts.append(text_context) valid_results.append(result) else: # Use URL-based scoring as fallback score = self._calculate_url_relevance_score(config.query, result["url"]) result["relevance_score"] = float(score) elif result.get("status") == "valid": # No head data but valid URL - use URL-based scoring score = self._calculate_url_relevance_score(config.query, result["url"]) result["relevance_score"] = float(score) # Calculate BM25 scores for results with text context if text_contexts and valid_results: scores = await asyncio.to_thread(self._calculate_bm25_score, config.query, text_contexts) for i, result in enumerate(valid_results): if i < len(scores): result["relevance_score"] = float(scores[i]) return results async def _resolve_head(self, url: str) -> Optional[str]: """ HEAD-probe a URL. Returns: * the same URL if it answers 2xx, * the absolute redirect target if it answers 3xx, * None on any other status or network error. 
""" try: r = await self.client.head(url, timeout=10, follow_redirects=False) # direct hit if 200 <= r.status_code < 300: return str(r.url) # single level redirect if r.status_code in (301, 302, 303, 307, 308): loc = r.headers.get("location") if loc: return urljoin(url, loc) return None except Exception as e: self._log("debug", "HEAD {url} failed: {err}", params={"url": url, "err": str(e)}, tag="URL_SEED") return None # ─────────────────────────────── CC async def _from_cc(self, domain: str, pattern: str, force: bool): import re digest = hashlib.md5(pattern.encode()).hexdigest()[:8] # ── normalise for CC (strip scheme, query, fragment) raw = re.sub(r'^https?://', '', domain).split('#', 1)[0].split('?', 1)[0].lstrip('.') # ── sanitize only for cache-file name safe = re.sub('[/?#]+', '_', raw) path = self.cache_dir / f"{self.index_id}_{safe}_{digest}.jsonl" if path.exists() and not force: self._log("info", "Loading CC URLs for {domain} from cache: {path}", params={"domain": domain, "path": path}, tag="URL_SEED") async with aiofiles.open(path, "r") as fp: async for line in fp: url = line.strip() if _match(url, pattern): yield url return # build CC glob – if a path is present keep it, else add trailing /* glob = f"*.{raw}*" if '/' in raw else f"*.{raw}/*" url = f"https://index.commoncrawl.org/{self.index_id}-index?url={quote(glob, safe='*')}&output=json" retries = (1, 3, 7) self._log("info", "Fetching CC URLs for {domain} from Common Crawl index: {url}", params={"domain": domain, "url": url}, tag="URL_SEED") for i, d in enumerate(retries+(-1,)): # last -1 means don't retry try: async with self.client.stream("GET", url) as r: r.raise_for_status() async with aiofiles.open(path, "w") as fp: async for line in r.aiter_lines(): rec = json.loads(line) u = rec["url"] await fp.write(u+"\n") if _match(u, pattern): yield u return except httpx.HTTPStatusError as e: if e.response.status_code == 503 and i < len(retries): self._log("warning", "Common Crawl API returned 503 for 
{domain}. Retrying in {delay}s.", params={"domain": domain, "delay": retries[i]}, tag="URL_SEED") await asyncio.sleep(retries[i]) continue self._log("error", "HTTP error fetching CC index for {domain}: {error}", params={"domain": domain, "error": str(e)}, tag="URL_SEED") raise except Exception as e: self._log("error", "Error fetching CC index for {domain}: {error}", params={"domain": domain, "error": str(e)}, tag="URL_SEED") raise # ─────────────────────────────── Sitemaps async def _from_sitemaps(self, domain: str, pattern: str, force: bool = False): """ 1. Probe default sitemap locations. 2. If none exist, parse robots.txt for alternative sitemap URLs.
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/link_preview.py
crawl4ai/link_preview.py
""" Link Extractor for Crawl4AI Extracts head content from links discovered during crawling using URLSeeder's efficient parallel processing and caching infrastructure. """ import asyncio import fnmatch from typing import Dict, List, Optional, Any from .async_logger import AsyncLogger from .async_url_seeder import AsyncUrlSeeder from .async_configs import SeedingConfig, CrawlerRunConfig from .models import Links, Link from .utils import calculate_total_score class LinkPreview: """ Extracts head content from links using URLSeeder's parallel processing infrastructure. This class provides intelligent link filtering and head content extraction with: - Pattern-based inclusion/exclusion filtering - Parallel processing with configurable concurrency - Caching for performance - BM25 relevance scoring - Memory-safe processing for large link sets """ def __init__(self, logger: Optional[AsyncLogger] = None): """ Initialize the LinkPreview. Args: logger: Optional logger instance for recording events """ self.logger = logger self.seeder: Optional[AsyncUrlSeeder] = None self._owns_seeder = False async def __aenter__(self): """Async context manager entry.""" await self.start() return self async def __aexit__(self, exc_type, exc_val, exc_tb): """Async context manager exit.""" await self.close() async def start(self): """Initialize the URLSeeder instance.""" if not self.seeder: self.seeder = AsyncUrlSeeder(logger=self.logger) await self.seeder.__aenter__() self._owns_seeder = True async def close(self): """Clean up resources.""" if self.seeder and self._owns_seeder: await self.seeder.__aexit__(None, None, None) self.seeder = None self._owns_seeder = False def _log(self, level: str, message: str, tag: str = "LINK_EXTRACT", **kwargs): """Helper method to safely log messages.""" if self.logger: log_method = getattr(self.logger, level, None) if log_method: log_method(message=message, tag=tag, params=kwargs.get('params', {})) async def extract_link_heads( self, links: Links, config: 
CrawlerRunConfig ) -> Links: """ Extract head content for filtered links and attach to Link objects. Args: links: Links object containing internal and external links config: CrawlerRunConfig with link_preview_config settings Returns: Links object with head_data attached to filtered Link objects """ link_config = config.link_preview_config # Ensure seeder is initialized await self.start() # Filter links based on configuration filtered_urls = self._filter_links(links, link_config) if not filtered_urls: self._log("info", "No links matched filtering criteria") return links self._log("info", "Extracting head content for {count} filtered links", params={"count": len(filtered_urls)}) # Extract head content using URLSeeder head_results = await self._extract_heads_parallel(filtered_urls, link_config) # Merge results back into Link objects updated_links = self._merge_head_data(links, head_results, config) self._log("info", "Completed head extraction for links, {success} successful", params={"success": len([r for r in head_results if r.get("status") == "valid"])}) return updated_links def _filter_links(self, links: Links, link_config: Dict[str, Any]) -> List[str]: """ Filter links based on configuration parameters. 
Args: links: Links object containing internal and external links link_config: Configuration dictionary for link extraction Returns: List of filtered URL strings """ filtered_urls = [] # Include internal links if configured if link_config.include_internal: filtered_urls.extend([link.href for link in links.internal if link.href]) self._log("debug", "Added {count} internal links", params={"count": len(links.internal)}) # Include external links if configured if link_config.include_external: filtered_urls.extend([link.href for link in links.external if link.href]) self._log("debug", "Added {count} external links", params={"count": len(links.external)}) # Apply include patterns include_patterns = link_config.include_patterns if include_patterns: filtered_urls = [ url for url in filtered_urls if any(fnmatch.fnmatch(url, pattern) for pattern in include_patterns) ] self._log("debug", "After include patterns: {count} links remain", params={"count": len(filtered_urls)}) # Apply exclude patterns exclude_patterns = link_config.exclude_patterns if exclude_patterns: filtered_urls = [ url for url in filtered_urls if not any(fnmatch.fnmatch(url, pattern) for pattern in exclude_patterns) ] self._log("debug", "After exclude patterns: {count} links remain", params={"count": len(filtered_urls)}) # Limit number of links max_links = link_config.max_links if max_links > 0 and len(filtered_urls) > max_links: filtered_urls = filtered_urls[:max_links] self._log("debug", "Limited to {max_links} links", params={"max_links": max_links}) # Remove duplicates while preserving order seen = set() unique_urls = [] for url in filtered_urls: if url not in seen: seen.add(url) unique_urls.append(url) self._log("debug", "Final filtered URLs: {count} unique links", params={"count": len(unique_urls)}) return unique_urls async def _extract_heads_parallel( self, urls: List[str], link_config: Dict[str, Any] ) -> List[Dict[str, Any]]: """ Extract head content for URLs using URLSeeder's parallel processing. 
Args: urls: List of URLs to process link_config: Configuration dictionary for link extraction Returns: List of dictionaries with url, status, head_data, and optional relevance_score """ verbose = link_config.verbose concurrency = link_config.concurrency if verbose: self._log("info", "Starting batch processing: {total} links with {concurrency} concurrent workers", params={"total": len(urls), "concurrency": concurrency}) # Create SeedingConfig for URLSeeder seeding_config = SeedingConfig( extract_head=True, concurrency=concurrency, hits_per_sec=getattr(link_config, 'hits_per_sec', None), query=link_config.query, score_threshold=link_config.score_threshold, scoring_method="bm25" if link_config.query else None, verbose=verbose ) # Use URLSeeder's extract_head_for_urls method with progress tracking if verbose: # Create a wrapper to track progress results = await self._extract_with_progress(urls, seeding_config, link_config) else: results = await self.seeder.extract_head_for_urls( urls=urls, config=seeding_config, concurrency=concurrency, timeout=link_config.timeout ) return results async def _extract_with_progress( self, urls: List[str], seeding_config: SeedingConfig, link_config: Dict[str, Any] ) -> List[Dict[str, Any]]: """Extract head content with progress reporting.""" total_urls = len(urls) concurrency = link_config.concurrency batch_size = max(1, total_urls // 10) # Report progress every 10% # Process URLs and track progress completed = 0 successful = 0 failed = 0 # Create a custom progress tracking version # We'll modify URLSeeder's method to include progress callbacks # For now, let's use the existing method and report at the end # In a production version, we would modify URLSeeder to accept progress callbacks self._log("info", "Processing links in batches...") # Use existing method results = await self.seeder.extract_head_for_urls( urls=urls, config=seeding_config, concurrency=concurrency, timeout=link_config.timeout ) # Count results for result in results: 
completed += 1 if result.get("status") == "valid": successful += 1 else: failed += 1 # Final progress report self._log("info", "Batch processing completed: {completed}/{total} processed, {successful} successful, {failed} failed", params={ "completed": completed, "total": total_urls, "successful": successful, "failed": failed }) return results def _merge_head_data( self, original_links: Links, head_results: List[Dict[str, Any]], config: CrawlerRunConfig ) -> Links: """ Merge head extraction results back into Link objects. Args: original_links: Original Links object head_results: Results from head extraction Returns: Links object with head_data attached to matching links """ # Create URL to head_data mapping url_to_head_data = {} for result in head_results: url = result.get("url") if url: url_to_head_data[url] = { "head_data": result.get("head_data", {}), "status": result.get("status", "unknown"), "error": result.get("error"), "relevance_score": result.get("relevance_score") } # Update internal links updated_internal = [] for link in original_links.internal: if link.href in url_to_head_data: head_info = url_to_head_data[link.href] # Create new Link object with head data and scoring contextual_score = head_info.get("relevance_score") updated_link = Link( href=link.href, text=link.text, title=link.title, base_domain=link.base_domain, head_data=head_info["head_data"], head_extraction_status=head_info["status"], head_extraction_error=head_info.get("error"), intrinsic_score=getattr(link, 'intrinsic_score', None), contextual_score=contextual_score ) # Add relevance score to head_data for backward compatibility if contextual_score is not None: updated_link.head_data = updated_link.head_data or {} updated_link.head_data["relevance_score"] = contextual_score # Calculate total score combining intrinsic and contextual scores updated_link.total_score = calculate_total_score( intrinsic_score=updated_link.intrinsic_score, contextual_score=updated_link.contextual_score, 
score_links_enabled=getattr(config, 'score_links', False), query_provided=bool(config.link_preview_config.query) ) updated_internal.append(updated_link) else: # Keep original link unchanged updated_internal.append(link) # Update external links updated_external = [] for link in original_links.external: if link.href in url_to_head_data: head_info = url_to_head_data[link.href] # Create new Link object with head data and scoring contextual_score = head_info.get("relevance_score") updated_link = Link( href=link.href, text=link.text, title=link.title, base_domain=link.base_domain, head_data=head_info["head_data"], head_extraction_status=head_info["status"], head_extraction_error=head_info.get("error"), intrinsic_score=getattr(link, 'intrinsic_score', None), contextual_score=contextual_score ) # Add relevance score to head_data for backward compatibility if contextual_score is not None: updated_link.head_data = updated_link.head_data or {} updated_link.head_data["relevance_score"] = contextual_score # Calculate total score combining intrinsic and contextual scores updated_link.total_score = calculate_total_score( intrinsic_score=updated_link.intrinsic_score, contextual_score=updated_link.contextual_score, score_links_enabled=getattr(config, 'score_links', False), query_provided=bool(config.link_preview_config.query) ) updated_external.append(updated_link) else: # Keep original link unchanged updated_external.append(link) # Sort links by relevance score if available if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data for link in updated_internal + updated_external): def get_relevance_score(link): if hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data: return link.head_data['relevance_score'] return 0.0 updated_internal.sort(key=get_relevance_score, reverse=True) updated_external.sort(key=get_relevance_score, reverse=True) return Links( internal=updated_internal, external=updated_external )
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/__init__.py
crawl4ai/__init__.py
# __init__.py import warnings from .async_webcrawler import AsyncWebCrawler, CacheMode # MODIFIED: Add SeedingConfig and VirtualScrollConfig here from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig, VirtualScrollConfig, LinkPreviewConfig, MatchMode from .content_scraping_strategy import ( ContentScrapingStrategy, LXMLWebScrapingStrategy, WebScrapingStrategy, # Backward compatibility alias ) from .async_logger import ( AsyncLoggerBase, AsyncLogger, ) from .proxy_strategy import ( ProxyRotationStrategy, RoundRobinProxyStrategy, ) from .extraction_strategy import ( ExtractionStrategy, LLMExtractionStrategy, CosineStrategy, JsonCssExtractionStrategy, JsonXPathExtractionStrategy, JsonLxmlExtractionStrategy, RegexExtractionStrategy ) from .chunking_strategy import ChunkingStrategy, RegexChunking from .markdown_generation_strategy import DefaultMarkdownGenerator from .table_extraction import ( TableExtractionStrategy, DefaultTableExtraction, NoTableExtraction, LLMTableExtraction, ) from .content_filter_strategy import ( PruningContentFilter, BM25ContentFilter, LLMContentFilter, RelevantContentFilter, ) from .models import CrawlResult, MarkdownGenerationResult, DisplayMode from .components.crawler_monitor import CrawlerMonitor from .link_preview import LinkPreview from .async_dispatcher import ( MemoryAdaptiveDispatcher, SemaphoreDispatcher, RateLimiter, BaseDispatcher, ) from .docker_client import Crawl4aiDockerClient from .hub import CrawlerHub from .browser_profiler import BrowserProfiler from .deep_crawling import ( DeepCrawlStrategy, BFSDeepCrawlStrategy, FilterChain, URLPatternFilter, DomainFilter, ContentTypeFilter, URLFilter, FilterStats, SEOFilter, KeywordRelevanceScorer, URLScorer, CompositeScorer, DomainAuthorityScorer, FreshnessScorer, PathDepthScorer, BestFirstCrawlingStrategy, DFSDeepCrawlStrategy, DeepCrawlDecorator, ContentRelevanceFilter, ContentTypeScorer, ) # NEW: Import 
AsyncUrlSeeder from .async_url_seeder import AsyncUrlSeeder # Adaptive Crawler from .adaptive_crawler import ( AdaptiveCrawler, AdaptiveConfig, CrawlState, CrawlStrategy, StatisticalStrategy ) # C4A Script Language Support from .script import ( compile as c4a_compile, validate as c4a_validate, compile_file as c4a_compile_file, CompilationResult, ValidationResult, ErrorDetail ) # Browser Adapters from .browser_adapter import ( BrowserAdapter, PlaywrightAdapter, UndetectedAdapter ) from .utils import ( start_colab_display_server, setup_colab_environment, hooks_to_string ) __all__ = [ "AsyncLoggerBase", "AsyncLogger", "AsyncWebCrawler", "BrowserProfiler", "LLMConfig", "GeolocationConfig", # NEW: Add SeedingConfig and VirtualScrollConfig "SeedingConfig", "VirtualScrollConfig", # NEW: Add AsyncUrlSeeder "AsyncUrlSeeder", # Adaptive Crawler "AdaptiveCrawler", "AdaptiveConfig", "CrawlState", "CrawlStrategy", "StatisticalStrategy", "DeepCrawlStrategy", "BFSDeepCrawlStrategy", "BestFirstCrawlingStrategy", "DFSDeepCrawlStrategy", "FilterChain", "URLPatternFilter", "ContentTypeFilter", "DomainFilter", "FilterStats", "URLFilter", "SEOFilter", "KeywordRelevanceScorer", "URLScorer", "CompositeScorer", "DomainAuthorityScorer", "FreshnessScorer", "PathDepthScorer", "DeepCrawlDecorator", "CrawlResult", "CrawlerHub", "CacheMode", "MatchMode", "ContentScrapingStrategy", "WebScrapingStrategy", "LXMLWebScrapingStrategy", "BrowserConfig", "CrawlerRunConfig", "HTTPCrawlerConfig", "ExtractionStrategy", "LLMExtractionStrategy", "CosineStrategy", "JsonCssExtractionStrategy", "JsonXPathExtractionStrategy", "JsonLxmlExtractionStrategy", "RegexExtractionStrategy", "ChunkingStrategy", "RegexChunking", "DefaultMarkdownGenerator", "TableExtractionStrategy", "DefaultTableExtraction", "NoTableExtraction", "RelevantContentFilter", "PruningContentFilter", "BM25ContentFilter", "LLMContentFilter", "BaseDispatcher", "MemoryAdaptiveDispatcher", "SemaphoreDispatcher", "RateLimiter", "CrawlerMonitor", 
"LinkPreview", "DisplayMode", "MarkdownGenerationResult", "Crawl4aiDockerClient", "ProxyRotationStrategy", "RoundRobinProxyStrategy", "ProxyConfig", "start_colab_display_server", "setup_colab_environment", "hooks_to_string", # C4A Script additions "c4a_compile", "c4a_validate", "c4a_compile_file", "CompilationResult", "ValidationResult", "ErrorDetail", # Browser Adapters "BrowserAdapter", "PlaywrightAdapter", "UndetectedAdapter", "LinkPreviewConfig" ] # def is_sync_version_installed(): # try: # import selenium # noqa # return True # except ImportError: # return False # if is_sync_version_installed(): # try: # from .web_crawler import WebCrawler # __all__.append("WebCrawler") # except ImportError: # print( # "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies." # ) # else: # WebCrawler = None # # import warnings # # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.") # Disable all Pydantic warnings warnings.filterwarnings("ignore", module="pydantic") # pydantic_warnings.filter_warnings()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/types.py
crawl4ai/types.py
from typing import TYPE_CHECKING, Union # Logger types AsyncLoggerBase = Union['AsyncLoggerBaseType'] AsyncLogger = Union['AsyncLoggerType'] # Crawler core types AsyncWebCrawler = Union['AsyncWebCrawlerType'] CacheMode = Union['CacheModeType'] CrawlResult = Union['CrawlResultType'] CrawlerHub = Union['CrawlerHubType'] BrowserProfiler = Union['BrowserProfilerType'] # NEW: Add AsyncUrlSeederType AsyncUrlSeeder = Union['AsyncUrlSeederType'] # Configuration types BrowserConfig = Union['BrowserConfigType'] CrawlerRunConfig = Union['CrawlerRunConfigType'] HTTPCrawlerConfig = Union['HTTPCrawlerConfigType'] LLMConfig = Union['LLMConfigType'] # NEW: Add SeedingConfigType SeedingConfig = Union['SeedingConfigType'] # Content scraping types ContentScrapingStrategy = Union['ContentScrapingStrategyType'] LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType'] # Backward compatibility alias WebScrapingStrategy = Union['LXMLWebScrapingStrategyType'] # Proxy types ProxyRotationStrategy = Union['ProxyRotationStrategyType'] RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType'] # Extraction types ExtractionStrategy = Union['ExtractionStrategyType'] LLMExtractionStrategy = Union['LLMExtractionStrategyType'] CosineStrategy = Union['CosineStrategyType'] JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType'] JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType'] # Chunking types ChunkingStrategy = Union['ChunkingStrategyType'] RegexChunking = Union['RegexChunkingType'] # Markdown generation types DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType'] MarkdownGenerationResult = Union['MarkdownGenerationResultType'] # Content filter types RelevantContentFilter = Union['RelevantContentFilterType'] PruningContentFilter = Union['PruningContentFilterType'] BM25ContentFilter = Union['BM25ContentFilterType'] LLMContentFilter = Union['LLMContentFilterType'] # Dispatcher types BaseDispatcher = Union['BaseDispatcherType'] MemoryAdaptiveDispatcher 
= Union['MemoryAdaptiveDispatcherType'] SemaphoreDispatcher = Union['SemaphoreDispatcherType'] RateLimiter = Union['RateLimiterType'] CrawlerMonitor = Union['CrawlerMonitorType'] DisplayMode = Union['DisplayModeType'] RunManyReturn = Union['RunManyReturnType'] # Docker client Crawl4aiDockerClient = Union['Crawl4aiDockerClientType'] # Deep crawling types DeepCrawlStrategy = Union['DeepCrawlStrategyType'] BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType'] FilterChain = Union['FilterChainType'] ContentTypeFilter = Union['ContentTypeFilterType'] DomainFilter = Union['DomainFilterType'] URLFilter = Union['URLFilterType'] FilterStats = Union['FilterStatsType'] SEOFilter = Union['SEOFilterType'] KeywordRelevanceScorer = Union['KeywordRelevanceScorerType'] URLScorer = Union['URLScorerType'] CompositeScorer = Union['CompositeScorerType'] DomainAuthorityScorer = Union['DomainAuthorityScorerType'] FreshnessScorer = Union['FreshnessScorerType'] PathDepthScorer = Union['PathDepthScorerType'] BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType'] DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType'] DeepCrawlDecorator = Union['DeepCrawlDecoratorType'] # Only import types during type checking to avoid circular imports if TYPE_CHECKING: # Logger imports from .async_logger import ( AsyncLoggerBase as AsyncLoggerBaseType, AsyncLogger as AsyncLoggerType, ) # Crawler core imports from .async_webcrawler import ( AsyncWebCrawler as AsyncWebCrawlerType, CacheMode as CacheModeType, ) from .models import CrawlResult as CrawlResultType from .hub import CrawlerHub as CrawlerHubType from .browser_profiler import BrowserProfiler as BrowserProfilerType # NEW: Import AsyncUrlSeeder for type checking from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType # Configuration imports from .async_configs import ( BrowserConfig as BrowserConfigType, CrawlerRunConfig as CrawlerRunConfigType, HTTPCrawlerConfig as HTTPCrawlerConfigType, LLMConfig as LLMConfigType, # NEW: 
Import SeedingConfig for type checking SeedingConfig as SeedingConfigType, ) # Content scraping imports from .content_scraping_strategy import ( ContentScrapingStrategy as ContentScrapingStrategyType, LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType, ) # Proxy imports from .proxy_strategy import ( ProxyRotationStrategy as ProxyRotationStrategyType, RoundRobinProxyStrategy as RoundRobinProxyStrategyType, ) # Extraction imports from .extraction_strategy import ( ExtractionStrategy as ExtractionStrategyType, LLMExtractionStrategy as LLMExtractionStrategyType, CosineStrategy as CosineStrategyType, JsonCssExtractionStrategy as JsonCssExtractionStrategyType, JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType, ) # Chunking imports from .chunking_strategy import ( ChunkingStrategy as ChunkingStrategyType, RegexChunking as RegexChunkingType, ) # Markdown generation imports from .markdown_generation_strategy import ( DefaultMarkdownGenerator as DefaultMarkdownGeneratorType, ) from .models import MarkdownGenerationResult as MarkdownGenerationResultType # Content filter imports from .content_filter_strategy import ( RelevantContentFilter as RelevantContentFilterType, PruningContentFilter as PruningContentFilterType, BM25ContentFilter as BM25ContentFilterType, LLMContentFilter as LLMContentFilterType, ) # Dispatcher imports from .async_dispatcher import ( BaseDispatcher as BaseDispatcherType, MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType, SemaphoreDispatcher as SemaphoreDispatcherType, RateLimiter as RateLimiterType, CrawlerMonitor as CrawlerMonitorType, DisplayMode as DisplayModeType, RunManyReturn as RunManyReturnType, ) # Docker client from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType # Deep crawling imports from .deep_crawling import ( DeepCrawlStrategy as DeepCrawlStrategyType, BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType, FilterChain as FilterChainType, ContentTypeFilter as ContentTypeFilterType, DomainFilter as 
DomainFilterType, URLFilter as URLFilterType, FilterStats as FilterStatsType, SEOFilter as SEOFilterType, KeywordRelevanceScorer as KeywordRelevanceScorerType, URLScorer as URLScorerType, CompositeScorer as CompositeScorerType, DomainAuthorityScorer as DomainAuthorityScorerType, FreshnessScorer as FreshnessScorerType, PathDepthScorer as PathDepthScorerType, BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType, DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType, DeepCrawlDecorator as DeepCrawlDecoratorType, ) def create_llm_config(*args, **kwargs) -> 'LLMConfigType': from .async_configs import LLMConfig return LLMConfig(*args, **kwargs)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/adaptive_crawler.py
crawl4ai/adaptive_crawler.py
""" Adaptive Web Crawler for Crawl4AI This module implements adaptive information foraging for efficient web crawling. It determines when sufficient information has been gathered to answer a query, avoiding unnecessary crawls while ensuring comprehensive coverage. """ from abc import ABC, abstractmethod from typing import Dict, List, Optional, Set, Tuple, Any, Union from dataclasses import dataclass, field import asyncio import pickle import os import json import math from collections import defaultdict, Counter import re from pathlib import Path from crawl4ai.async_webcrawler import AsyncWebCrawler from crawl4ai.async_configs import CrawlerRunConfig, LinkPreviewConfig, LLMConfig from crawl4ai.models import Link, CrawlResult import numpy as np @dataclass class CrawlState: """Tracks the current state of adaptive crawling""" crawled_urls: Set[str] = field(default_factory=set) knowledge_base: List[CrawlResult] = field(default_factory=list) pending_links: List[Link] = field(default_factory=list) query: str = "" metrics: Dict[str, float] = field(default_factory=dict) # Statistical tracking term_frequencies: Dict[str, int] = field(default_factory=lambda: defaultdict(int)) document_frequencies: Dict[str, int] = field(default_factory=lambda: defaultdict(int)) documents_with_terms: Dict[str, Set[int]] = field(default_factory=lambda: defaultdict(set)) total_documents: int = 0 # History tracking for saturation new_terms_history: List[int] = field(default_factory=list) crawl_order: List[str] = field(default_factory=list) # Embedding-specific tracking (only if strategy is embedding) kb_embeddings: Optional[Any] = None # Will be numpy array query_embeddings: Optional[Any] = None # Will be numpy array expanded_queries: List[str] = field(default_factory=list) coverage_shape: Optional[Any] = None # Alpha shape semantic_gaps: List[Tuple[List[float], float]] = field(default_factory=list) # Serializable embedding_model: str = "" def save(self, path: Union[str, Path]): """Save state to 
disk for persistence""" path = Path(path) path.parent.mkdir(parents=True, exist_ok=True) # Convert CrawlResult objects to dicts for serialization state_dict = { 'crawled_urls': list(self.crawled_urls), 'knowledge_base': [self._crawl_result_to_dict(cr) for cr in self.knowledge_base], 'pending_links': [link.model_dump() for link in self.pending_links], 'query': self.query, 'metrics': self.metrics, 'term_frequencies': dict(self.term_frequencies), 'document_frequencies': dict(self.document_frequencies), 'documents_with_terms': {k: list(v) for k, v in self.documents_with_terms.items()}, 'total_documents': self.total_documents, 'new_terms_history': self.new_terms_history, 'crawl_order': self.crawl_order, # Embedding-specific fields (convert numpy arrays to lists for JSON) 'kb_embeddings': self.kb_embeddings.tolist() if self.kb_embeddings is not None else None, 'query_embeddings': self.query_embeddings.tolist() if self.query_embeddings is not None else None, 'expanded_queries': self.expanded_queries, 'semantic_gaps': self.semantic_gaps, 'embedding_model': self.embedding_model } with open(path, 'w') as f: json.dump(state_dict, f, indent=2) @classmethod def load(cls, path: Union[str, Path]) -> 'CrawlState': """Load state from disk""" path = Path(path) with open(path, 'r') as f: state_dict = json.load(f) state = cls() state.crawled_urls = set(state_dict['crawled_urls']) state.knowledge_base = [cls._dict_to_crawl_result(d) for d in state_dict['knowledge_base']] state.pending_links = [Link(**link_dict) for link_dict in state_dict['pending_links']] state.query = state_dict['query'] state.metrics = state_dict['metrics'] state.term_frequencies = defaultdict(int, state_dict['term_frequencies']) state.document_frequencies = defaultdict(int, state_dict['document_frequencies']) state.documents_with_terms = defaultdict(set, {k: set(v) for k, v in state_dict['documents_with_terms'].items()}) state.total_documents = state_dict['total_documents'] state.new_terms_history = 
state_dict['new_terms_history'] state.crawl_order = state_dict['crawl_order'] # Load embedding-specific fields (convert lists back to numpy arrays) state.kb_embeddings = np.array(state_dict['kb_embeddings']) if state_dict.get('kb_embeddings') is not None else None state.query_embeddings = np.array(state_dict['query_embeddings']) if state_dict.get('query_embeddings') is not None else None state.expanded_queries = state_dict.get('expanded_queries', []) state.semantic_gaps = state_dict.get('semantic_gaps', []) state.embedding_model = state_dict.get('embedding_model', '') return state @staticmethod def _crawl_result_to_dict(cr: CrawlResult) -> Dict: """Convert CrawlResult to serializable dict""" # Extract markdown content safely markdown_content = "" if hasattr(cr, 'markdown') and cr.markdown: if hasattr(cr.markdown, 'raw_markdown'): markdown_content = cr.markdown.raw_markdown else: markdown_content = str(cr.markdown) return { 'url': cr.url, 'content': markdown_content, 'links': cr.links if hasattr(cr, 'links') else {}, 'metadata': cr.metadata if hasattr(cr, 'metadata') else {} } @staticmethod def _dict_to_crawl_result(d: Dict): """Convert dict back to CrawlResult""" # Create a mock object that has the minimal interface we need class MockMarkdown: def __init__(self, content): self.raw_markdown = content class MockCrawlResult: def __init__(self, url, content, links, metadata): self.url = url self.markdown = MockMarkdown(content) self.links = links self.metadata = metadata return MockCrawlResult( url=d['url'], content=d.get('content', ''), links=d.get('links', {}), metadata=d.get('metadata', {}) ) @dataclass class AdaptiveConfig: """Configuration for adaptive crawling""" confidence_threshold: float = 0.7 max_depth: int = 5 max_pages: int = 20 top_k_links: int = 3 min_gain_threshold: float = 0.1 strategy: str = "statistical" # statistical, embedding, llm # Advanced parameters saturation_threshold: float = 0.8 consistency_threshold: float = 0.7 coverage_weight: float = 0.4 
consistency_weight: float = 0.3 saturation_weight: float = 0.3 # Link scoring parameters relevance_weight: float = 0.5 novelty_weight: float = 0.3 authority_weight: float = 0.2 # Persistence save_state: bool = False state_path: Optional[str] = None # Embedding strategy parameters embedding_model: str = "sentence-transformers/all-MiniLM-L6-v2" embedding_llm_config: Optional[Union[LLMConfig, Dict]] = None # Separate config for embeddings n_query_variations: int = 10 coverage_threshold: float = 0.85 alpha_shape_alpha: float = 0.5 # Minimum confidence threshold for relevance embedding_min_confidence_threshold: float = 0.1 # Below this, content is considered completely irrelevant # Example: If confidence < 0.1, stop immediately as query and content are unrelated # Embedding confidence calculation parameters embedding_coverage_radius: float = 0.2 # Distance threshold for "covered" query points # Example: With radius=0.2, a query point is considered covered if ANY document # is within cosine distance 0.2 (very similar). Smaller = stricter coverage requirement embedding_k_exp: float = 1.0 # Exponential decay factor for distance-to-score mapping # Example: score = exp(-k_exp * distance). With k_exp=1, distance 0.2 β†’ score 0.82, # distance 0.5 β†’ score 0.61. 
Higher k_exp = steeper decay = more emphasis on very close matches embedding_nearest_weight: float = 0.7 # Weight for nearest neighbor in hybrid scoring embedding_top_k_weight: float = 0.3 # Weight for top-k average in hybrid scoring # Example: If nearest doc has score 0.9 and top-3 avg is 0.6, final = 0.7*0.9 + 0.3*0.6 = 0.81 # Higher nearest_weight = more focus on best match vs neighborhood density # Embedding link selection parameters embedding_overlap_threshold: float = 0.85 # Similarity threshold for penalizing redundant links # Example: Links with >0.85 similarity to existing KB get penalized to avoid redundancy # Lower = more aggressive deduplication, Higher = allow more similar content # Embedding stopping criteria parameters embedding_min_relative_improvement: float = 0.1 # Minimum relative improvement to continue # Example: If confidence is 0.6, need improvement > 0.06 per batch to continue crawling # Lower = more patient crawling, Higher = stop earlier when progress slows embedding_validation_min_score: float = 0.3 # Minimum validation score to trust convergence # Example: Even if learning converged, keep crawling if validation score < 0.4 # This prevents premature stopping when we haven't truly covered the query space # Quality confidence mapping parameters (for display to user) embedding_quality_min_confidence: float = 0.7 # Minimum confidence for validated systems embedding_quality_max_confidence: float = 0.95 # Maximum realistic confidence embedding_quality_scale_factor: float = 0.833 # Scaling factor for confidence mapping # Example: Validated system with learning_score=0.5 β†’ confidence = 0.7 + (0.5-0.4)*0.833 = 0.78 # These control how internal scores map to user-friendly confidence percentages def validate(self): """Validate configuration parameters""" assert 0 <= self.confidence_threshold <= 1, "confidence_threshold must be between 0 and 1" assert self.max_depth > 0, "max_depth must be positive" assert self.max_pages > 0, "max_pages must be 
positive" assert self.top_k_links > 0, "top_k_links must be positive" assert 0 <= self.min_gain_threshold <= 1, "min_gain_threshold must be between 0 and 1" # Check weights sum to 1 weight_sum = self.coverage_weight + self.consistency_weight + self.saturation_weight assert abs(weight_sum - 1.0) < 0.001, f"Coverage weights must sum to 1, got {weight_sum}" weight_sum = self.relevance_weight + self.novelty_weight + self.authority_weight assert abs(weight_sum - 1.0) < 0.001, f"Link scoring weights must sum to 1, got {weight_sum}" # Validate embedding parameters assert 0 < self.embedding_coverage_radius < 1, "embedding_coverage_radius must be between 0 and 1" assert self.embedding_k_exp > 0, "embedding_k_exp must be positive" assert 0 <= self.embedding_nearest_weight <= 1, "embedding_nearest_weight must be between 0 and 1" assert 0 <= self.embedding_top_k_weight <= 1, "embedding_top_k_weight must be between 0 and 1" assert abs(self.embedding_nearest_weight + self.embedding_top_k_weight - 1.0) < 0.001, "Embedding weights must sum to 1" assert 0 <= self.embedding_overlap_threshold <= 1, "embedding_overlap_threshold must be between 0 and 1" assert 0 < self.embedding_min_relative_improvement < 1, "embedding_min_relative_improvement must be between 0 and 1" assert 0 <= self.embedding_validation_min_score <= 1, "embedding_validation_min_score must be between 0 and 1" assert 0 <= self.embedding_quality_min_confidence <= 1, "embedding_quality_min_confidence must be between 0 and 1" assert 0 <= self.embedding_quality_max_confidence <= 1, "embedding_quality_max_confidence must be between 0 and 1" assert self.embedding_quality_scale_factor > 0, "embedding_quality_scale_factor must be positive" assert 0 <= self.embedding_min_confidence_threshold <= 1, "embedding_min_confidence_threshold must be between 0 and 1" @property def _embedding_llm_config_dict(self) -> Optional[Dict]: """Convert LLMConfig to dict format for backward compatibility.""" if self.embedding_llm_config is None: 
return None if isinstance(self.embedding_llm_config, dict): # Already a dict - return as-is for backward compatibility return self.embedding_llm_config # Convert LLMConfig object to dict format return { 'provider': self.embedding_llm_config.provider, 'api_token': self.embedding_llm_config.api_token, 'base_url': getattr(self.embedding_llm_config, 'base_url', None), 'temperature': getattr(self.embedding_llm_config, 'temperature', None), 'max_tokens': getattr(self.embedding_llm_config, 'max_tokens', None), 'top_p': getattr(self.embedding_llm_config, 'top_p', None), 'frequency_penalty': getattr(self.embedding_llm_config, 'frequency_penalty', None), 'presence_penalty': getattr(self.embedding_llm_config, 'presence_penalty', None), 'stop': getattr(self.embedding_llm_config, 'stop', None), 'n': getattr(self.embedding_llm_config, 'n', None), } class CrawlStrategy(ABC): """Abstract base class for crawling strategies""" @abstractmethod async def calculate_confidence(self, state: CrawlState) -> float: """Calculate overall confidence that we have sufficient information""" pass @abstractmethod async def rank_links(self, state: CrawlState, config: AdaptiveConfig) -> List[Tuple[Link, float]]: """Rank pending links by expected information gain""" pass @abstractmethod async def should_stop(self, state: CrawlState, config: AdaptiveConfig) -> bool: """Determine if crawling should stop""" pass @abstractmethod async def update_state(self, state: CrawlState, new_results: List[CrawlResult]) -> None: """Update state with new crawl results""" pass class StatisticalStrategy(CrawlStrategy): """Pure statistical approach - no LLM, no embeddings""" def __init__(self): self.idf_cache = {} self.bm25_k1 = 1.2 # BM25 parameter self.bm25_b = 0.75 # BM25 parameter async def calculate_confidence(self, state: CrawlState) -> float: """Calculate confidence using coverage, consistency, and saturation""" if not state.knowledge_base: return 0.0 coverage = self._calculate_coverage(state) consistency = 
self._calculate_consistency(state) saturation = self._calculate_saturation(state) # Store individual metrics state.metrics['coverage'] = coverage state.metrics['consistency'] = consistency state.metrics['saturation'] = saturation # Weighted combination (weights from config not accessible here, using defaults) confidence = 0.4 * coverage + 0.3 * consistency + 0.3 * saturation return confidence def _calculate_coverage(self, state: CrawlState) -> float: """Coverage scoring - measures query term presence across knowledge base Returns a score between 0 and 1, where: - 0 means no query terms found - 1 means excellent coverage of all query terms """ if not state.query or state.total_documents == 0: return 0.0 query_terms = self._tokenize(state.query.lower()) if not query_terms: return 0.0 term_scores = [] max_tf = max(state.term_frequencies.values()) if state.term_frequencies else 1 for term in query_terms: tf = state.term_frequencies.get(term, 0) df = state.document_frequencies.get(term, 0) if df > 0: # Document coverage: what fraction of docs contain this term doc_coverage = df / state.total_documents # Frequency signal: normalized log frequency freq_signal = math.log(1 + tf) / math.log(1 + max_tf) if max_tf > 0 else 0 # Combined score: document coverage with frequency boost term_score = doc_coverage * (1 + 0.5 * freq_signal) term_scores.append(term_score) else: term_scores.append(0.0) # Average across all query terms coverage = sum(term_scores) / len(term_scores) # Apply square root curve to make score more intuitive # This helps differentiate between partial and good coverage return min(1.0, math.sqrt(coverage)) def _calculate_consistency(self, state: CrawlState) -> float: """Information overlap between pages - high overlap suggests coherent topic coverage""" if len(state.knowledge_base) < 2: return 1.0 # Single or no documents are perfectly consistent # Calculate pairwise term overlap overlaps = [] for i in range(len(state.knowledge_base)): for j in range(i + 1, 
len(state.knowledge_base)): # Get terms from both documents terms_i = set(self._get_document_terms(state.knowledge_base[i])) terms_j = set(self._get_document_terms(state.knowledge_base[j])) if terms_i and terms_j: # Jaccard similarity overlap = len(terms_i & terms_j) / len(terms_i | terms_j) overlaps.append(overlap) if overlaps: # Average overlap as consistency measure consistency = sum(overlaps) / len(overlaps) else: consistency = 0.0 return consistency def _calculate_saturation(self, state: CrawlState) -> float: """Diminishing returns indicator - are we still discovering new information?""" if not state.new_terms_history: return 0.0 if len(state.new_terms_history) < 2: return 0.0 # Not enough history # Calculate rate of new term discovery recent_rate = state.new_terms_history[-1] if state.new_terms_history[-1] > 0 else 1 initial_rate = state.new_terms_history[0] if state.new_terms_history[0] > 0 else 1 # Saturation increases as rate decreases saturation = 1 - (recent_rate / initial_rate) return max(0.0, min(saturation, 1.0)) async def rank_links(self, state: CrawlState, config: AdaptiveConfig) -> List[Tuple[Link, float]]: """Rank links by expected information gain""" scored_links = [] for link in state.pending_links: # Skip already crawled URLs if link.href in state.crawled_urls: continue # Calculate component scores relevance = self._calculate_relevance(link, state) novelty = self._calculate_novelty(link, state) authority = 1.0 # authority = self._calculate_authority(link) # Combined score score = (config.relevance_weight * relevance + config.novelty_weight * novelty + config.authority_weight * authority) scored_links.append((link, score)) # Sort by score descending scored_links.sort(key=lambda x: x[1], reverse=True) return scored_links def _calculate_relevance(self, link: Link, state: CrawlState) -> float: """BM25 relevance score between link preview and query""" if not state.query or not link: return 0.0 # Combine available text from link link_text = ' 
'.join(filter(None, [ link.text or '', link.title or '', link.head_data.get('meta', {}).get('title', '') if link.head_data else '', link.head_data.get('meta', {}).get('description', '') if link.head_data else '', link.head_data.get('meta', {}).get('keywords', '') if link.head_data else '' ])).lower() if not link_text: return 0.0 # Use contextual score if available (from BM25 scoring during crawl) # if link.contextual_score is not None: if link.contextual_score and link.contextual_score > 0: return link.contextual_score # Otherwise, calculate simple term overlap query_terms = set(self._tokenize(state.query.lower())) link_terms = set(self._tokenize(link_text)) if not query_terms: return 0.0 overlap = len(query_terms & link_terms) / len(query_terms) return overlap def _calculate_novelty(self, link: Link, state: CrawlState) -> float: """Estimate how much new information this link might provide""" if not state.knowledge_base: return 1.0 # First links are maximally novel # Get terms from link preview link_text = ' '.join(filter(None, [ link.text or '', link.title or '', link.head_data.get('title', '') if link.head_data else '', link.head_data.get('description', '') if link.head_data else '', link.head_data.get('keywords', '') if link.head_data else '' ])).lower() link_terms = set(self._tokenize(link_text)) if not link_terms: return 0.5 # Unknown novelty # Calculate what percentage of link terms are new existing_terms = set(state.term_frequencies.keys()) new_terms = link_terms - existing_terms novelty = len(new_terms) / len(link_terms) if link_terms else 0.0 return novelty def _calculate_authority(self, link: Link) -> float: """Simple authority score based on URL structure and link attributes""" score = 0.5 # Base score if not link.href: return 0.0 url = link.href.lower() # Positive indicators if '/docs/' in url or '/documentation/' in url: score += 0.2 if '/api/' in url or '/reference/' in url: score += 0.2 if '/guide/' in url or '/tutorial/' in url: score += 0.1 # Check 
for file extensions if url.endswith('.pdf'): score += 0.1 elif url.endswith(('.jpg', '.png', '.gif')): score -= 0.3 # Reduce score for images # Use intrinsic score if available if link.intrinsic_score is not None: score = 0.7 * score + 0.3 * link.intrinsic_score return min(score, 1.0) async def should_stop(self, state: CrawlState, config: AdaptiveConfig) -> bool: """Determine if crawling should stop""" # Check confidence threshold confidence = state.metrics.get('confidence', 0.0) if confidence >= config.confidence_threshold: return True # Check resource limits if len(state.crawled_urls) >= config.max_pages: return True # Check if we have any links left if not state.pending_links: return True # Check saturation if state.metrics.get('saturation', 0.0) >= config.saturation_threshold: return True return False async def update_state(self, state: CrawlState, new_results: List[CrawlResult]) -> None: """Update state with new crawl results""" for result in new_results: # Track new terms old_term_count = len(state.term_frequencies) # Extract and process content - try multiple fields try: content = result.markdown.raw_markdown except AttributeError: print(f"Warning: CrawlResult {result.url} has no markdown content") content = "" # content = "" # if hasattr(result, 'extracted_content') and result.extracted_content: # content = result.extracted_content # elif hasattr(result, 'markdown') and result.markdown: # content = result.markdown.raw_markdown # elif hasattr(result, 'cleaned_html') and result.cleaned_html: # content = result.cleaned_html # elif hasattr(result, 'html') and result.html: # # Use raw HTML as last resort # content = result.html terms = self._tokenize(content.lower()) # Update term frequencies term_set = set() for term in terms: state.term_frequencies[term] += 1 term_set.add(term) # Update document frequencies doc_id = state.total_documents for term in term_set: if term not in state.documents_with_terms[term]: state.document_frequencies[term] += 1 
state.documents_with_terms[term].add(doc_id) # Track new terms discovered new_term_count = len(state.term_frequencies) new_terms = new_term_count - old_term_count state.new_terms_history.append(new_terms) # Update document count state.total_documents += 1 # Add to crawl order state.crawl_order.append(result.url) def _tokenize(self, text: str) -> List[str]: """Simple tokenization - can be enhanced""" # Remove punctuation and split text = re.sub(r'[^\w\s]', ' ', text) tokens = text.split() # Filter short tokens and stop words (basic) tokens = [t for t in tokens if len(t) > 2] return tokens def _get_document_terms(self, crawl_result: CrawlResult) -> List[str]: """Extract terms from a crawl result""" content = crawl_result.markdown.raw_markdown or "" return self._tokenize(content.lower()) class EmbeddingStrategy(CrawlStrategy): """Embedding-based adaptive crawling using semantic space coverage""" def __init__(self, embedding_model: str = None, llm_config: Union[LLMConfig, Dict] = None): self.embedding_model = embedding_model or "sentence-transformers/all-MiniLM-L6-v2" self.llm_config = llm_config self._embedding_cache = {} self._link_embedding_cache = {} # Cache for link embeddings self._validation_passed = False # Track if validation passed # Performance optimization caches self._distance_matrix_cache = None # Cache for query-KB distances self._kb_embeddings_hash = None # Track KB changes self._validation_embeddings_cache = None # Cache validation query embeddings self._kb_similarity_threshold = 0.95 # Threshold for deduplication def _get_embedding_llm_config_dict(self) -> Dict: """Get embedding LLM config as dict with fallback to default.""" if hasattr(self, 'config') and self.config: config_dict = self.config._embedding_llm_config_dict if config_dict: return config_dict # Fallback to default if no config provided return { 'provider': 'openai/text-embedding-3-small', 'api_token': os.getenv('OPENAI_API_KEY') } async def _get_embeddings(self, texts: List[str]) -> Any: 
"""Get embeddings using configured method""" from .utils import get_text_embeddings embedding_llm_config = self._get_embedding_llm_config_dict() return await get_text_embeddings( texts, embedding_llm_config, self.embedding_model ) def _compute_distance_matrix(self, query_embeddings: Any, kb_embeddings: Any) -> Any: """Compute distance matrix using vectorized operations""" if kb_embeddings is None or len(kb_embeddings) == 0: return None # Ensure proper shapes if len(query_embeddings.shape) == 1: query_embeddings = query_embeddings.reshape(1, -1) if len(kb_embeddings.shape) == 1: kb_embeddings = kb_embeddings.reshape(1, -1) # Vectorized cosine distance: 1 - cosine_similarity # Normalize vectors query_norm = query_embeddings / np.linalg.norm(query_embeddings, axis=1, keepdims=True) kb_norm = kb_embeddings / np.linalg.norm(kb_embeddings, axis=1, keepdims=True) # Compute cosine similarity matrix similarity_matrix = np.dot(query_norm, kb_norm.T) # Convert to distance distance_matrix = 1 - similarity_matrix return distance_matrix def _get_cached_distance_matrix(self, query_embeddings: Any, kb_embeddings: Any) -> Any: """Get distance matrix with caching""" if kb_embeddings is None or len(kb_embeddings) == 0: return None # Check if KB has changed kb_hash = hash(kb_embeddings.tobytes()) if kb_embeddings is not None else None if (self._distance_matrix_cache is None or kb_hash != self._kb_embeddings_hash): # Recompute matrix self._distance_matrix_cache = self._compute_distance_matrix(query_embeddings, kb_embeddings) self._kb_embeddings_hash = kb_hash return self._distance_matrix_cache async def map_query_semantic_space(self, query: str, n_synthetic: int = 10) -> Any: """Generate a point cloud representing the semantic neighborhood of the query""" from .utils import perform_completion_with_backoff # Generate more variations than needed for train/val split n_total = int(n_synthetic * 1.3) # Generate 30% more for validation # Generate variations using LLM prompt = f"""Generate 
{n_total} variations of this query that explore different aspects: '{query}' These should be queries a user might ask when looking for similar information. Include different phrasings, related concepts, and specific aspects. Return as a JSON array of strings.""" # Use the LLM for query generation # Convert LLMConfig to dict if needed llm_config_dict = None if self.llm_config: if isinstance(self.llm_config, dict): llm_config_dict = self.llm_config else: # Convert LLMConfig object to dict llm_config_dict = { 'provider': self.llm_config.provider, 'api_token': self.llm_config.api_token } provider = llm_config_dict.get('provider', 'openai/gpt-4o-mini') if llm_config_dict else 'openai/gpt-4o-mini' api_token = llm_config_dict.get('api_token') if llm_config_dict else None response = perform_completion_with_backoff( provider=provider, prompt_with_variables=prompt, api_token=api_token, json_response=True ) variations = json.loads(response.choices[0].message.content) # # Mock data with more variations for split
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/browser_adapter.py
crawl4ai/browser_adapter.py
# browser_adapter.py
"""
Browser adapter for Crawl4AI to support both Playwright and undetected browsers
with minimal changes to existing codebase.

Each adapter exposes the same small interface (evaluate, console/error capture
setup, retrieval, cleanup, imports) so the crawler core can stay
backend-agnostic.
"""

from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Callable
import time
import json

# Import both, but use conditionally.  When a backend is not installed the
# Page alias degrades to ``Any`` so type annotations still resolve at import time.
try:
    from playwright.async_api import Page
except ImportError:
    Page = Any

try:
    from patchright.async_api import Page as UndetectedPage
except ImportError:
    UndetectedPage = Any


class BrowserAdapter(ABC):
    """Abstract adapter for browser-specific operations."""

    @abstractmethod
    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Execute JavaScript in the page."""
        pass

    @abstractmethod
    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console message capturing, returns handler function if needed."""
        pass

    @abstractmethod
    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capturing, returns handler function if needed."""
        pass

    @abstractmethod
    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Retrieve captured console messages (for undetected browsers)."""
        pass

    @abstractmethod
    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable],
                                      handle_error: Optional[Callable]):
        """Clean up console event listeners."""
        pass

    @abstractmethod
    def get_imports(self) -> tuple:
        """Get the appropriate imports for this adapter."""
        pass


class PlaywrightAdapter(BrowserAdapter):
    """Adapter for standard Playwright.

    Console/error capture is event based: handlers are registered with
    ``page.on(...)`` and later removed in :meth:`cleanup_console_capture`.
    """

    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Standard Playwright evaluate."""
        if arg is not None:
            return await page.evaluate(expression, arg)
        return await page.evaluate(expression)

    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using Playwright's event system.

        Returns the handler so the caller can detach it later.
        """

        def handle_console_capture(msg):
            try:
                # Attribute reads are individually guarded: message objects can
                # raise once the page/context has been torn down.
                message_type = "unknown"
                try:
                    message_type = msg.type
                except Exception:
                    pass
                message_text = "unknown"
                try:
                    message_text = msg.text
                except Exception:
                    pass
                entry = {
                    "type": message_type,
                    "text": message_text,
                    "timestamp": time.time()
                }
                captured_console.append(entry)
            except Exception as e:
                # Never let a capture failure propagate into Playwright's
                # event dispatch; record it instead.
                captured_console.append({
                    "type": "console_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("console", handle_console_capture)
        return handle_console_capture

    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using Playwright's event system."""

        def handle_pageerror_capture(err):
            try:
                error_message = "Unknown error"
                try:
                    error_message = err.message
                except Exception:
                    pass
                error_stack = ""
                try:
                    error_stack = err.stack
                except Exception:
                    pass
                captured_console.append({
                    "type": "error",
                    "text": error_message,
                    "stack": error_stack,
                    "timestamp": time.time()
                })
            except Exception as e:
                captured_console.append({
                    "type": "pageerror_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("pageerror", handle_pageerror_capture)
        return handle_pageerror_capture

    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Not needed for Playwright - messages are captured via events."""
        return []

    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable],
                                      handle_error: Optional[Callable]):
        """Remove event listeners registered by the setup methods."""
        if handle_console:
            page.remove_listener("console", handle_console)
        if handle_error:
            page.remove_listener("pageerror", handle_error)

    def get_imports(self) -> tuple:
        """Return Playwright imports (Page, Error, TimeoutError)."""
        from playwright.async_api import Page, Error
        from playwright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError


class StealthAdapter(BrowserAdapter):
    """Adapter for Playwright with stealth features using playwright_stealth.

    Behaves like :class:`PlaywrightAdapter` but applies stealth patches to the
    page before console capture is installed.  Degrades gracefully when
    ``playwright_stealth`` is not installed.
    """

    def __init__(self):
        # NOTE(review): this dict is never read by this adapter; retained for
        # interface parity with UndetectedAdapter.
        self._console_script_injected = {}
        self._stealth_available = self._check_stealth_availability()

    def _check_stealth_availability(self) -> bool:
        """Check if playwright_stealth is available and get the correct function."""
        try:
            from playwright_stealth import stealth_async
            self._stealth_function = stealth_async
            return True
        except ImportError:
            try:
                from playwright_stealth import stealth_sync
                self._stealth_function = stealth_sync
                return True
            except ImportError:
                self._stealth_function = None
                return False

    async def apply_stealth(self, page: Page):
        """Apply stealth to a page if available.

        Dispatches on the resolved function's name: only awaits when the
        async variant was imported.  Failures are swallowed so a stealth
        problem never aborts a crawl.
        """
        if self._stealth_available and self._stealth_function:
            try:
                if hasattr(self._stealth_function, '__call__'):
                    if 'async' in getattr(self._stealth_function, '__name__', ''):
                        await self._stealth_function(page)
                    else:
                        self._stealth_function(page)
            except Exception:
                # Fail silently or log error depending on requirements
                pass

    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Standard Playwright evaluate with stealth applied."""
        if arg is not None:
            return await page.evaluate(expression, arg)
        return await page.evaluate(expression)

    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using Playwright's event system with stealth."""
        # Apply stealth to the page first
        await self.apply_stealth(page)

        def handle_console_capture(msg):
            try:
                message_type = "unknown"
                try:
                    message_type = msg.type
                except Exception:
                    pass
                message_text = "unknown"
                try:
                    message_text = msg.text
                except Exception:
                    pass
                entry = {
                    "type": message_type,
                    "text": message_text,
                    "timestamp": time.time()
                }
                captured_console.append(entry)
            except Exception as e:
                captured_console.append({
                    "type": "console_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("console", handle_console_capture)
        return handle_console_capture

    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using Playwright's event system."""

        def handle_pageerror_capture(err):
            try:
                error_message = "Unknown error"
                try:
                    error_message = err.message
                except Exception:
                    pass
                error_stack = ""
                try:
                    error_stack = err.stack
                except Exception:
                    pass
                captured_console.append({
                    "type": "error",
                    "text": error_message,
                    "stack": error_stack,
                    "timestamp": time.time()
                })
            except Exception as e:
                captured_console.append({
                    "type": "pageerror_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("pageerror", handle_pageerror_capture)
        return handle_pageerror_capture

    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Not needed for Playwright - messages are captured via events."""
        return []

    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable],
                                      handle_error: Optional[Callable]):
        """Remove event listeners registered by the setup methods."""
        if handle_console:
            page.remove_listener("console", handle_console)
        if handle_error:
            page.remove_listener("pageerror", handle_error)

    def get_imports(self) -> tuple:
        """Return Playwright imports (Page, Error, TimeoutError)."""
        from playwright.async_api import Page, Error
        from playwright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError


class UndetectedAdapter(BrowserAdapter):
    """Adapter for undetected browser automation with stealth features.

    Instead of CDP event listeners (which are detectable), console output and
    errors are captured by JavaScript injected via ``add_init_script`` and
    later drained with :meth:`retrieve_console_messages`.
    """

    def __init__(self):
        # BUGFIX: console and error capture previously shared a single
        # per-page flag, so whichever setup_* method ran first suppressed the
        # other's script injection (in the usual order, page errors were never
        # captured at all).  Track the two injections independently.
        self._console_script_injected = {}
        self._error_script_injected = {}

    async def evaluate(self, page: UndetectedPage, expression: str, arg: Any = None) -> Any:
        """Undetected browser evaluate with isolated context.

        Uses an isolated JS world for stealth, except when the expression
        needs to touch our injected capture state on ``window``.
        """
        # For most evaluations, use isolated context for stealth
        # Only use non-isolated when we need to access our injected console capture
        isolated = not (
            "__console" in expression or
            "__captured" in expression or
            "__error" in expression or
            "window.__" in expression
        )

        if arg is not None:
            return await page.evaluate(expression, arg, isolated_context=isolated)
        return await page.evaluate(expression, isolated_context=isolated)

    async def setup_console_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using JavaScript injection for undetected browsers."""
        if not self._console_script_injected.get(page, False):
            await page.add_init_script("""
                // Initialize console capture
                window.__capturedConsole = [];
                window.__capturedErrors = [];

                // Store original console methods
                const originalConsole = {};
                ['log', 'info', 'warn', 'error', 'debug'].forEach(method => {
                    originalConsole[method] = console[method];
                    console[method] = function(...args) {
                        try {
                            window.__capturedConsole.push({
                                type: method,
                                text: args.map(arg => {
                                    try {
                                        if (typeof arg === 'object') {
                                            return JSON.stringify(arg);
                                        }
                                        return String(arg);
                                    } catch (e) {
                                        return '[Object]';
                                    }
                                }).join(' '),
                                timestamp: Date.now()
                            });
                        } catch (e) {
                            // Fail silently to avoid detection
                        }
                        // Call original method
                        originalConsole[method].apply(console, args);
                    };
                });
            """)
            self._console_script_injected[page] = True

        return None  # No handler function needed for undetected browser

    async def setup_error_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using JavaScript injection for undetected browsers."""
        if not self._error_script_injected.get(page, False):
            # The defensive initialization of __capturedErrors makes this
            # script self-contained when console capture was not installed.
            await page.add_init_script("""
                // Ensure the error buffer exists even without console capture
                window.__capturedErrors = window.__capturedErrors || [];

                // Capture errors
                window.addEventListener('error', (event) => {
                    try {
                        window.__capturedErrors.push({
                            type: 'error',
                            text: event.message,
                            stack: event.error ? event.error.stack : '',
                            filename: event.filename,
                            lineno: event.lineno,
                            colno: event.colno,
                            timestamp: Date.now()
                        });
                    } catch (e) {
                        // Fail silently
                    }
                });

                // Capture unhandled promise rejections
                window.addEventListener('unhandledrejection', (event) => {
                    try {
                        window.__capturedErrors.push({
                            type: 'unhandledrejection',
                            text: event.reason ? String(event.reason) : 'Unhandled Promise Rejection',
                            stack: event.reason && event.reason.stack ? event.reason.stack : '',
                            timestamp: Date.now()
                        });
                    } catch (e) {
                        // Fail silently
                    }
                });
            """)
            self._error_script_injected[page] = True

        return None  # No handler function needed for undetected browser

    async def retrieve_console_messages(self, page: UndetectedPage) -> List[Dict]:
        """Retrieve captured console messages and errors from the page.

        Drains both JS-side buffers (resetting them) and normalizes JS
        millisecond timestamps to Python epoch seconds.
        """
        messages = []
        try:
            # Get console messages
            console_messages = await page.evaluate(
                "() => { const msgs = window.__capturedConsole || []; window.__capturedConsole = []; return msgs; }",
                isolated_context=False
            )
            messages.extend(console_messages)

            # Get errors
            errors = await page.evaluate(
                "() => { const errs = window.__capturedErrors || []; window.__capturedErrors = []; return errs; }",
                isolated_context=False
            )
            messages.extend(errors)

            # Convert timestamps from JS to Python format
            for msg in messages:
                if 'timestamp' in msg and isinstance(msg['timestamp'], (int, float)):
                    msg['timestamp'] = msg['timestamp'] / 1000.0  # Convert from ms to seconds
        except Exception:
            # If retrieval fails, return empty list
            pass

        return messages

    async def cleanup_console_capture(self, page: UndetectedPage, handle_console: Optional[Callable],
                                      handle_error: Optional[Callable]):
        """Clean up for undetected browser - retrieve final messages.

        There are no event listeners to remove; drain any remaining buffered
        messages instead and return them.
        """
        final_messages = await self.retrieve_console_messages(page)
        return final_messages

    def get_imports(self) -> tuple:
        """Return undetected browser imports (Page, Error, TimeoutError)."""
        from patchright.async_api import Page, Error
        from patchright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/proxy_strategy.py
crawl4ai/proxy_strategy.py
from typing import List, Dict, Optional
from abc import ABC, abstractmethod
from itertools import cycle
import os

########### ATTENTION PEOPLE OF EARTH ###########
# I have moved this config to async_configs.py, kept it here, in case someone still importing it, however
# be a dear and follow `from crawl4ai import ProxyConfig` instead :)
class ProxyConfig:
    def __init__(
        self,
        server: str,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ip: Optional[str] = None,
    ):
        """Configuration class for a single proxy.

        Args:
            server: Proxy server URL (e.g., "http://127.0.0.1:8080")
            username: Optional username for proxy authentication
            password: Optional password for proxy authentication
            ip: Optional IP address for verification purposes
        """
        self.server = server
        self.username = username
        self.password = password

        # Extract IP from server if not explicitly provided
        self.ip = ip or self._extract_ip_from_server()

    def _extract_ip_from_server(self) -> Optional[str]:
        """Extract IP address from server URL.

        Returns the host portion of ``self.server`` (assumes
        ``scheme://host:port`` or ``host:port``), or None on failure.
        """
        try:
            # Simple extraction assuming http://ip:port format
            if "://" in self.server:
                parts = self.server.split("://")[1].split(":")
                return parts[0]
            else:
                parts = self.server.split(":")
                return parts[0]
        except Exception:
            return None

    @staticmethod
    def from_string(proxy_str: str) -> "ProxyConfig":
        """Create a ProxyConfig from a string in the format 'ip:port:username:password'.

        Also accepts the short 'ip:port' form; any other shape raises ValueError.
        """
        parts = proxy_str.split(":")
        if len(parts) == 4:  # ip:port:username:password
            ip, port, username, password = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                username=username,
                password=password,
                ip=ip
            )
        elif len(parts) == 2:  # ip:port only
            ip, port = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                ip=ip
            )
        else:
            raise ValueError(f"Invalid proxy string format: {proxy_str}")

    @staticmethod
    def from_dict(proxy_dict: Dict) -> "ProxyConfig":
        """Create a ProxyConfig from a dictionary (inverse of :meth:`to_dict`)."""
        return ProxyConfig(
            server=proxy_dict.get("server"),
            username=proxy_dict.get("username"),
            password=proxy_dict.get("password"),
            ip=proxy_dict.get("ip")
        )

    @staticmethod
    def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
        """Load proxies from environment variable.

        Args:
            env_var: Name of environment variable containing comma-separated proxy strings

        Returns:
            List of ProxyConfig objects (empty on error; errors are printed,
            not raised, so a bad env value never aborts startup)
        """
        proxies = []
        try:
            proxy_list = os.getenv(env_var, "").split(",")
            for proxy in proxy_list:
                if not proxy:
                    continue
                proxies.append(ProxyConfig.from_string(proxy))
        except Exception as e:
            print(f"Error loading proxies from environment: {e}")
        return proxies

    def to_dict(self) -> Dict:
        """Convert to dictionary representation."""
        return {
            "server": self.server,
            "username": self.username,
            "password": self.password,
            "ip": self.ip
        }

    def clone(self, **kwargs) -> "ProxyConfig":
        """Create a copy of this configuration with updated values.

        Args:
            **kwargs: Key-value pairs of configuration options to update

        Returns:
            ProxyConfig: A new instance with the specified updates
        """
        config_dict = self.to_dict()
        config_dict.update(kwargs)
        return ProxyConfig.from_dict(config_dict)


class ProxyRotationStrategy(ABC):
    """Base abstract class for proxy rotation strategies"""

    @abstractmethod
    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get next proxy configuration from the strategy"""
        pass

    @abstractmethod
    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add proxy configurations to the strategy"""
        pass


# BUGFIX: this class previously did not inherit from ProxyRotationStrategy even
# though it implements exactly that interface, so isinstance() checks against
# the strategy base class failed.  Subclassing is backward-compatible.
class RoundRobinProxyStrategy(ProxyRotationStrategy):
    """Simple round-robin proxy rotation strategy using ProxyConfig objects"""

    def __init__(self, proxies: List[ProxyConfig] = None):
        """
        Initialize with optional list of proxy configurations

        Args:
            proxies: List of ProxyConfig objects
        """
        self._proxies = []
        self._proxy_cycle = None
        if proxies:
            self.add_proxies(proxies)

    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add new proxies to the rotation pool.

        NOTE: rebuilding the cycle resets the rotation position to the start
        of the pool.
        """
        self._proxies.extend(proxies)
        self._proxy_cycle = cycle(self._proxies)

    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get next proxy in round-robin fashion (None when pool is empty)."""
        if not self._proxy_cycle:
            return None
        return next(self._proxy_cycle)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/install.py
crawl4ai/install.py
"""Post-installation tasks for crawl4ai: home-folder setup, Playwright /
Patchright browser installation, database migration, and a `doctor` health
check entry point."""
import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil

# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)


def setup_home_directory():
    """Set up the .crawl4ai folder structure in the user's home directory.

    Uses CRAWL4_AI_BASE_DIRECTORY as the base when set, otherwise the user's
    home. Any stale cache folder is wiped, content sub-folders are (re)created,
    and an empty global.yml is written if none exists.
    """
    base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
    crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
    # NOTE(review): global.yml is placed in the base directory itself, one
    # level above .crawl4ai — confirm this placement is intentional.
    crawl4ai_config = crawl4ai_folder / "global.yml"
    crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
    cache_folder = crawl4ai_folder / "cache"
    content_folders = [
        "html_content",
        "cleaned_html",
        "markdown_content",
        "extracted_content",
        "screenshots",
    ]

    # Clean up old cache if exists
    if cache_folder.exists():
        shutil.rmtree(cache_folder)

    # Create new folder structure
    crawl4ai_folder.mkdir(exist_ok=True)
    cache_folder.mkdir(exist_ok=True)
    for folder in content_folders:
        (crawl4ai_folder / folder).mkdir(exist_ok=True)

    # If config file does not exist, create it
    if not crawl4ai_config.exists():
        with open(crawl4ai_config, "w") as f:
            f.write("")


def post_install():
    """
    Run all post-installation tasks.
    Checks CRAWL4AI_MODE environment variable.
    If set to 'api', skips Playwright browser installation.
    """
    logger.info("Running post-installation setup...", tag="INIT")
    setup_home_directory()

    # Check environment variable to conditionally skip Playwright install
    run_mode = os.getenv('CRAWL4AI_MODE')
    if run_mode == 'api':
        logger.warning(
            "CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
            tag="SETUP"
        )
    else:
        # Proceed with installation only if mode is not 'api'
        install_playwright()

    run_migration()
    # TODO: Will be added in the future
    # setup_builtin_browser()
    logger.success("Post-installation setup completed!", tag="COMPLETE")


def setup_builtin_browser():
    """Set up a builtin browser for use with Crawl4AI.

    Failures are downgraded to warnings so installation never aborts here.
    """
    try:
        logger.info("Setting up builtin browser...", tag="INIT")
        asyncio.run(_setup_builtin_browser())
        logger.success("Builtin browser setup completed!", tag="COMPLETE")
    except Exception as e:
        logger.warning(f"Failed to set up builtin browser: {e}")
        logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")


async def _setup_builtin_browser():
    """Launch the builtin headless browser and log its CDP endpoint.

    Re-raises on error so the sync wrapper can report the failure.
    """
    try:
        # Import BrowserProfiler here to avoid circular imports
        from .browser_profiler import BrowserProfiler
        profiler = BrowserProfiler(logger=logger)

        # Launch the builtin browser
        cdp_url = await profiler.launch_builtin_browser(headless=True)
        if cdp_url:
            logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
        else:
            logger.warning("Failed to launch builtin browser", tag="BROWSER")
    except Exception as e:
        logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
        raise


def install_playwright():
    """Install Chromium for both Playwright and Patchright (undetected mode).

    Each install is best-effort: on failure the user is told the manual
    command to run instead of the installer aborting.
    """
    logger.info("Installing Playwright browsers...", tag="INIT")
    try:
        # subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "playwright",
                "install",
                "--with-deps",
                "--force",
                "chromium",
            ]
        )
        logger.success(
            "Playwright installation completed successfully.", tag="COMPLETE"
        )
    except subprocess.CalledProcessError:
        # logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )
    except Exception:
        # logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )

    # Install Patchright browsers for undetected browser support
    logger.info("Installing Patchright browsers for undetected mode...", tag="INIT")
    try:
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "patchright",
                "install",
                "--with-deps",
                "--force",
                "chromium",
            ]
        )
        logger.success(
            "Patchright installation completed successfully.", tag="COMPLETE"
        )
    except subprocess.CalledProcessError:
        logger.warning(
            f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
        )
    except Exception:
        logger.warning(
            f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
        )


def run_migration():
    """Initialize database during installation.

    ImportError (module not packaged/available) and runtime failures are both
    tolerated: the database will be initialized lazily on first use.
    """
    try:
        logger.info("Starting database initialization...", tag="INIT")
        from crawl4ai.async_database import async_db_manager

        asyncio.run(async_db_manager.initialize())
        logger.success(
            "Database initialization completed successfully.", tag="COMPLETE"
        )
    except ImportError:
        logger.warning("Database module not found. Will initialize on first use.")
    except Exception as e:
        logger.warning(f"Database initialization failed: {e}")
        logger.warning("Database will be initialized on first use")


async def run_doctor():
    """Test if Crawl4AI is working properly.

    Returns:
        bool: True when a headless crawl of crawl4ai.com yields markdown.
    """
    logger.info("Running Crawl4AI health check...", tag="INIT")
    try:
        from .async_webcrawler import (
            AsyncWebCrawler,
            BrowserConfig,
            CrawlerRunConfig,
            CacheMode,
        )

        browser_config = BrowserConfig(
            headless=True,
            browser_type="chromium",
            ignore_https_errors=True,
            light_mode=True,
            viewport_width=1280,
            viewport_height=720,
        )

        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            screenshot=True,
        )

        async with AsyncWebCrawler(config=browser_config) as crawler:
            logger.info("Testing crawling capabilities...", tag="TEST")
            result = await crawler.arun(url="https://crawl4ai.com", config=run_config)

            if result and result.markdown:
                logger.success("βœ… Crawling test passed!", tag="COMPLETE")
                return True
            else:
                raise Exception("Failed to get content")

    except Exception as e:
        logger.error(f"❌ Test failed: {e}", tag="ERROR")
        return False


def doctor():
    """Entry point for the doctor command"""
    # FIX: dropped the redundant local `import asyncio` that shadowed the
    # module-level import.
    asyncio.run(run_doctor())
    sys.exit(0)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/markdown_generation_strategy.py
crawl4ai/markdown_generation_strategy.py
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
from .models import MarkdownGenerationResult
from .html2text import CustomHTML2Text
# from .types import RelevantContentFilter
from .content_filter_strategy import RelevantContentFilter
import re
from urllib.parse import urljoin

# Pre-compile the regex pattern
# Matches markdown links and images: optional leading "!", [text](url "title").
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')


def fast_urljoin(base: str, url: str) -> str:
    """Fast URL joining for common cases.

    Absolute URLs (http/https/mailto/protocol-relative) pass through
    unchanged; root-relative paths are concatenated onto the base by hand;
    everything else falls back to urllib's urljoin.
    """
    if url.startswith(("http://", "https://", "mailto:", "//")):
        return url
    if url.startswith("/"):
        # Handle absolute paths: avoid a double slash at the join point.
        if base.endswith("/"):
            return base[:-1] + url
        return base + url
    return urljoin(base, url)


class MarkdownGenerationStrategy(ABC):
    """Abstract base class for markdown generation strategies."""

    def __init__(
        self,
        content_filter: Optional[RelevantContentFilter] = None,
        options: Optional[Dict[str, Any]] = None,
        verbose: bool = False,
        content_source: str = "cleaned_html",
    ):
        # content_filter: optional filter used to derive "fit" markdown.
        # options: extra html2text options applied at generation time.
        # content_source: which HTML variant the caller should feed in
        # ("cleaned_html", "raw_html", or "fit_html").
        self.content_filter = content_filter
        self.options = options or {}
        self.verbose = verbose
        self.content_source = content_source

    @abstractmethod
    def generate_markdown(
        self,
        input_html: str,
        base_url: str = "",
        html2text_options: Optional[Dict[str, Any]] = None,
        content_filter: Optional[RelevantContentFilter] = None,
        citations: bool = True,
        **kwargs,
    ) -> MarkdownGenerationResult:
        """Generate markdown from the selected input HTML."""
        pass


class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
    """
    Default implementation of markdown generation strategy.

    How it works:
    1. Generate raw markdown from cleaned HTML.
    2. Convert links to citations.
    3. Generate fit markdown if content filter is provided.
    4. Return MarkdownGenerationResult.

    Args:
        content_filter (Optional[RelevantContentFilter]): Content filter for
            generating fit markdown.
        options (Optional[Dict[str, Any]]): Additional options for markdown
            generation. Defaults to None.
        content_source (str): Source of content to generate markdown from.
            Options: "cleaned_html", "raw_html", "fit_html".
            Defaults to "cleaned_html".

    Returns:
        MarkdownGenerationResult: Result containing raw markdown, fit markdown,
        fit HTML, and references markdown.
    """

    def __init__(
        self,
        content_filter: Optional[RelevantContentFilter] = None,
        options: Optional[Dict[str, Any]] = None,
        content_source: str = "cleaned_html",
    ):
        super().__init__(content_filter, options, verbose=False, content_source=content_source)

    def convert_links_to_citations(
        self, markdown: str, base_url: str = ""
    ) -> Tuple[str, str]:
        """
        Convert links in markdown to citations.

        How it works:
        1. Find all links in the markdown.
        2. Convert links to citations.
        3. Return converted markdown and references markdown.

        Note: This function uses a regex pattern to find links in markdown.

        Args:
            markdown (str): Markdown text.
            base_url (str): Base URL for URL joins.

        Returns:
            Tuple[str, str]: Converted markdown and references markdown.
        """
        link_map = {}          # url -> (citation number, description suffix)
        url_cache = {}  # Cache for URL joins
        parts = []             # output fragments, joined once at the end
        last_end = 0
        counter = 1

        for match in LINK_PATTERN.finditer(markdown):
            # Copy the text between the previous link and this one verbatim.
            parts.append(markdown[last_end : match.start()])
            text, url, title = match.groups()

            # Use cached URL if available, otherwise compute and cache
            if base_url and not url.startswith(("http://", "https://", "mailto:")):
                if url not in url_cache:
                    url_cache[url] = fast_urljoin(base_url, url)
                url = url_cache[url]

            if url not in link_map:
                # First sighting of this URL: assign it the next citation
                # number and build its reference-line description.
                desc = []
                if title:
                    desc.append(title)
                if text and text != title:
                    desc.append(text)
                link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
                counter += 1

            num = link_map[url][0]
            # NOTE(review): the image branch emits "![text⟨n⟩]" with no
            # target, dropping the URL from the inline text — it survives only
            # in the references section. Confirm this is the intended format.
            parts.append(
                f"{text}⟨{num}⟩"
                if not match.group(0).startswith("!")
                else f"![{text}⟨{num}⟩]"
            )
            last_end = match.end()

        parts.append(markdown[last_end:])
        converted_text = "".join(parts)

        # Pre-build reference strings
        # (the "## References" header is emitted even when no links were found)
        references = ["\n\n## References\n\n"]
        references.extend(
            f"⟨{num}⟩ {url}{desc}\n"
            for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
        )

        return converted_text, "".join(references)

    def generate_markdown(
        self,
        input_html: str,
        base_url: str = "",
        html2text_options: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        content_filter: Optional[RelevantContentFilter] = None,
        citations: bool = True,
        **kwargs,
    ) -> MarkdownGenerationResult:
        """
        Generate markdown with citations from the provided input HTML.

        How it works:
        1. Generate raw markdown from the input HTML.
        2. Convert links to citations.
        3. Generate fit markdown if content filter is provided.
        4. Return MarkdownGenerationResult.

        Args:
            input_html (str): The HTML content to process (selected based on
                content_source).
            base_url (str): Base URL for URL joins.
            html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
            options (Optional[Dict[str, Any]]): Additional options for markdown
                generation.
            content_filter (Optional[RelevantContentFilter]): Content filter
                for generating fit markdown.
            citations (bool): Whether to generate citations.

        Returns:
            MarkdownGenerationResult: Result containing raw markdown, fit
            markdown, fit HTML, and references markdown.
        """
        try:
            # Initialize HTML2Text with default options for better conversion
            h = CustomHTML2Text(baseurl=base_url)
            default_options = {
                "body_width": 0,  # Disable text wrapping
                "ignore_emphasis": False,
                "ignore_links": False,
                "ignore_images": False,
                "protect_links": False,
                "single_line_break": True,
                "mark_code": True,
                "escape_snob": False,
            }

            # Update with custom options if provided.
            # Precedence: html2text_options > options argument > self.options;
            # only the first non-empty source is applied.
            if html2text_options:
                default_options.update(html2text_options)
            elif options:
                default_options.update(options)
            elif self.options:
                default_options.update(self.options)

            h.update_params(**default_options)

            # Ensure we have valid input
            if not input_html:
                input_html = ""
            elif not isinstance(input_html, str):
                input_html = str(input_html)

            # Generate raw markdown; conversion errors become inline text
            # rather than raising, so downstream fields are always populated.
            try:
                raw_markdown = h.handle(input_html)
            except Exception as e:
                raw_markdown = f"Error converting HTML to markdown: {str(e)}"

            # Strip the space html2text leaves before fenced-code markers.
            # NOTE(review): pattern shown as a single space + ``` — confirm the
            # intended indent width against CustomHTML2Text's output.
            raw_markdown = raw_markdown.replace(" ```", "```")

            # Convert links to citations
            markdown_with_citations: str = raw_markdown
            references_markdown: str = ""
            if citations:
                try:
                    (
                        markdown_with_citations,
                        references_markdown,
                    ) = self.convert_links_to_citations(raw_markdown, base_url)
                except Exception as e:
                    markdown_with_citations = raw_markdown
                    references_markdown = f"Error generating citations: {str(e)}"

            # Generate fit markdown if content filter is provided
            fit_markdown: Optional[str] = ""
            filtered_html: Optional[str] = ""
            if content_filter or self.content_filter:
                try:
                    content_filter = content_filter or self.content_filter
                    # assumes filter_content returns an iterable of HTML
                    # fragment strings — TODO confirm against
                    # RelevantContentFilter implementations
                    filtered_html = content_filter.filter_content(input_html)
                    filtered_html = "\n".join(
                        "<div>{}</div>".format(s) for s in filtered_html
                    )
                    fit_markdown = h.handle(filtered_html)
                except Exception as e:
                    fit_markdown = f"Error generating fit markdown: {str(e)}"
                    filtered_html = ""

            return MarkdownGenerationResult(
                raw_markdown=raw_markdown or "",
                markdown_with_citations=markdown_with_citations or "",
                references_markdown=references_markdown or "",
                fit_markdown=fit_markdown or "",
                fit_html=filtered_html or "",
            )
        except Exception as e:
            # If anything fails, return empty strings with error message
            error_msg = f"Error in markdown generation: {str(e)}"
            return MarkdownGenerationResult(
                raw_markdown=error_msg,
                markdown_with_citations=error_msg,
                references_markdown="",
                fit_markdown="",
                fit_html="",
            )
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/__version__.py
crawl4ai/__version__.py
# crawl4ai/__version__.py # This is the version that will be used for stable releases __version__ = "0.7.8" # For nightly builds, this gets set during build process __nightly_version__ = None
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/cache_context.py
crawl4ai/cache_context.py
from enum import Enum class CacheMode(Enum): """ Defines the caching behavior for web crawling operations. Modes: - ENABLED: Normal caching behavior (read and write) - DISABLED: No caching at all - READ_ONLY: Only read from cache, don't write - WRITE_ONLY: Only write to cache, don't read - BYPASS: Bypass cache for this operation """ ENABLED = "enabled" DISABLED = "disabled" READ_ONLY = "read_only" WRITE_ONLY = "write_only" BYPASS = "bypass" class CacheContext: """ Encapsulates cache-related decisions and URL handling. This class centralizes all cache-related logic and URL type checking, making the caching behavior more predictable and maintainable. Attributes: url (str): The URL being processed. cache_mode (CacheMode): The cache mode for the current operation. always_bypass (bool): If True, bypasses caching for this operation. is_cacheable (bool): True if the URL is cacheable, False otherwise. is_web_url (bool): True if the URL is a web URL, False otherwise. is_local_file (bool): True if the URL is a local file, False otherwise. is_raw_html (bool): True if the URL is raw HTML, False otherwise. _url_display (str): The display name for the URL (web, local file, or raw HTML). """ def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False): """ Initializes the CacheContext with the provided URL and cache mode. Args: url (str): The URL being processed. cache_mode (CacheMode): The cache mode for the current operation. always_bypass (bool): If True, bypasses caching for this operation. """ self.url = url self.cache_mode = cache_mode self.always_bypass = always_bypass self.is_cacheable = url.startswith(("http://", "https://", "file://")) self.is_web_url = url.startswith(("http://", "https://")) self.is_local_file = url.startswith("file://") self.is_raw_html = url.startswith("raw:") self._url_display = url if not self.is_raw_html else "Raw HTML" def should_read(self) -> bool: """ Determines if cache should be read based on context. How it works: 1. 
If always_bypass is True or is_cacheable is False, return False. 2. If cache_mode is ENABLED or READ_ONLY, return True. Returns: bool: True if cache should be read, False otherwise. """ if self.always_bypass or not self.is_cacheable: return False return self.cache_mode in [CacheMode.ENABLED, CacheMode.READ_ONLY] def should_write(self) -> bool: """ Determines if cache should be written based on context. How it works: 1. If always_bypass is True or is_cacheable is False, return False. 2. If cache_mode is ENABLED or WRITE_ONLY, return True. Returns: bool: True if cache should be written, False otherwise. """ if self.always_bypass or not self.is_cacheable: return False return self.cache_mode in [CacheMode.ENABLED, CacheMode.WRITE_ONLY] @property def display_url(self) -> str: """Returns the URL in display format.""" return self._url_display def _legacy_to_cache_mode( disable_cache: bool = False, bypass_cache: bool = False, no_cache_read: bool = False, no_cache_write: bool = False, ) -> CacheMode: """ Converts legacy cache parameters to the new CacheMode enum. This is an internal function to help transition from the old boolean flags to the new CacheMode system. """ if disable_cache: return CacheMode.DISABLED if bypass_cache: return CacheMode.BYPASS if no_cache_read and no_cache_write: return CacheMode.DISABLED if no_cache_read: return CacheMode.WRITE_ONLY if no_cache_write: return CacheMode.READ_ONLY return CacheMode.ENABLED
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/extraction_strategy.py
crawl4ai/extraction_strategy.py
from abc import ABC, abstractmethod
import inspect
from typing import Any, List, Dict, Optional, Tuple, Pattern, Union
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import time
from enum import IntFlag, auto
from .prompts import PROMPT_EXTRACT_BLOCKS, PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION, PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION, JSON_SCHEMA_BUILDER_XPATH, PROMPT_EXTRACT_INFERRED_SCHEMA
from .config import (
    DEFAULT_PROVIDER,
    DEFAULT_PROVIDER_API_KEY,
    CHUNK_TOKEN_THRESHOLD,
    OVERLAP_RATE,
    WORD_TOKEN_RATE,
)
from .utils import *  # noqa: F403
from .utils import (
    sanitize_html,
    escape_json_string,
    perform_completion_with_backoff,
    extract_xml_data,
    split_and_parse_json_objects,
    sanitize_input_encode,
    merge_chunks,
)
from .models import *  # noqa: F403
from .models import TokenUsage
from .model_loader import *  # noqa: F403
from .model_loader import (
    get_device,
    load_HF_embedding_model,
    load_text_multilabel_classifier,
    calculate_batch_size
)
from .types import LLMConfig, create_llm_config
from functools import partial
import numpy as np
import re
from bs4 import BeautifulSoup
from lxml import html, etree


class ExtractionStrategy(ABC):
    """
    Abstract base class for all extraction strategies.
    """

    def __init__(self, input_format: str = "markdown", **kwargs):
        """
        Initialize the extraction strategy.

        Args:
            input_format: Content format to use for extraction.
                Options: "markdown" (default), "html", "fit_markdown"
            **kwargs: Additional keyword arguments
        """
        self.input_format = input_format
        # Sentinel delimiter token — presumably used by concrete strategies to
        # split/mark content; not referenced in this base class. TODO confirm.
        self.DEL = "<|DEL|>"
        self.name = self.__class__.__name__
        self.verbose = kwargs.get("verbose", False)

    @abstractmethod
    def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]:
        """
        Extract meaningful blocks or chunks from the given HTML.

        :param url: The URL of the webpage.
        :param html: The HTML content of the webpage.
        :return: A list of extracted blocks or chunks.
        """
        pass

    def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]:
        """
        Process sections of text in parallel by default.

        Each section is submitted to a thread pool and handled by
        self.extract; results are collected in completion order (so the
        combined list is NOT guaranteed to follow the input ordering).

        :param url: The URL of the webpage.
        :param sections: List of sections (strings) to process.
        :return: A list of processed JSON blocks.
        """
        extracted_content = []
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self.extract, url, section, **kwargs)
                for section in sections
            ]
            for future in as_completed(futures):
                extracted_content.extend(future.result())
        return extracted_content

    async def arun(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]:
        """
        Async version: Process sections of text in parallel using asyncio.
        Default implementation runs the sync version in a thread pool.
        Subclasses can override this for true async processing.

        :param url: The URL of the webpage.
        :param sections: List of sections (strings) to process.
        :return: A list of processed JSON blocks.
        """
        import asyncio
        return await asyncio.to_thread(self.run, url, sections, *q, **kwargs)


class NoExtractionStrategy(ExtractionStrategy):
    """
    A strategy that does not extract any meaningful content from the HTML.
    It simply returns the entire HTML as a single block.
    """

    def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]:
        """
        Extract meaningful blocks or chunks from the given HTML.
        """
        # Pass-through: the whole input becomes one block at index 0.
        return [{"index": 0, "content": html}]

    def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]:
        # Pass-through per section, preserving input order (overrides the
        # thread-pool default since there is no work to parallelize).
        return [
            {"index": i, "tags": [], "content": section}
            for i, section in enumerate(sections)
        ]


#######################################################
# Strategies using clustering for text data extraction #
#######################################################
class CosineStrategy(ExtractionStrategy):
    """
    Extract meaningful blocks or chunks from the given HTML using cosine similarity.

    How it works:
    1. Pre-filter documents using embeddings and semantic_filter.
    2.
Perform clustering using cosine similarity. 3. Organize texts by their cluster labels, retaining order. 4. Filter clusters by word count. 5. Extract meaningful blocks or chunks from the filtered clusters. Attributes: semantic_filter (str): A keyword filter for document filtering. word_count_threshold (int): Minimum number of words per cluster. max_dist (float): The maximum cophenetic distance on the dendrogram to form clusters. linkage_method (str): The linkage method for hierarchical clustering. top_k (int): Number of top categories to extract. model_name (str): The name of the sentence-transformers model. sim_threshold (float): The similarity threshold for clustering. """ def __init__( self, semantic_filter=None, word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, model_name="sentence-transformers/all-MiniLM-L6-v2", sim_threshold=0.3, **kwargs, ): """ Initialize the strategy with clustering parameters. Args: semantic_filter (str): A keyword filter for document filtering. word_count_threshold (int): Minimum number of words per cluster. max_dist (float): The maximum cophenetic distance on the dendrogram to form clusters. linkage_method (str): The linkage method for hierarchical clustering. top_k (int): Number of top categories to extract. 
""" super().__init__(**kwargs) import numpy as np self.semantic_filter = semantic_filter self.word_count_threshold = word_count_threshold self.max_dist = max_dist self.linkage_method = linkage_method self.top_k = top_k self.sim_threshold = sim_threshold self.timer = time.time() self.verbose = kwargs.get("verbose", False) self.buffer_embeddings = np.array([]) self.get_embedding_method = "direct" self.device = get_device() # import torch # self.device = torch.device('cpu') self.default_batch_size = calculate_batch_size(self.device) if self.verbose: print(f"[LOG] Loading Extraction Model for {self.device.type} device.") # if False and self.device.type == "cpu": # self.model = load_onnx_all_MiniLM_l6_v2() # self.tokenizer = self.model.tokenizer # self.get_embedding_method = "direct" # else: self.tokenizer, self.model = load_HF_embedding_model(model_name) self.model.to(self.device) self.model.eval() self.get_embedding_method = "batch" self.buffer_embeddings = np.array([]) # if model_name == "bert-base-uncased": # self.tokenizer, self.model = load_bert_base_uncased() # self.model.eval() # Ensure the model is in evaluation mode # self.get_embedding_method = "batch" # elif model_name == "BAAI/bge-small-en-v1.5": # self.tokenizer, self.model = load_bge_small_en_v1_5() # self.model.eval() # Ensure the model is in evaluation mode # self.get_embedding_method = "batch" # elif model_name == "sentence-transformers/all-MiniLM-L6-v2": # self.model = load_onnx_all_MiniLM_l6_v2() # self.tokenizer = self.model.tokenizer # self.get_embedding_method = "direct" if self.verbose: print(f"[LOG] Loading Multilabel Classifier for {self.device.type} device.") self.nlp, _ = load_text_multilabel_classifier() # self.default_batch_size = 16 if self.device.type == 'cpu' else 64 if self.verbose: print( f"[LOG] Model loaded {model_name}, models/reuters, took " + str(time.time() - self.timer) + " seconds" ) def filter_documents_embeddings( self, documents: List[str], semantic_filter: str, at_least_k: 
int = 20 ) -> List[str]: """ Filter and sort documents based on the cosine similarity of their embeddings with the semantic_filter embedding. Args: documents (List[str]): A list of document texts. semantic_filter (str): A keyword filter for document filtering. at_least_k (int): The minimum number of documents to return. Returns: List[str]: A list of filtered and sorted document texts. """ if not semantic_filter: return documents if len(documents) < at_least_k: at_least_k = len(documents) // 2 from sklearn.metrics.pairwise import cosine_similarity # Compute embedding for the keyword filter query_embedding = self.get_embeddings([semantic_filter])[0] # Compute embeddings for the documents document_embeddings = self.get_embeddings(documents) # Calculate cosine similarity between the query embedding and document embeddings similarities = cosine_similarity( [query_embedding], document_embeddings ).flatten() # Filter documents based on the similarity threshold filtered_docs = [ (doc, sim) for doc, sim in zip(documents, similarities) if sim >= self.sim_threshold ] # If the number of filtered documents is less than at_least_k, sort remaining documents by similarity if len(filtered_docs) < at_least_k: remaining_docs = [ (doc, sim) for doc, sim in zip(documents, similarities) if sim < self.sim_threshold ] remaining_docs.sort(key=lambda x: x[1], reverse=True) filtered_docs.extend(remaining_docs[: at_least_k - len(filtered_docs)]) # Extract the document texts from the tuples filtered_docs = [doc for doc, _ in filtered_docs] return filtered_docs[:at_least_k] def get_embeddings( self, sentences: List[str], batch_size=None, bypass_buffer=False ): """ Get BERT embeddings for a list of sentences. Args: sentences (List[str]): A list of text chunks (sentences). Returns: NumPy array of embeddings. 
""" # if self.buffer_embeddings.any() and not bypass_buffer: # return self.buffer_embeddings if self.device.type in ["cpu", "gpu", "cuda", "mps"]: import torch # Tokenize sentences and convert to tensor if batch_size is None: batch_size = self.default_batch_size all_embeddings = [] for i in range(0, len(sentences), batch_size): batch_sentences = sentences[i : i + batch_size] encoded_input = self.tokenizer( batch_sentences, padding=True, truncation=True, return_tensors="pt" ) encoded_input = { key: tensor.to(self.device) for key, tensor in encoded_input.items() } # Ensure no gradients are calculated with torch.no_grad(): model_output = self.model(**encoded_input) # Get embeddings from the last hidden state (mean pooling) embeddings = model_output.last_hidden_state.mean(dim=1).cpu().numpy() all_embeddings.append(embeddings) self.buffer_embeddings = np.vstack(all_embeddings) elif self.device.type == "cpu": # self.buffer_embeddings = self.model(sentences) if batch_size is None: batch_size = self.default_batch_size all_embeddings = [] for i in range(0, len(sentences), batch_size): batch_sentences = sentences[i : i + batch_size] embeddings = self.model(batch_sentences) all_embeddings.append(embeddings) self.buffer_embeddings = np.vstack(all_embeddings) return self.buffer_embeddings def hierarchical_clustering(self, sentences: List[str], embeddings=None): """ Perform hierarchical clustering on sentences and return cluster labels. Args: sentences (List[str]): A list of text chunks (sentences). Returns: NumPy array of cluster labels. 
""" # Get embeddings from scipy.cluster.hierarchy import linkage, fcluster from scipy.spatial.distance import pdist self.timer = time.time() embeddings = self.get_embeddings(sentences, bypass_buffer=True) # print(f"[LOG] πŸš€ Embeddings computed in {time.time() - self.timer:.2f} seconds") # Compute pairwise cosine distances distance_matrix = pdist(embeddings, "cosine") # Perform agglomerative clustering respecting order linked = linkage(distance_matrix, method=self.linkage_method) # Form flat clusters labels = fcluster(linked, self.max_dist, criterion="distance") return labels def filter_clusters_by_word_count( self, clusters: Dict[int, List[str]] ) -> Dict[int, List[str]]: """ Filter clusters to remove those with a word count below the threshold. Args: clusters (Dict[int, List[str]]): Dictionary of clusters. Returns: Dict[int, List[str]]: Filtered dictionary of clusters. """ filtered_clusters = {} for cluster_id, texts in clusters.items(): # Concatenate texts for analysis full_text = " ".join(texts) # Count words word_count = len(full_text.split()) # Keep clusters with word count above the threshold if word_count >= self.word_count_threshold: filtered_clusters[cluster_id] = texts return filtered_clusters def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]: """ Extract clusters from HTML content using hierarchical clustering. Args: url (str): The URL of the webpage. html (str): The HTML content of the webpage. Returns: List[Dict[str, Any]]: A list of processed JSON blocks. 
""" # Assume `html` is a list of text chunks for this strategy t = time.time() text_chunks = html.split(self.DEL) # Split by lines or paragraphs as needed # Pre-filter documents using embeddings and semantic_filter text_chunks = self.filter_documents_embeddings( text_chunks, self.semantic_filter ) if not text_chunks: return [] # Perform clustering labels = self.hierarchical_clustering(text_chunks) # print(f"[LOG] πŸš€ Clustering done in {time.time() - t:.2f} seconds") # Organize texts by their cluster labels, retaining order t = time.time() clusters = {} for index, label in enumerate(labels): clusters.setdefault(label, []).append(text_chunks[index]) # Filter clusters by word count filtered_clusters = self.filter_clusters_by_word_count(clusters) # Convert filtered clusters to a sorted list of dictionaries cluster_list = [ {"index": int(idx), "tags": [], "content": " ".join(filtered_clusters[idx])} for idx in sorted(filtered_clusters) ] if self.verbose: print(f"[LOG] πŸš€ Assign tags using {self.device}") if self.device.type in ["gpu", "cuda", "mps", "cpu"]: labels = self.nlp([cluster["content"] for cluster in cluster_list]) for cluster, label in zip(cluster_list, labels): cluster["tags"] = label # elif self.device.type == "cpu": # # Process the text with the loaded model # texts = [cluster['content'] for cluster in cluster_list] # # Batch process texts # docs = self.nlp.pipe(texts, disable=["tagger", "parser", "ner", "lemmatizer"]) # for doc, cluster in zip(docs, cluster_list): # tok_k = self.top_k # top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:tok_k] # cluster['tags'] = [cat for cat, _ in top_categories] # for cluster in cluster_list: # doc = self.nlp(cluster['content']) # tok_k = self.top_k # top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:tok_k] # cluster['tags'] = [cat for cat, _ in top_categories] if self.verbose: print(f"[LOG] πŸš€ Categorization done in {time.time() - t:.2f} seconds") return 
cluster_list def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]: """ Process sections using hierarchical clustering. Args: url (str): The URL of the webpage. sections (List[str]): List of sections (strings) to process. Returns: """ # This strategy processes all sections together return self.extract(url, self.DEL.join(sections), **kwargs) ####################################################### # Strategies using LLM-based extraction for text data # ####################################################### class LLMExtractionStrategy(ExtractionStrategy): """ A strategy that uses an LLM to extract meaningful content from the HTML. Attributes: llm_config: The LLM configuration object. instruction: The instruction to use for the LLM model. schema: Pydantic model schema for structured data. extraction_type: "block" or "schema". chunk_token_threshold: Maximum tokens per chunk. overlap_rate: Overlap between chunks. word_token_rate: Word to token conversion rate. apply_chunking: Whether to apply chunking. verbose: Whether to print verbose output. usages: List of individual token usages. total_usage: Accumulated token usage. """ _UNWANTED_PROPS = { 'provider' : 'Instead, use llm_config=LLMConfig(provider="...")', 'api_token' : 'Instead, use llm_config=LlMConfig(api_token="...")', 'base_url' : 'Instead, use llm_config=LLMConfig(base_url="...")', 'api_base' : 'Instead, use llm_config=LLMConfig(base_url="...")', } def __init__( self, llm_config: 'LLMConfig' = None, instruction: str = None, schema: Dict = None, extraction_type="block", chunk_token_threshold=CHUNK_TOKEN_THRESHOLD, overlap_rate=OVERLAP_RATE, word_token_rate=WORD_TOKEN_RATE, apply_chunking=True, input_format: str = "markdown", force_json_response=False, verbose=False, # Deprecated arguments provider: str = DEFAULT_PROVIDER, api_token: Optional[str] = None, base_url: str = None, api_base: str = None, **kwargs, ): """ Initialize the strategy with clustering parameters. 
Args: llm_config: The LLM configuration object. instruction: The instruction to use for the LLM model. schema: Pydantic model schema for structured data. extraction_type: "block" or "schema". chunk_token_threshold: Maximum tokens per chunk. overlap_rate: Overlap between chunks. word_token_rate: Word to token conversion rate. apply_chunking: Whether to apply chunking. input_format: Content format to use for extraction. Options: "markdown" (default), "html", "fit_markdown" force_json_response: Whether to force a JSON response from the LLM. verbose: Whether to print verbose output. # Deprecated arguments, will be removed very soon provider: The provider to use for extraction. It follows the format <provider_name>/<model_name>, e.g., "ollama/llama3.3". api_token: The API token for the provider. base_url: The base URL for the API request. api_base: The base URL for the API request. extra_args: Additional arguments for the API request, such as temperature, max_tokens, etc. """ super().__init__( input_format=input_format, **kwargs) self.llm_config = llm_config if not self.llm_config: self.llm_config = create_llm_config( provider=DEFAULT_PROVIDER, api_token=os.environ.get(DEFAULT_PROVIDER_API_KEY), ) self.instruction = instruction self.extract_type = extraction_type self.schema = schema if schema: self.extract_type = "schema" self.force_json_response = force_json_response self.chunk_token_threshold = chunk_token_threshold or CHUNK_TOKEN_THRESHOLD self.overlap_rate = overlap_rate self.word_token_rate = word_token_rate self.apply_chunking = apply_chunking self.extra_args = kwargs.get("extra_args", {}) if not self.apply_chunking: self.chunk_token_threshold = 1e9 self.verbose = verbose self.usages = [] # Store individual usages self.total_usage = TokenUsage() # Accumulated usage self.provider = provider self.api_token = api_token self.base_url = base_url self.api_base = api_base def __setattr__(self, name, value): """Handle attribute setting.""" # TODO: Planning to set 
properties dynamically based on the __init__ signature sig = inspect.signature(self.__init__) all_params = sig.parameters # Dictionary of parameter names and their details if name in self._UNWANTED_PROPS and value is not all_params[name].default: raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}") super().__setattr__(name, value) def extract(self, url: str, ix: int, html: str) -> List[Dict[str, Any]]: """ Extract meaningful blocks or chunks from the given HTML using an LLM. How it works: 1. Construct a prompt with variables. 2. Make a request to the LLM using the prompt. 3. Parse the response and extract blocks or chunks. Args: url: The URL of the webpage. ix: Index of the block. html: The HTML content of the webpage. Returns: A list of extracted blocks or chunks. """ if self.verbose: # print("[LOG] Extracting blocks from URL:", url) print(f"[LOG] Call LLM for {url} - block index: {ix}") variable_values = { "URL": url, "HTML": escape_json_string(sanitize_html(html)), } prompt_with_variables = PROMPT_EXTRACT_BLOCKS if self.instruction: variable_values["REQUEST"] = self.instruction prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION if self.extract_type == "schema" and self.schema: variable_values["SCHEMA"] = json.dumps(self.schema, indent=2) # if type of self.schema is dict else self.schema prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION if self.extract_type == "schema" and not self.schema: prompt_with_variables = PROMPT_EXTRACT_INFERRED_SCHEMA for variable in variable_values: prompt_with_variables = prompt_with_variables.replace( "{" + variable + "}", variable_values[variable] ) try: response = perform_completion_with_backoff( self.llm_config.provider, prompt_with_variables, self.llm_config.api_token, base_url=self.llm_config.base_url, json_response=self.force_json_response, extra_args=self.extra_args, base_delay=self.llm_config.backoff_base_delay, max_attempts=self.llm_config.backoff_max_attempts, 
exponential_factor=self.llm_config.backoff_exponential_factor ) # , json_response=self.extract_type == "schema") # Track usage usage = TokenUsage( completion_tokens=response.usage.completion_tokens, prompt_tokens=response.usage.prompt_tokens, total_tokens=response.usage.total_tokens, completion_tokens_details=response.usage.completion_tokens_details.__dict__ if response.usage.completion_tokens_details else {}, prompt_tokens_details=response.usage.prompt_tokens_details.__dict__ if response.usage.prompt_tokens_details else {}, ) self.usages.append(usage) # Update totals self.total_usage.completion_tokens += usage.completion_tokens self.total_usage.prompt_tokens += usage.prompt_tokens self.total_usage.total_tokens += usage.total_tokens try: content = response.choices[0].message.content blocks = None if self.force_json_response: blocks = json.loads(content) if isinstance(blocks, dict): # If it has only one key which calue is list then assign that to blocks, exampled: {"news": [..]} if len(blocks) == 1 and isinstance(list(blocks.values())[0], list): blocks = list(blocks.values())[0] else: # If it has only one key which value is not list then assign that to blocks, exampled: { "article_id": "1234", ... 
} blocks = [blocks] elif isinstance(blocks, list): # If it is a list then assign that to blocks blocks = blocks else: # blocks = extract_xml_data(["blocks"], response.choices[0].message.content)["blocks"] blocks = extract_xml_data(["blocks"], content)["blocks"] blocks = json.loads(blocks) for block in blocks: block["error"] = False except Exception: parsed, unparsed = split_and_parse_json_objects( response.choices[0].message.content ) blocks = parsed if unparsed: blocks.append( {"index": 0, "error": True, "tags": ["error"], "content": unparsed} ) if self.verbose: print( "[LOG] Extracted", len(blocks), "blocks from URL:", url, "block index:", ix, ) return blocks except Exception as e: if self.verbose: print(f"[LOG] Error in LLM extraction: {e}") # Add error information to extracted_content return [ { "index": ix, "error": True, "tags": ["error"], "content": str(e), } ] def _merge(self, documents, chunk_token_threshold, overlap) -> List[str]: """ Merge documents into sections based on chunk_token_threshold and overlap. """ sections = merge_chunks( docs = documents, target_size= chunk_token_threshold, overlap=overlap, word_token_ratio=self.word_token_rate ) return sections def run(self, url: str, sections: List[str]) -> List[Dict[str, Any]]: """ Process sections sequentially with a delay for rate limiting issues, specifically for LLMExtractionStrategy. Args: url: The URL of the webpage. sections: List of sections (strings) to process. Returns: A list of extracted blocks or chunks. 
""" merged_sections = self._merge( sections, self.chunk_token_threshold, overlap=int(self.chunk_token_threshold * self.overlap_rate), ) extracted_content = [] if self.llm_config.provider.startswith("groq/"): # Sequential processing with a delay for ix, section in enumerate(merged_sections): extract_func = partial(self.extract, url) extracted_content.extend( extract_func(ix, sanitize_input_encode(section)) ) time.sleep(0.5) # 500 ms delay between each processing else: # Parallel processing using ThreadPoolExecutor # extract_func = partial(self.extract, url) # for ix, section in enumerate(merged_sections): # extracted_content.append(extract_func(ix, section)) with ThreadPoolExecutor(max_workers=4) as executor: extract_func = partial(self.extract, url) futures = [ executor.submit(extract_func, ix, sanitize_input_encode(section)) for ix, section in enumerate(merged_sections) ] for future in as_completed(futures): try: extracted_content.extend(future.result()) except Exception as e: if self.verbose: print(f"Error in thread execution: {e}") # Add error information to extracted_content extracted_content.append( { "index": 0, "error": True, "tags": ["error"], "content": str(e), } ) return extracted_content async def aextract(self, url: str, ix: int, html: str) -> List[Dict[str, Any]]: """ Async version: Extract meaningful blocks or chunks from the given HTML using an LLM. How it works: 1. Construct a prompt with variables. 2. Make an async request to the LLM using the prompt. 3. Parse the response and extract blocks or chunks. Args: url: The URL of the webpage. ix: Index of the block. html: The HTML content of the webpage. Returns: A list of extracted blocks or chunks. 
""" from .utils import aperform_completion_with_backoff if self.verbose: print(f"[LOG] Call LLM for {url} - block index: {ix}") variable_values = { "URL": url, "HTML": escape_json_string(sanitize_html(html)), } prompt_with_variables = PROMPT_EXTRACT_BLOCKS if self.instruction: variable_values["REQUEST"] = self.instruction prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION if self.extract_type == "schema" and self.schema: variable_values["SCHEMA"] = json.dumps(self.schema, indent=2) prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION if self.extract_type == "schema" and not self.schema: prompt_with_variables = PROMPT_EXTRACT_INFERRED_SCHEMA for variable in variable_values: prompt_with_variables = prompt_with_variables.replace( "{" + variable + "}", variable_values[variable] ) try: response = await aperform_completion_with_backoff( self.llm_config.provider, prompt_with_variables, self.llm_config.api_token,
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/hub.py
crawl4ai/hub.py
# crawl4ai/hub.py from abc import ABC, abstractmethod from typing import Dict, Type, Union import logging import importlib from pathlib import Path import inspect logger = logging.getLogger(__name__) class BaseCrawler(ABC): def __init__(self): self.logger = logging.getLogger(self.__class__.__name__) @abstractmethod async def run(self, url: str = "", **kwargs) -> str: """ Implement this method to return JSON string. Must accept URL + arbitrary kwargs for flexibility. """ pass def __init_subclass__(cls, **kwargs): """Enforce interface validation on subclassing""" super().__init_subclass__(**kwargs) # Verify run method signature run_method = cls.run if not run_method.__code__.co_argcount >= 2: # self + url raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'") # Verify async nature if not inspect.iscoroutinefunction(run_method): raise TypeError(f"{cls.__name__}.run must be async") class CrawlerHub: _crawlers: Dict[str, Type[BaseCrawler]] = {} @classmethod def _discover_crawlers(cls): """Dynamically load crawlers from /crawlers in 3 lines""" base_path = Path(__file__).parent / "crawlers" for crawler_dir in base_path.iterdir(): if crawler_dir.is_dir(): try: module = importlib.import_module( f"crawl4ai.crawlers.{crawler_dir.name}.crawler" ) for attr in dir(module): cls._maybe_register_crawler( getattr(module, attr), crawler_dir.name ) except Exception as e: logger.warning(f"Failed {crawler_dir.name}: {str(e)}") @classmethod def _maybe_register_crawler(cls, obj, name: str): """Brilliant one-liner registration""" if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler: module = importlib.import_module(obj.__module__) obj.meta = getattr(module, "__meta__", {}) cls._crawlers[name] = obj @classmethod def get(cls, name: str) -> Union[Type[BaseCrawler], None]: if not cls._crawlers: cls._discover_crawlers() return cls._crawlers.get(name)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/dfs_strategy.py
crawl4ai/deep_crawling/dfs_strategy.py
# dfs_deep_crawl_strategy.py from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple from ..models import CrawlResult from .bfs_strategy import BFSDeepCrawlStrategy # noqa from ..types import AsyncWebCrawler, CrawlerRunConfig from ..utils import normalize_url_for_deep_crawl class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy): """ Depth-first deep crawling with familiar BFS rules. We reuse the same filters, scoring, and page limits from :class:`BFSDeepCrawlStrategy`, but walk the graph with a stack so we fully explore one branch before hopping to the next. DFS also keeps its own ``_dfs_seen`` set so we can drop duplicate links at discovery time without accidentally marking them as β€œalready crawled”. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._dfs_seen: Set[str] = set() def _reset_seen(self, start_url: str) -> None: """Start each crawl with a clean dedupe set seeded with the root URL.""" self._dfs_seen = {start_url} async def _arun_batch( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> List[CrawlResult]: """ Crawl level-by-level but emit results at the end. We keep a stack of ``(url, parent, depth)`` tuples, pop one at a time, and hand it to ``crawler.arun_many`` with deep crawling disabled so we remain in control of traversal. Every successful page bumps ``_pages_crawled`` and seeds new stack items discovered via :meth:`link_discovery`. """ visited: Set[str] = set() # Stack items: (url, parent_url, depth) stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)] depths: Dict[str, int] = {start_url: 0} results: List[CrawlResult] = [] self._reset_seen(start_url) while stack and not self._cancel_event.is_set(): url, parent, depth = stack.pop() if url in visited or depth > self.max_depth: continue visited.add(url) # Clone config to disable recursive deep crawling. 
batch_config = config.clone(deep_crawl_strategy=None, stream=False) url_results = await crawler.arun_many(urls=[url], config=batch_config) for result in url_results: result.metadata = result.metadata or {} result.metadata["depth"] = depth result.metadata["parent_url"] = parent if self.url_scorer: result.metadata["score"] = self.url_scorer.score(url) results.append(result) # Count only successful crawls toward max_pages limit if result.success: self._pages_crawled += 1 # Check if we've reached the limit during batch processing if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl") break # Exit the generator # Only discover links from successful crawls new_links: List[Tuple[str, Optional[str]]] = [] await self.link_discovery(result, url, depth, visited, new_links, depths) # Push new links in reverse order so the first discovered is processed next. for new_url, new_parent in reversed(new_links): new_depth = depths.get(new_url, depth + 1) stack.append((new_url, new_parent, new_depth)) return results async def _arun_stream( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> AsyncGenerator[CrawlResult, None]: """ Same traversal as :meth:`_arun_batch`, but yield pages immediately. Each popped URL is crawled, its metadata annotated, then the result gets yielded before we even look at the next stack entry. Successful crawls still feed :meth:`link_discovery`, keeping DFS order intact. 
""" visited: Set[str] = set() stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)] depths: Dict[str, int] = {start_url: 0} self._reset_seen(start_url) while stack and not self._cancel_event.is_set(): url, parent, depth = stack.pop() if url in visited or depth > self.max_depth: continue visited.add(url) stream_config = config.clone(deep_crawl_strategy=None, stream=True) stream_gen = await crawler.arun_many(urls=[url], config=stream_config) async for result in stream_gen: result.metadata = result.metadata or {} result.metadata["depth"] = depth result.metadata["parent_url"] = parent if self.url_scorer: result.metadata["score"] = self.url_scorer.score(url) yield result # Only count successful crawls toward max_pages limit # and only discover links from successful crawls if result.success: self._pages_crawled += 1 # Check if we've reached the limit during batch processing if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl") break # Exit the generator new_links: List[Tuple[str, Optional[str]]] = [] await self.link_discovery(result, url, depth, visited, new_links, depths) for new_url, new_parent in reversed(new_links): new_depth = depths.get(new_url, depth + 1) stack.append((new_url, new_parent, new_depth)) async def link_discovery( self, result: CrawlResult, source_url: str, current_depth: int, _visited: Set[str], next_level: List[Tuple[str, Optional[str]]], depths: Dict[str, int], ) -> None: """ Find the next URLs we should push onto the DFS stack. Parameters ---------- result : CrawlResult Output of the page we just crawled; its ``links`` block is our raw material. source_url : str URL of the parent page; stored so callers can track ancestry. current_depth : int Depth of the parent; children naturally sit at ``current_depth + 1``. _visited : Set[str] Present to match the BFS signature, but we rely on ``_dfs_seen`` instead. 
next_level : list of tuples The stack buffer supplied by the caller; we append new ``(url, parent)`` items here. depths : dict Shared depth map so future metadata tagging knows how deep each URL lives. Notes ----- - ``_dfs_seen`` keeps us from pushing duplicates without touching the traversal guard. - Validation, scoring, and capacity trimming mirror the BFS version so behaviour stays consistent. """ next_depth = current_depth + 1 if next_depth > self.max_depth: return remaining_capacity = self.max_pages - self._pages_crawled if remaining_capacity <= 0: self.logger.info( f"Max pages limit ({self.max_pages}) reached, stopping link discovery" ) return links = result.links.get("internal", []) if self.include_external: links += result.links.get("external", []) seen = self._dfs_seen valid_links: List[Tuple[str, float]] = [] for link in links: raw_url = link.get("href") if not raw_url: continue normalized_url = normalize_url_for_deep_crawl(raw_url, source_url) if not normalized_url or normalized_url in seen: continue if not await self.can_process_url(raw_url, next_depth): self.stats.urls_skipped += 1 continue score = self.url_scorer.score(normalized_url) if self.url_scorer else 0 if score < self.score_threshold: self.logger.debug( f"URL {normalized_url} skipped: score {score} below threshold {self.score_threshold}" ) self.stats.urls_skipped += 1 continue seen.add(normalized_url) valid_links.append((normalized_url, score)) if len(valid_links) > remaining_capacity: if self.url_scorer: valid_links.sort(key=lambda x: x[1], reverse=True) valid_links = valid_links[:remaining_capacity] self.logger.info( f"Limiting to {remaining_capacity} URLs due to max_pages limit" ) for url, score in valid_links: if score: result.metadata = result.metadata or {} result.metadata["score"] = score next_level.append((url, source_url)) depths[url] = next_depth
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/bfs_strategy.py
crawl4ai/deep_crawling/bfs_strategy.py
# bfs_deep_crawl_strategy.py import asyncio import logging from datetime import datetime from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple from urllib.parse import urlparse from ..models import TraversalStats from .filters import FilterChain from .scorers import URLScorer from . import DeepCrawlStrategy from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl from math import inf as infinity class BFSDeepCrawlStrategy(DeepCrawlStrategy): """ Breadth-First Search deep crawling strategy. Core functions: - arun: Main entry point; splits execution into batch or stream modes. - link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs. - can_process_url: Validates URL format and applies the filter chain. """ def __init__( self, max_depth: int, filter_chain: FilterChain = FilterChain(), url_scorer: Optional[URLScorer] = None, include_external: bool = False, score_threshold: float = -infinity, max_pages: int = infinity, logger: Optional[logging.Logger] = None, ): self.max_depth = max_depth self.filter_chain = filter_chain self.url_scorer = url_scorer self.include_external = include_external self.score_threshold = score_threshold self.max_pages = max_pages # self.logger = logger or logging.getLogger(__name__) # Ensure logger is always a Logger instance, not a dict from serialization if isinstance(logger, logging.Logger): self.logger = logger else: # Create a new logger if logger is None, dict, or any other non-Logger type self.logger = logging.getLogger(__name__) self.stats = TraversalStats(start_time=datetime.now()) self._cancel_event = asyncio.Event() self._pages_crawled = 0 async def can_process_url(self, url: str, depth: int) -> bool: """ Validates the URL and applies the filter chain. For the start URL (depth 0) filtering is bypassed. 
""" try: parsed = urlparse(url) if not parsed.scheme or not parsed.netloc: raise ValueError("Missing scheme or netloc") if parsed.scheme not in ("http", "https"): raise ValueError("Invalid scheme") if "." not in parsed.netloc: raise ValueError("Invalid domain") except Exception as e: self.logger.warning(f"Invalid URL: {url}, error: {e}") return False if depth != 0 and not await self.filter_chain.apply(url): return False return True async def link_discovery( self, result: CrawlResult, source_url: str, current_depth: int, visited: Set[str], next_level: List[Tuple[str, Optional[str]]], depths: Dict[str, int], ) -> None: """ Extracts links from the crawl result, validates and scores them, and prepares the next level of URLs. Each valid URL is appended to next_level as a tuple (url, parent_url) and its depth is tracked. """ next_depth = current_depth + 1 if next_depth > self.max_depth: return # If we've reached the max pages limit, don't discover new links remaining_capacity = self.max_pages - self._pages_crawled if remaining_capacity <= 0: self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery") return # Get internal links and, if enabled, external links. 
links = result.links.get("internal", []) if self.include_external: links += result.links.get("external", []) valid_links = [] # First collect all valid links for link in links: url = link.get("href") # Strip URL fragments to avoid duplicate crawling # base_url = url.split('#')[0] if url else url base_url = normalize_url_for_deep_crawl(url, source_url) if base_url in visited: continue if not await self.can_process_url(url, next_depth): self.stats.urls_skipped += 1 continue # Score the URL if a scorer is provided score = self.url_scorer.score(base_url) if self.url_scorer else 0 # Skip URLs with scores below the threshold if score < self.score_threshold: self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}") self.stats.urls_skipped += 1 continue visited.add(base_url) valid_links.append((base_url, score)) # If we have more valid links than capacity, sort by score and take the top ones if len(valid_links) > remaining_capacity: if self.url_scorer: # Sort by score in descending order valid_links.sort(key=lambda x: x[1], reverse=True) # Take only as many as we have capacity for valid_links = valid_links[:remaining_capacity] self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit") # Process the final selected links for url, score in valid_links: # attach the score to metadata if needed if score: result.metadata = result.metadata or {} result.metadata["score"] = score next_level.append((url, source_url)) depths[url] = next_depth async def _arun_batch( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> List[CrawlResult]: """ Batch (non-streaming) mode: Processes one BFS level at a time, then yields all the results. 
""" visited: Set[str] = set() # current_level holds tuples: (url, parent_url) current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)] depths: Dict[str, int] = {start_url: 0} results: List[CrawlResult] = [] while current_level and not self._cancel_event.is_set(): # Check if we've already reached max_pages before starting a new level if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl") break next_level: List[Tuple[str, Optional[str]]] = [] urls = [url for url, _ in current_level] # Clone the config to disable deep crawling recursion and enforce batch mode. batch_config = config.clone(deep_crawl_strategy=None, stream=False) batch_results = await crawler.arun_many(urls=urls, config=batch_config) # Update pages crawled counter - count only successful crawls successful_results = [r for r in batch_results if r.success] self._pages_crawled += len(successful_results) for result in batch_results: url = result.url depth = depths.get(url, 0) result.metadata = result.metadata or {} result.metadata["depth"] = depth parent_url = next((parent for (u, parent) in current_level if u == url), None) result.metadata["parent_url"] = parent_url results.append(result) # Only discover links from successful crawls if result.success: # Link discovery will handle the max pages limit internally await self.link_discovery(result, url, depth, visited, next_level, depths) current_level = next_level return results async def _arun_stream( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> AsyncGenerator[CrawlResult, None]: """ Streaming mode: Processes one BFS level at a time and yields results immediately as they arrive. 
""" visited: Set[str] = set() current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)] depths: Dict[str, int] = {start_url: 0} while current_level and not self._cancel_event.is_set(): next_level: List[Tuple[str, Optional[str]]] = [] urls = [url for url, _ in current_level] visited.update(urls) stream_config = config.clone(deep_crawl_strategy=None, stream=True) stream_gen = await crawler.arun_many(urls=urls, config=stream_config) # Keep track of processed results for this batch results_count = 0 async for result in stream_gen: url = result.url depth = depths.get(url, 0) result.metadata = result.metadata or {} result.metadata["depth"] = depth parent_url = next((parent for (u, parent) in current_level if u == url), None) result.metadata["parent_url"] = parent_url # Count only successful crawls if result.success: self._pages_crawled += 1 # Check if we've reached the limit during batch processing if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl") break # Exit the generator results_count += 1 yield result # Only discover links from successful crawls if result.success: # Link discovery will handle the max pages limit internally await self.link_discovery(result, url, depth, visited, next_level, depths) # If we didn't get results back (e.g. due to errors), avoid getting stuck in an infinite loop # by considering these URLs as visited but not counting them toward the max_pages limit if results_count == 0 and urls: self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited") current_level = next_level async def shutdown(self) -> None: """ Clean up resources and signal cancellation of the crawl. """ self._cancel_event.set() self.stats.end_time = datetime.now()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/base_strategy.py
crawl4ai/deep_crawling/base_strategy.py
from __future__ import annotations from abc import ABC, abstractmethod from typing import AsyncGenerator, Optional, Set, List, Dict from functools import wraps from contextvars import ContextVar from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn class DeepCrawlDecorator: """Decorator that adds deep crawling capability to arun method.""" deep_crawl_active = ContextVar("deep_crawl_active", default=False) def __init__(self, crawler: AsyncWebCrawler): self.crawler = crawler def __call__(self, original_arun): @wraps(original_arun) async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs): # If deep crawling is already active, call the original method to avoid recursion. if config and config.deep_crawl_strategy and not self.deep_crawl_active.get(): token = self.deep_crawl_active.set(True) # Await the arun call to get the actual result object. result_obj = await config.deep_crawl_strategy.arun( crawler=self.crawler, start_url=url, config=config ) if config.stream: async def result_wrapper(): try: async for result in result_obj: yield result finally: self.deep_crawl_active.reset(token) return result_wrapper() else: try: return result_obj finally: self.deep_crawl_active.reset(token) return await original_arun(url, config=config, **kwargs) return wrapped_arun class DeepCrawlStrategy(ABC): """ Abstract base class for deep crawling strategies. Core functions: - arun: Main entry point that returns an async generator of CrawlResults. - shutdown: Clean up resources. - can_process_url: Validate a URL and decide whether to process it. - _process_links: Extract and process links from a CrawlResult. """ @abstractmethod async def _arun_batch( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> List[CrawlResult]: """ Batch (non-streaming) mode: Processes one BFS level at a time, then yields all the results. 
""" pass @abstractmethod async def _arun_stream( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> AsyncGenerator[CrawlResult, None]: """ Streaming mode: Processes one BFS level at a time and yields results immediately as they arrive. """ pass async def arun( self, start_url: str, crawler: AsyncWebCrawler, config: Optional[CrawlerRunConfig] = None, ) -> RunManyReturn: """ Traverse the given URL using the specified crawler. Args: start_url (str): The URL from which to start crawling. crawler (AsyncWebCrawler): The crawler instance to use. crawler_run_config (Optional[CrawlerRunConfig]): Crawler configuration. Returns: Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]] """ if config is None: raise ValueError("CrawlerRunConfig must be provided") if config.stream: return self._arun_stream(start_url, crawler, config) else: return await self._arun_batch(start_url, crawler, config) def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig): return self.arun(start_url, crawler, config) @abstractmethod async def shutdown(self) -> None: """ Clean up resources used by the deep crawl strategy. """ pass @abstractmethod async def can_process_url(self, url: str, depth: int) -> bool: """ Validate the URL format and apply custom filtering logic. Args: url (str): The URL to validate. depth (int): The current depth in the crawl. Returns: bool: True if the URL should be processed, False otherwise. """ pass @abstractmethod async def link_discovery( self, result: CrawlResult, source_url: str, current_depth: int, visited: Set[str], next_level: List[tuple], depths: Dict[str, int], ) -> None: """ Extract and process links from the given crawl result. This method should: - Validate each extracted URL using can_process_url. - Optionally score URLs. - Append valid URLs (and their parent references) to the next_level list. - Update the depths dictionary with the new depth for each URL. 
Args: result (CrawlResult): The result from a crawl operation. source_url (str): The URL from which this result was obtained. current_depth (int): The depth at which the source URL was processed. visited (Set[str]): Set of already visited URLs. next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level. depths (Dict[str, int]): Mapping of URLs to their current depth. """ pass
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/scorers.py
crawl4ai/deep_crawling/scorers.py
from abc import ABC, abstractmethod from typing import List, Dict, Optional from dataclasses import dataclass from urllib.parse import urlparse, unquote import re import logging from functools import lru_cache from array import array import ctypes import platform PLATFORM = platform.system() # Pre-computed scores for common year differences _SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25] # Pre-computed scores for common year differences _FRESHNESS_SCORES = [ 1.0, # Current year 0.9, # Last year 0.8, # 2 years ago 0.7, # 3 years ago 0.6, # 4 years ago 0.5, # 5 years ago ] class ScoringStats: __slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score') def __init__(self): self._urls_scored = 0 self._total_score = 0.0 self._min_score = None # Lazy initialization self._max_score = None def update(self, score: float) -> None: """Optimized update with minimal operations""" self._urls_scored += 1 self._total_score += score # Lazy min/max tracking - only if actually accessed if self._min_score is not None: if score < self._min_score: self._min_score = score if self._max_score is not None: if score > self._max_score: self._max_score = score def get_average(self) -> float: """Direct calculation instead of property""" return self._total_score / self._urls_scored if self._urls_scored else 0.0 def get_min(self) -> float: """Lazy min calculation""" if self._min_score is None: self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0 return self._min_score def get_max(self) -> float: """Lazy max calculation""" if self._max_score is None: self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0 return self._max_score class URLScorer(ABC): __slots__ = ('_weight', '_stats') def __init__(self, weight: float = 1.0): # Store weight directly as float32 for memory efficiency self._weight = ctypes.c_float(weight).value self._stats = ScoringStats() @abstractmethod def _calculate_score(self, url: str) -> float: 
"""Calculate raw score for URL.""" pass def score(self, url: str) -> float: """Calculate weighted score with minimal overhead.""" score = self._calculate_score(url) * self._weight self._stats.update(score) return score @property def stats(self): """Access to scoring statistics.""" return self._stats @property def weight(self): return self._weight class CompositeScorer(URLScorer): __slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array') def __init__(self, scorers: List[URLScorer], normalize: bool = True): """Initialize composite scorer combining multiple scoring strategies. Optimized for: - Fast parallel scoring - Memory efficient score aggregation - Quick short-circuit conditions - Pre-allocated arrays Args: scorers: List of scoring strategies to combine normalize: Whether to normalize final score by scorer count """ super().__init__(weight=1.0) self._scorers = scorers self._normalize = normalize # Pre-allocate arrays for scores and weights self._weights_array = array('f', [s.weight for s in scorers]) self._score_array = array('f', [0.0] * len(scorers)) @lru_cache(maxsize=10000) def _calculate_score(self, url: str) -> float: """Calculate combined score from all scoring strategies. Uses: 1. Pre-allocated arrays for scores 2. Short-circuit on zero scores 3. Optimized normalization 4. Vectorized operations where possible Args: url: URL to score Returns: Combined and optionally normalized score """ total_score = 0.0 scores = self._score_array # Get scores from all scorers for i, scorer in enumerate(self._scorers): # Use public score() method which applies weight scores[i] = scorer.score(url) total_score += scores[i] # Normalize if requested if self._normalize and self._scorers: count = len(self._scorers) return total_score / count return total_score def score(self, url: str) -> float: """Public scoring interface with stats tracking. 
Args: url: URL to score Returns: Final combined score """ score = self._calculate_score(url) self.stats.update(score) return score class KeywordRelevanceScorer(URLScorer): __slots__ = ('_weight', '_stats', '_keywords', '_case_sensitive') def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False): super().__init__(weight=weight) self._case_sensitive = case_sensitive # Pre-process keywords once self._keywords = [k if case_sensitive else k.lower() for k in keywords] @lru_cache(maxsize=10000) def _url_bytes(self, url: str) -> bytes: """Cache decoded URL bytes""" return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8') def _calculate_score(self, url: str) -> float: """Fast string matching without regex or byte conversion""" if not self._case_sensitive: url = url.lower() matches = sum(1 for k in self._keywords if k in url) # Fast return paths if not matches: return 0.0 if matches == len(self._keywords): return 1.0 return matches / len(self._keywords) class PathDepthScorer(URLScorer): __slots__ = ('_weight', '_stats', '_optimal_depth') # Remove _url_cache def __init__(self, optimal_depth: int = 3, weight: float = 1.0): super().__init__(weight=weight) self._optimal_depth = optimal_depth @staticmethod @lru_cache(maxsize=10000) def _quick_depth(path: str) -> int: """Ultra fast path depth calculation. 
Examples: - "http://example.com" -> 0 # No path segments - "http://example.com/" -> 0 # Empty path - "http://example.com/a" -> 1 - "http://example.com/a/b" -> 2 """ if not path or path == '/': return 0 if '/' not in path: return 0 depth = 0 last_was_slash = True for c in path: if c == '/': if not last_was_slash: depth += 1 last_was_slash = True else: last_was_slash = False if not last_was_slash: depth += 1 return depth @lru_cache(maxsize=10000) # Cache the whole calculation def _calculate_score(self, url: str) -> float: pos = url.find('/', url.find('://') + 3) if pos == -1: depth = 0 else: depth = self._quick_depth(url[pos:]) # Use lookup table for common distances distance = depth - self._optimal_depth distance = distance if distance >= 0 else -distance # Faster than abs() if distance < 4: return _SCORE_LOOKUP[distance] return 1.0 / (1.0 + distance) class ContentTypeScorer(URLScorer): __slots__ = ('_weight', '_exact_types', '_regex_types') def __init__(self, type_weights: Dict[str, float], weight: float = 1.0): """Initialize scorer with type weights map. Args: type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0}) weight: Overall weight multiplier for this scorer """ super().__init__(weight=weight) self._exact_types = {} # Fast lookup for simple extensions self._regex_types = [] # Fallback for complex patterns # Split into exact vs regex matchers for performance for pattern, score in type_weights.items(): if pattern.startswith('.') and pattern.endswith('$'): ext = pattern[1:-1] self._exact_types[ext] = score else: self._regex_types.append((re.compile(pattern), score)) # Sort complex patterns by score for early exit self._regex_types.sort(key=lambda x: -x[1]) @staticmethod @lru_cache(maxsize=10000) def _quick_extension(url: str) -> str: """Extract file extension ultra-fast without regex/splits. 
Handles: - Basic extensions: "example.html" -> "html" - Query strings: "page.php?id=1" -> "php" - Fragments: "doc.pdf#page=1" -> "pdf" - Path params: "file.jpg;width=100" -> "jpg" Args: url: URL to extract extension from Returns: Extension without dot, or empty string if none found """ pos = url.rfind('.') if pos == -1: return '' # Find first non-alphanumeric char after extension end = len(url) for i in range(pos + 1, len(url)): c = url[i] # Stop at query string, fragment, path param or any non-alphanumeric if c in '?#;' or not c.isalnum(): end = i break return url[pos + 1:end].lower() @lru_cache(maxsize=10000) def _calculate_score(self, url: str) -> float: """Calculate content type score for URL. Uses staged approach: 1. Try exact extension match (fast path) 2. Fall back to regex patterns if needed Args: url: URL to score Returns: Score between 0.0 and 1.0 * weight """ # Fast path: direct extension lookup ext = self._quick_extension(url) if ext: score = self._exact_types.get(ext, None) if score is not None: return score # Slow path: regex patterns for pattern, score in self._regex_types: if pattern.search(url): return score return 0.0 class FreshnessScorer(URLScorer): __slots__ = ('_weight', '_date_pattern', '_current_year') def __init__(self, weight: float = 1.0, current_year: int = 2024): """Initialize freshness scorer. 
Extracts and scores dates from URLs using format: - YYYY/MM/DD - YYYY-MM-DD - YYYY_MM_DD - YYYY (year only) Args: weight: Score multiplier current_year: Year to calculate freshness against (default 2024) """ super().__init__(weight=weight) self._current_year = current_year # Combined pattern for all date formats # Uses non-capturing groups (?:) and alternation self._date_pattern = re.compile( r'(?:/' # Path separator r'|[-_])' # or date separators r'((?:19|20)\d{2})' # Year group (1900-2099) r'(?:' # Optional month/day group r'(?:/|[-_])' # Date separator r'(?:\d{2})' # Month r'(?:' # Optional day r'(?:/|[-_])' # Date separator r'(?:\d{2})' # Day r')?' # Day is optional r')?' # Month/day group is optional ) @lru_cache(maxsize=10000) def _extract_year(self, url: str) -> Optional[int]: """Extract the most recent year from URL. Args: url: URL to extract year from Returns: Year as int or None if no valid year found """ matches = self._date_pattern.finditer(url) latest_year = None # Find most recent year for match in matches: year = int(match.group(1)) if (year <= self._current_year and # Sanity check (latest_year is None or year > latest_year)): latest_year = year return latest_year @lru_cache(maxsize=10000) def _calculate_score(self, url: str) -> float: """Calculate freshness score based on URL date. More recent years score higher. Uses pre-computed scoring table for common year differences. 
Args: url: URL to score Returns: Score between 0.0 and 1.0 * weight """ year = self._extract_year(url) if year is None: return 0.5 # Default score # Use lookup table for common year differences year_diff = self._current_year - year if year_diff < len(_FRESHNESS_SCORES): return _FRESHNESS_SCORES[year_diff] # Fallback calculation for older content return max(0.1, 1.0 - year_diff * 0.1) class DomainAuthorityScorer(URLScorer): __slots__ = ('_weight', '_domain_weights', '_default_weight', '_top_domains') def __init__( self, domain_weights: Dict[str, float], default_weight: float = 0.5, weight: float = 1.0, ): """Initialize domain authority scorer. Args: domain_weights: Dict mapping domains to authority scores default_weight: Score for unknown domains weight: Overall scorer weight multiplier Example: { 'python.org': 1.0, 'github.com': 0.9, 'medium.com': 0.7 } """ super().__init__(weight=weight) # Pre-process domains for faster lookup self._domain_weights = { domain.lower(): score for domain, score in domain_weights.items() } self._default_weight = default_weight # Cache top domains for fast path self._top_domains = { domain: score for domain, score in sorted( domain_weights.items(), key=lambda x: -x[1] )[:5] # Keep top 5 highest scoring domains } @staticmethod @lru_cache(maxsize=10000) def _extract_domain(url: str) -> str: """Extract domain from URL ultra-fast. 
Handles: - Basic domains: "example.com" - Subdomains: "sub.example.com" - Ports: "example.com:8080" - IPv4: "192.168.1.1" Args: url: Full URL to extract domain from Returns: Lowercase domain without port """ # Find domain start start = url.find('://') if start == -1: start = 0 else: start += 3 # Find domain end end = url.find('/', start) if end == -1: end = url.find('?', start) if end == -1: end = url.find('#', start) if end == -1: end = len(url) # Extract domain and remove port domain = url[start:end] port_idx = domain.rfind(':') if port_idx != -1: domain = domain[:port_idx] return domain.lower() @lru_cache(maxsize=10000) def _calculate_score(self, url: str) -> float: """Calculate domain authority score. Uses staged approach: 1. Check top domains (fastest) 2. Check full domain weights 3. Return default weight Args: url: URL to score Returns: Authority score between 0.0 and 1.0 * weight """ domain = self._extract_domain(url) # Fast path: check top domains first score = self._top_domains.get(domain) if score is not None: return score # Regular path: check all domains return self._domain_weights.get(domain, self._default_weight)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/crazy.py
crawl4ai/deep_crawling/crazy.py
from __future__ import annotations # I just got crazy, trying to wrute K&R C but in Python. Right now I feel like I'm in a quantum state. # I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?" # ------ Imports That Will Make You Question Reality ------ # from functools import wraps from contextvars import ContextVar import inspect from crawl4ai import CacheMode from crawl4ai.async_configs import CrawlerRunConfig from crawl4ai.models import CrawlResult, TraversalStats from crawl4ai.deep_crawling.filters import FilterChain from crawl4ai.async_webcrawler import AsyncWebCrawler import time import logging from urllib.parse import urlparse from abc import ABC, abstractmethod from collections import deque import asyncio from typing import ( AsyncGenerator, Dict, List, TypeVar, Generic, Tuple, Callable, Awaitable, Union, ) from functools import lru_cache import mmh3 from bitarray import bitarray import numpy as np from heapq import heappush, heappop # ------ Type Algebra Mastery ------ # CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult") PriorityT = TypeVar("PriorityT") P = TypeVar("P") # ------ Hyperscalar Context Management ------ # deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque()) # ------ Algebraic Crawler Monoid ------ # class TraversalContext: __slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth') def __init__(self, priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0): self.visited: BloomFilter = BloomFilter(10**6, 0.01) # 1M items, 1% FP self.frontier: PriorityQueue = PriorityQueue() self.depths: Dict[str, int] = {} self.priority_fn = priority_fn self.current_depth = 0 def clone_for_level(self) -> TraversalContext: """Monadic context propagation""" new_ctx = TraversalContext(self.priority_fn) new_ctx.visited = self.visited.copy() new_ctx.depths = self.depths.copy() new_ctx.current_depth = self.current_depth return new_ctx class 
PriorityQueue(Generic[PriorityT]): """Fibonacci heap-inspired priority queue with O(1) amortized operations""" __slots__ = ('_heap', '_index') def __init__(self): self._heap: List[Tuple[PriorityT, float, P]] = [] self._index: Dict[P, int] = {} def insert(self, priority: PriorityT, item: P) -> None: tiebreaker = time.time() # Ensure FIFO for equal priorities heappush(self._heap, (priority, tiebreaker, item)) self._index[item] = len(self._heap) - 1 def extract(self, top_n = 1) -> P: items = [] for _ in range(top_n): if not self._heap: break priority, _, item = heappop(self._heap) del self._index[item] items.append(item) if not items: raise IndexError("Priority queue empty") return items # while self._heap: # _, _, item = heappop(self._heap) # if item in self._index: # del self._index[item] # return item raise IndexError("Priority queue empty") def is_empty(self) -> bool: return not bool(self._heap) class BloomFilter: """Optimal Bloom filter using murmur3 hash avalanche""" __slots__ = ('size', 'hashes', 'bits') def __init__(self, capacity: int, error_rate: float): self.size = self._optimal_size(capacity, error_rate) self.hashes = self._optimal_hashes(capacity, self.size) self.bits = bitarray(self.size) self.bits.setall(False) @staticmethod def _optimal_size(n: int, p: float) -> int: m = - (n * np.log(p)) / (np.log(2) ** 2) return int(np.ceil(m)) @staticmethod def _optimal_hashes(n: int, m: int) -> int: k = (m / n) * np.log(2) return int(np.ceil(k)) def add(self, item: str) -> None: for seed in range(self.hashes): digest = mmh3.hash(item, seed) % self.size self.bits[digest] = True def __contains__(self, item: str) -> bool: return all( self.bits[mmh3.hash(item, seed) % self.size] for seed in range(self.hashes) ) def copy(self) -> BloomFilter: new = object.__new__(BloomFilter) new.size = self.size new.hashes = self.hashes new.bits = self.bits.copy() return new def __len__(self) -> int: """ Estimates the number of items in the filter using the count of set bits and the 
formula: n = -m/k * ln(1 - X/m) where: m = size of bit array k = number of hash functions X = count of set bits """ set_bits = self.bits.count(True) if set_bits == 0: return 0 # Use the inverse bloom filter formula to estimate cardinality return int( -(self.size / self.hashes) * np.log(1 - set_bits / self.size) ) def bit_count(self) -> int: """Returns the raw count of set bits in the filter""" return self.bits.count(True) def __repr__(self) -> str: return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})" # ------ Hyper-Optimal Deep Crawl Core ------ # class DeepCrawlDecorator: """Metaprogramming marvel: Zero-cost deep crawl abstraction""" def __init__(self, crawler: AsyncWebCrawler): self.crawler = crawler def __call__(self, original_arun: Callable) -> Callable: @wraps(original_arun) async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs): stack = deep_crawl_ctx.get() if config and config.deep_crawl_strategy and not stack: stack.append(self.crawler) try: deep_crawl_ctx.set(stack) async for result in config.deep_crawl_strategy.traverse( start_url=url, crawler=self.crawler, config=config ): yield result finally: stack.pop() deep_crawl_ctx.set(stack) else: result = await original_arun(url, config=config, **kwargs) yield result return quantum_arun async def collect_results(url, crawler, config): if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")): setattr(crawler, "arun", getattr(crawler, "original_arun")) ret = crawler.arun(url, config=config) # If arun is an async generator, iterate over it if inspect.isasyncgen(ret): return [r async for r in ret] # Otherwise, await the coroutine and normalize to a list result = await ret return result if isinstance(result, list) else [result] async def collect_many_results(url, crawler, config): # Replace back arun to its original implementation if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")): setattr(crawler, "arun", getattr(crawler, 
"original_arun")) ret = crawler.arun_many(url, config=config) # If arun is an async generator, iterate over it if inspect.isasyncgen(ret): return [r async for r in ret] # Otherwise, await the coroutine and normalize to a list result = await ret return result if isinstance(result, list) else [result] # ------ Deep Crawl Strategy Interface ------ # CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult) # In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator. RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]] class DeepCrawlStrategy(ABC): """Abstract base class that will make Dijkstra smile""" @abstractmethod async def traverse(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig) -> RunManyReturn: """Traverse with O(1) memory complexity via generator fusion""" ... @abstractmethod def precompute_priority(self, url: str) -> Awaitable[float]: """Quantum-inspired priority precomputation""" pass @abstractmethod async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]: """Hilbert-curve optimized link generation""" pass # ------ BFS That Would Make Knuth Proud ------ # def calculate_quantum_batch_size( depth: int, max_depth: int, frontier_size: int, visited_size: int ) -> int: """ Calculates optimal batch size for URL processing using quantum-inspired mathematical principles. This function implements a sophisticated batch size calculation using: 1. Golden Ratio (Ο†) based scaling for optimal irrationality 2. Depth-aware amplitude modulation 3. Harmonic series dampening 4. Logarithmic growth control 5. 
Dynamic frontier adaptation The formula follows the quantum harmonic oscillator principle: N = βŒˆΟ†^(2d) * logβ‚‚(|V|) * H(d)⁻¹ * min(20, |F|/10)βŒ‰ where: Ο† = Golden Ratio ((1 + √5) / 2) d = depth factor (normalized remaining depth) |V| = size of visited set H(d) = d-th harmonic number |F| = frontier size Args: depth (int): Current traversal depth max_depth (int): Maximum allowed depth frontier_size (int): Current size of frontier queue visited_size (int): Number of URLs visited so far Returns: int: Optimal batch size bounded between 1 and 100 Mathematical Properties: - Maintains O(log n) growth with respect to visited size - Provides Ο†-optimal distribution of resources - Ensures quantum-like state transitions between depths - Harmonically dampened to prevent exponential explosion """ # Golden ratio Ο† = (1 + √5) / 2 Ο† = (1 + 5 ** 0.5) / 2 # Calculate normalized depth factor [0, 1] depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0 # Compute harmonic number for current depth harmonic = sum(1/k for k in range(1, depth + 2)) # Calculate quantum batch size batch_size = int(np.ceil( (Ο† ** (depth_factor * 2)) * # Golden ratio scaling np.log2(visited_size + 2) * # Logarithmic growth factor (1 / harmonic) * # Harmonic dampening max(1, min(20, frontier_size / 10)) # Frontier-aware scaling )) # Enforce practical bounds return max(1, min(100, batch_size)) class BFSDeepCrawlStrategy(DeepCrawlStrategy): """Breadth-First Search with Einstein-Rosen bridge optimization""" __slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel') def __init__(self, max_depth: int, filter_chain: FilterChain = FilterChain(), priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0, logger: logging.Logger = None): self.max_depth = max_depth self.filter_chain = filter_chain self.priority_fn = priority_fn self.stats = TraversalStats() self._cancel = asyncio.Event() self.semaphore = asyncio.Semaphore(1000) async def traverse(self, start_url: str, 
crawler: AsyncWebCrawler, config: CrawlerRunConfig) -> RunManyReturn: """Non-blocking BFS with O(b^d) time complexity awareness""" ctx = TraversalContext(self.priority_fn) ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0)) ctx.visited.add(start_url) ctx.depths[start_url] = 0 while not ctx.frontier.is_empty() and not self._cancel.is_set(): # Use the best algorith, to find top_n value top_n = calculate_quantum_batch_size( depth=ctx.current_depth, max_depth=self.max_depth, frontier_size=len(ctx.frontier._heap), visited_size=len(ctx.visited) ) urls = ctx.frontier.extract(top_n=top_n) # url, parent, depth = ctx.frontier.extract(top_n=top_n) if urls: ctx.current_depth = urls[0][2] async with self.semaphore: results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config) # results = await asyncio.gather(*[ # collect_results(url, crawler, config) for (url, parent, depth) in urls # ]) # result = _result[0] for ix, result in enumerate(results): url, parent, depth = result.url, urls[ix][1], urls[ix][2] result.metadata['depth'] = depth result.metadata['parent'] = parent yield result if depth < self.max_depth: async for link in self.link_hypercube(result): if link not in ctx.visited: priority = self.priority_fn(link) ctx.frontier.insert(priority, (link, url, depth + 1)) ctx.visited.add(link) ctx.depths[link] = depth + 1 @lru_cache(maxsize=65536) async def validate_url(self, url: str) -> bool: """Memoized URL validation with Ξ»-calculus purity""" try: parsed = urlparse(url) return (parsed.scheme in {'http', 'https'} and '.' 
in parsed.netloc and await self.filter_chain.apply(url)) except Exception: return False async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]: """Hilbert-ordered link generation with O(1) yield latency""" links = (link['href'] for link in result.links.get('internal', [])) validated = filter(self.validate_url, links) for link in sorted(validated, key=lambda x: -self.priority_fn(x)): yield link def __aiter__(self) -> AsyncGenerator[CrawlResult, None]: """Native async iterator interface""" return self.traverse() async def __anext__(self) -> CrawlResult: """True async iterator protocol implementation""" result = await self.traverse().__anext__() if result: return result raise StopAsyncIteration async def precompute_priority(self, url): return super().precompute_priority(url) async def shutdown(self): self._cancel.set() # ------ Usage That Will Drop Jaws ------ # async def main(): """Quantum crawl example""" strategy = BFSDeepCrawlStrategy( max_depth=2, priority_fn=lambda url: 1.0 / (len(url) + 1e-9), # Inverse length priority # filter_chain=FilterChain(...) ) config: CrawlerRunConfig = CrawlerRunConfig( deep_crawl_strategy=strategy, stream=False, verbose=True, cache_mode=CacheMode.BYPASS ) async with AsyncWebCrawler() as crawler: run_decorator = DeepCrawlDecorator(crawler) setattr(crawler, "original_arun", crawler.arun) crawler.arun = run_decorator(crawler.arun) start_time = time.perf_counter() async for result in crawler.arun("https://docs.crawl4ai.com", config=config): print(f"πŸŒ€ {result.url} (Depth: {result.metadata['depth']})") print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/filters.py
crawl4ai/deep_crawling/filters.py
from abc import ABC, abstractmethod from typing import List, Pattern, Set, Union from urllib.parse import urlparse from array import array import re import logging from functools import lru_cache import fnmatch from dataclasses import dataclass import weakref import math from collections import defaultdict from typing import Dict from ..utils import HeadPeekr import asyncio import inspect @dataclass class FilterStats: __slots__ = ("_counters",) def __init__(self): # Use array of unsigned ints for atomic operations self._counters = array("I", [0, 0, 0]) # total, passed, rejected @property def total_urls(self): return self._counters[0] @property def passed_urls(self): return self._counters[1] @property def rejected_urls(self): return self._counters[2] class URLFilter(ABC): """Optimized base filter class""" __slots__ = ("name", "stats", "_logger_ref") def __init__(self, name: str = None): self.name = name or self.__class__.__name__ self.stats = FilterStats() # Lazy logger initialization using weakref self._logger_ref = None @property def logger(self): if self._logger_ref is None or self._logger_ref() is None: logger = logging.getLogger(f"urlfilter.{self.name}") self._logger_ref = weakref.ref(logger) return self._logger_ref() @abstractmethod def apply(self, url: str) -> bool: pass def _update_stats(self, passed: bool): # Use direct array index for speed self.stats._counters[0] += 1 # total self.stats._counters[1] += passed # passed self.stats._counters[2] += not passed # rejected class FilterChain: """Optimized filter chain""" __slots__ = ("filters", "stats", "_logger_ref") def __init__(self, filters: List[URLFilter] = None): self.filters = tuple(filters or []) # Immutable tuple for speed self.stats = FilterStats() self._logger_ref = None @property def logger(self): if self._logger_ref is None or self._logger_ref() is None: logger = logging.getLogger("urlfilter.chain") self._logger_ref = weakref.ref(logger) return self._logger_ref() def add_filter(self, filter_: 
URLFilter) -> "FilterChain": """Add a filter to the chain""" self.filters.append(filter_) return self # Enable method chaining async def apply(self, url: str) -> bool: """Apply all filters concurrently when possible""" self.stats._counters[0] += 1 # Total processed URLs tasks = [] for f in self.filters: result = f.apply(url) if inspect.isawaitable(result): tasks.append(result) # Collect async tasks elif not result: # Sync rejection self.stats._counters[2] += 1 # Sync rejected return False if tasks: results = await asyncio.gather(*tasks) # Count how many filters rejected rejections = results.count(False) self.stats._counters[2] += rejections if not all(results): return False # Stop early if any filter rejected self.stats._counters[1] += 1 # Passed return True class URLPatternFilter(URLFilter): """Pattern filter balancing speed and completeness""" __slots__ = ( "patterns", # Store original patterns for serialization "use_glob", # Store original use_glob for serialization "reverse", # Store original reverse for serialization "_simple_suffixes", "_simple_prefixes", "_domain_patterns", "_path_patterns", "_reverse", ) PATTERN_TYPES = { "SUFFIX": 1, # *.html "PREFIX": 2, # /foo/* "DOMAIN": 3, # *.example.com "PATH": 4, # Everything else "REGEX": 5, } def __init__( self, patterns: Union[str, Pattern, List[Union[str, Pattern]]], use_glob: bool = True, reverse: bool = False, ): super().__init__() # Store original constructor params for serialization self.patterns = patterns self.use_glob = use_glob self.reverse = reverse self._reverse = reverse patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns self._simple_suffixes = set() self._simple_prefixes = set() self._domain_patterns = [] self._path_patterns = [] for pattern in patterns: pattern_type = self._categorize_pattern(pattern) self._add_pattern(pattern, pattern_type) def _categorize_pattern(self, pattern: str) -> int: """Categorize pattern for specialized handling""" if not isinstance(pattern, str): 
return self.PATTERN_TYPES["PATH"] # Check if it's a regex pattern if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern: return self.PATTERN_TYPES["REGEX"] if pattern.count("*") == 1: if pattern.startswith("*."): return self.PATTERN_TYPES["SUFFIX"] if pattern.endswith("/*"): return self.PATTERN_TYPES["PREFIX"] if "://" in pattern and pattern.startswith("*."): return self.PATTERN_TYPES["DOMAIN"] return self.PATTERN_TYPES["PATH"] def _add_pattern(self, pattern: str, pattern_type: int): """Add pattern to appropriate matcher""" if pattern_type == self.PATTERN_TYPES["REGEX"]: # For regex patterns, compile directly without glob translation if isinstance(pattern, str) and ( pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern ): self._path_patterns.append(re.compile(pattern)) return elif pattern_type == self.PATTERN_TYPES["SUFFIX"]: self._simple_suffixes.add(pattern[2:]) elif pattern_type == self.PATTERN_TYPES["PREFIX"]: self._simple_prefixes.add(pattern[:-2]) elif pattern_type == self.PATTERN_TYPES["DOMAIN"]: self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\."))) else: if isinstance(pattern, str): # Handle complex glob patterns if "**" in pattern: pattern = pattern.replace("**", ".*") if "{" in pattern: # Convert {a,b} to (a|b) pattern = re.sub( r"\{([^}]+)\}", lambda m: f'({"|".join(m.group(1).split(","))})', pattern, ) pattern = fnmatch.translate(pattern) self._path_patterns.append( pattern if isinstance(pattern, Pattern) else re.compile(pattern) ) @lru_cache(maxsize=10000) def apply(self, url: str) -> bool: # Quick suffix check (*.html) if self._simple_suffixes: path = url.split("?")[0] if path.split("/")[-1].split(".")[-1] in self._simple_suffixes: result = True self._update_stats(result) return not result if self._reverse else result # Domain check if self._domain_patterns: for pattern in self._domain_patterns: if pattern.match(url): result = True self._update_stats(result) return not result if self._reverse 
else result # Prefix check (/foo/*) if self._simple_prefixes: path = url.split("?")[0] # if any(path.startswith(p) for p in self._simple_prefixes): # result = True # self._update_stats(result) # return not result if self._reverse else result #### # Modified the prefix matching logic to ensure path boundary checking: # - Check if the matched prefix is followed by a path separator (`/`), query parameter (`?`), fragment (`#`), or is at the end of the path # - This ensures `/api/` only matches complete path segments, not substrings like `/apiv2/` #### for prefix in self._simple_prefixes: if path.startswith(prefix): if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']: result = True self._update_stats(result) return not result if self._reverse else result # Complex patterns if self._path_patterns: if any(p.search(url) for p in self._path_patterns): result = True self._update_stats(result) return not result if self._reverse else result result = False self._update_stats(result) return not result if self._reverse else result class ContentTypeFilter(URLFilter): """Optimized content type filter using fast lookups""" __slots__ = ("allowed_types", "_ext_map", "_check_extension") # Fast extension to mime type mapping _MIME_MAP = { # Text Formats "txt": "text/plain", "html": "text/html", "htm": "text/html", "xhtml": "application/xhtml+xml", "css": "text/css", "csv": "text/csv", "ics": "text/calendar", "js": "application/javascript", # Images "bmp": "image/bmp", "gif": "image/gif", "jpeg": "image/jpeg", "jpg": "image/jpeg", "png": "image/png", "svg": "image/svg+xml", "tiff": "image/tiff", "ico": "image/x-icon", "webp": "image/webp", # Audio "mp3": "audio/mpeg", "wav": "audio/wav", "ogg": "audio/ogg", "m4a": "audio/mp4", "aac": "audio/aac", # Video "mp4": "video/mp4", "mpeg": "video/mpeg", "webm": "video/webm", "avi": "video/x-msvideo", "mov": "video/quicktime", "flv": "video/x-flv", "wmv": "video/x-ms-wmv", "mkv": "video/x-matroska", # Applications "json": 
"application/json", "xml": "application/xml", "pdf": "application/pdf", "zip": "application/zip", "gz": "application/gzip", "tar": "application/x-tar", "rar": "application/vnd.rar", "7z": "application/x-7z-compressed", "exe": "application/vnd.microsoft.portable-executable", "msi": "application/x-msdownload", # Fonts "woff": "font/woff", "woff2": "font/woff2", "ttf": "font/ttf", "otf": "font/otf", # Microsoft Office "doc": "application/msword", "dot": "application/msword", "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "xls": "application/vnd.ms-excel", "ppt": "application/vnd.ms-powerpoint", "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", # OpenDocument Formats "odt": "application/vnd.oasis.opendocument.text", "ods": "application/vnd.oasis.opendocument.spreadsheet", "odp": "application/vnd.oasis.opendocument.presentation", # Archives "tar.gz": "application/gzip", "tgz": "application/gzip", "bz2": "application/x-bzip2", # Others "rtf": "application/rtf", "apk": "application/vnd.android.package-archive", "epub": "application/epub+zip", "jar": "application/java-archive", "swf": "application/x-shockwave-flash", "midi": "audio/midi", "mid": "audio/midi", "ps": "application/postscript", "ai": "application/postscript", "eps": "application/postscript", # Custom or less common "bin": "application/octet-stream", "dmg": "application/x-apple-diskimage", "iso": "application/x-iso9660-image", "deb": "application/x-debian-package", "rpm": "application/x-rpm", "sqlite": "application/vnd.sqlite3", # Placeholder "unknown": "application/octet-stream", # Fallback for unknown file types # php "php": "application/x-httpd-php", "php3": "application/x-httpd-php", "php4": "application/x-httpd-php", "php5": "application/x-httpd-php", "php7": "application/x-httpd-php", "phtml": "application/x-httpd-php", "phps": 
"application/x-httpd-php-source", } @staticmethod @lru_cache(maxsize=1000) def _extract_extension(url: str) -> str: """Extracts file extension from a URL.""" # Remove scheme (http://, https://) if present if "://" in url: url = url.split("://", 1)[-1] # Get everything after '://' # Remove domain (everything up to the first '/') path_start = url.find("/") path = url[path_start:] if path_start != -1 else "" # Extract last filename in path filename = path.rsplit("/", 1)[-1] if "/" in path else "" # Extract and validate extension if "." not in filename: return "" return filename.rpartition(".")[-1].lower() def __init__( self, allowed_types: Union[str, List[str]], check_extension: bool = True, ext_map: Dict[str, str] = _MIME_MAP, ): super().__init__() # Normalize and store as frozenset for fast lookup self.allowed_types = frozenset( t.lower() for t in ( allowed_types if isinstance(allowed_types, list) else [allowed_types] ) ) self._check_extension = check_extension # Pre-compute extension map for allowed types self._ext_map = frozenset( ext for ext, mime in self._MIME_MAP.items() if any(allowed in mime for allowed in self.allowed_types) ) @lru_cache(maxsize=1000) def _check_url_cached(self, url: str) -> bool: """Cached URL checking""" if not self._check_extension: return True ext = self._extract_extension(url) if not ext: return True return ext in self._ext_map def apply(self, url: str) -> bool: """Fast extension check with caching""" result = self._check_url_cached(url) self._update_stats(result) return result class DomainFilter(URLFilter): """Optimized domain filter with fast lookups and caching""" __slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache") # Regex for fast domain extraction _DOMAIN_REGEX = re.compile(r"://([^/]+)") def __init__( self, allowed_domains: Union[str, List[str]] = None, blocked_domains: Union[str, List[str]] = None, ): super().__init__() # Convert inputs to frozensets for immutable, fast lookups self._allowed_domains = ( 
frozenset(self._normalize_domains(allowed_domains)) if allowed_domains else None ) self._blocked_domains = ( frozenset(self._normalize_domains(blocked_domains)) if blocked_domains else frozenset() ) @staticmethod def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]: """Fast domain normalization""" if isinstance(domains, str): return {domains.lower()} return {d.lower() for d in domains} @staticmethod def _is_subdomain(domain: str, parent_domain: str) -> bool: """Check if domain is a subdomain of parent_domain""" return domain == parent_domain or domain.endswith(f".{parent_domain}") @staticmethod @lru_cache(maxsize=10000) def _extract_domain(url: str) -> str: """Ultra-fast domain extraction with regex and caching""" match = DomainFilter._DOMAIN_REGEX.search(url) return match.group(1).lower() if match else "" def apply(self, url: str) -> bool: """Optimized domain checking with early returns""" # Skip processing if no filters if not self._blocked_domains and self._allowed_domains is None: self._update_stats(True) return True domain = self._extract_domain(url) # Check for blocked domains, including subdomains for blocked in self._blocked_domains: if self._is_subdomain(domain, blocked): self._update_stats(False) return False # If no allowed domains specified, accept all non-blocked if self._allowed_domains is None: self._update_stats(True) return True # Check if domain matches any allowed domain (including subdomains) for allowed in self._allowed_domains: if self._is_subdomain(domain, allowed): self._update_stats(True) return True # No matches found self._update_stats(False) return False class ContentRelevanceFilter(URLFilter): """BM25-based relevance filter using head section content""" __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl", "query") def __init__( self, query: Union[str, List[str]], threshold: float, k1: float = 1.2, b: float = 0.75, avgdl: int = 1000, ): super().__init__(name="BM25RelevanceFilter") if isinstance(query, list): 
self.query = " ".join(query) else: self.query = query self.query_terms = self._tokenize(self.query) self.threshold = threshold self.k1 = k1 # TF saturation parameter self.b = b # Length normalization parameter self.avgdl = avgdl # Average document length (empirical value) async def apply(self, url: str) -> bool: head_content = await HeadPeekr.peek_html(url) if not head_content: self._update_stats(False) return False # Field extraction with weighting fields = { "title": HeadPeekr.get_title(head_content) or "", "meta": HeadPeekr.extract_meta_tags(head_content), } doc_text = self._build_document(fields) score = self._bm25(doc_text) decision = score >= self.threshold self._update_stats(decision) return decision def _build_document(self, fields: Dict) -> str: """Weighted document construction""" return " ".join( [ fields["title"] * 3, # Title weight fields["meta"].get("description", "") * 2, fields["meta"].get("keywords", ""), " ".join(fields["meta"].values()), ] ) def _tokenize(self, text: str) -> List[str]: """Fast case-insensitive tokenization""" return text.lower().split() def _bm25(self, document: str) -> float: """Optimized BM25 implementation for head sections""" doc_terms = self._tokenize(document) doc_len = len(doc_terms) tf = defaultdict(int) for term in doc_terms: tf[term] += 1 score = 0.0 for term in set(self.query_terms): term_freq = tf[term] idf = math.log((1 + 1) / (term_freq + 0.5) + 1) # Simplified IDF numerator = term_freq * (self.k1 + 1) denominator = term_freq + self.k1 * ( 1 - self.b + self.b * (doc_len / self.avgdl) ) score += idf * (numerator / denominator) return score class SEOFilter(URLFilter): """Quantitative SEO quality assessment filter using head section analysis""" __slots__ = ("threshold", "_weights", "_kw_patterns") # Based on SEMrush/Google ranking factors research DEFAULT_WEIGHTS = { "title_length": 0.15, "title_kw": 0.18, "meta_description": 0.12, "canonical": 0.10, "robot_ok": 0.20, # Most critical factor "schema_org": 0.10, 
"url_quality": 0.15, } def __init__( self, threshold: float = 0.65, keywords: List[str] = None, weights: Dict[str, float] = None, ): super().__init__(name="SEOFilter") self.threshold = threshold self._weights = weights or self.DEFAULT_WEIGHTS self._kw_patterns = ( re.compile( r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I ) if keywords else None ) async def apply(self, url: str) -> bool: head_content = await HeadPeekr.peek_html(url) if not head_content: self._update_stats(False) return False meta = HeadPeekr.extract_meta_tags(head_content) title = HeadPeekr.get_title(head_content) or "" parsed_url = urlparse(url) scores = { "title_length": self._score_title_length(title), "title_kw": self._score_keyword_presence(title), "meta_description": self._score_meta_description( meta.get("description", "") ), "canonical": self._score_canonical(meta.get("canonical"), url), "robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0, "schema_org": self._score_schema_org(head_content), "url_quality": self._score_url_quality(parsed_url), } total_score = sum( weight * scores[factor] for factor, weight in self._weights.items() ) decision = total_score >= self.threshold self._update_stats(decision) return decision def _score_title_length(self, title: str) -> float: length = len(title) if 50 <= length <= 60: return 1.0 if 40 <= length < 50 or 60 < length <= 70: return 0.7 return 0.3 # Poor length def _score_keyword_presence(self, text: str) -> float: if not self._kw_patterns: return 0.0 matches = len(self._kw_patterns.findall(text)) return min(matches * 0.3, 1.0) # Max 3 matches def _score_meta_description(self, desc: str) -> float: length = len(desc) if 140 <= length <= 160: return 1.0 return 0.5 if 120 <= length <= 200 else 0.2 def _score_canonical(self, canonical: str, original: str) -> float: if not canonical: return 0.5 # Neutral score return 1.0 if canonical == original else 0.2 def _score_schema_org(self, html: str) -> float: # Detect any 
schema.org markup in head return ( 1.0 if re.search(r'<script[^>]+type=["\']application/ld\+json', html) else 0.0 ) def _score_url_quality(self, parsed_url) -> float: score = 1.0 path = parsed_url.path.lower() # Penalty factors if len(path) > 80: score *= 0.7 if re.search(r"\d{4}", path): score *= 0.8 # Numbers in path if parsed_url.query: score *= 0.6 # URL parameters if "_" in path: score *= 0.9 # Underscores vs hyphens return score
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/__init__.py
crawl4ai/deep_crawling/__init__.py
# deep_crawling/__init__.py from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy from .bfs_strategy import BFSDeepCrawlStrategy from .bff_strategy import BestFirstCrawlingStrategy from .dfs_strategy import DFSDeepCrawlStrategy from .filters import ( FilterChain, ContentTypeFilter, DomainFilter, URLFilter, URLPatternFilter, FilterStats, ContentRelevanceFilter, SEOFilter ) from .scorers import ( KeywordRelevanceScorer, URLScorer, CompositeScorer, DomainAuthorityScorer, FreshnessScorer, PathDepthScorer, ContentTypeScorer ) __all__ = [ "DeepCrawlDecorator", "DeepCrawlStrategy", "BFSDeepCrawlStrategy", "BestFirstCrawlingStrategy", "DFSDeepCrawlStrategy", "FilterChain", "ContentTypeFilter", "DomainFilter", "URLFilter", "URLPatternFilter", "FilterStats", "ContentRelevanceFilter", "SEOFilter", "KeywordRelevanceScorer", "URLScorer", "CompositeScorer", "DomainAuthorityScorer", "FreshnessScorer", "PathDepthScorer", "ContentTypeScorer", ]
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/deep_crawling/bff_strategy.py
crawl4ai/deep_crawling/bff_strategy.py
# best_first_crawling_strategy.py import asyncio import logging from datetime import datetime from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple from urllib.parse import urlparse from ..models import TraversalStats from .filters import FilterChain from .scorers import URLScorer from . import DeepCrawlStrategy from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn from ..utils import normalize_url_for_deep_crawl from math import inf as infinity # Configurable batch size for processing items from the priority queue BATCH_SIZE = 10 class BestFirstCrawlingStrategy(DeepCrawlStrategy): """ Best-First Crawling Strategy using a priority queue. This strategy prioritizes URLs based on their score, ensuring that higher-value pages are crawled first. It reimplements the core traversal loop to use a priority queue while keeping URL validation and link discovery consistent with our design. Core methods: - arun: Returns either a list (batch mode) or an async generator (stream mode). - _arun_best_first: Core generator that uses a priority queue to yield CrawlResults. - can_process_url: Validates URLs and applies filtering (inherited behavior). - link_discovery: Extracts and validates links from a CrawlResult. 
""" def __init__( self, max_depth: int, filter_chain: FilterChain = FilterChain(), url_scorer: Optional[URLScorer] = None, include_external: bool = False, max_pages: int = infinity, logger: Optional[logging.Logger] = None, ): self.max_depth = max_depth self.filter_chain = filter_chain self.url_scorer = url_scorer self.include_external = include_external self.max_pages = max_pages # self.logger = logger or logging.getLogger(__name__) # Ensure logger is always a Logger instance, not a dict from serialization if isinstance(logger, logging.Logger): self.logger = logger else: # Create a new logger if logger is None, dict, or any other non-Logger type self.logger = logging.getLogger(__name__) self.stats = TraversalStats(start_time=datetime.now()) self._cancel_event = asyncio.Event() self._pages_crawled = 0 async def can_process_url(self, url: str, depth: int) -> bool: """ Validate the URL format and apply filtering. For the starting URL (depth 0), filtering is bypassed. """ try: parsed = urlparse(url) if not parsed.scheme or not parsed.netloc: raise ValueError("Missing scheme or netloc") if parsed.scheme not in ("http", "https"): raise ValueError("Invalid scheme") if "." not in parsed.netloc: raise ValueError("Invalid domain") except Exception as e: self.logger.warning(f"Invalid URL: {url}, error: {e}") return False if depth != 0 and not await self.filter_chain.apply(url): return False return True async def link_discovery( self, result: CrawlResult, source_url: str, current_depth: int, visited: Set[str], next_links: List[Tuple[str, Optional[str]]], depths: Dict[str, int], ) -> None: """ Extract links from the crawl result, validate them, and append new URLs (with their parent references) to next_links. Also updates the depths dictionary. 
""" new_depth = current_depth + 1 if new_depth > self.max_depth: return # If we've reached the max pages limit, don't discover new links remaining_capacity = self.max_pages - self._pages_crawled if remaining_capacity <= 0: self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery") return # Retrieve internal links; include external links if enabled. links = result.links.get("internal", []) if self.include_external: links += result.links.get("external", []) # If we have more links than remaining capacity, limit how many we'll process valid_links = [] for link in links: url = link.get("href") base_url = normalize_url_for_deep_crawl(url, source_url) if base_url in visited: continue if not await self.can_process_url(url, new_depth): self.stats.urls_skipped += 1 continue valid_links.append(base_url) # Record the new depths and add to next_links for url in valid_links: depths[url] = new_depth next_links.append((url, source_url)) async def _arun_best_first( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> AsyncGenerator[CrawlResult, None]: """ Core best-first crawl method using a priority queue. The queue items are tuples of (score, depth, url, parent_url). Lower scores are treated as higher priority. URLs are processed in batches for efficiency. """ queue: asyncio.PriorityQueue = asyncio.PriorityQueue() # Push the initial URL with score 0 and depth 0. 
initial_score = self.url_scorer.score(start_url) if self.url_scorer else 0 await queue.put((-initial_score, 0, start_url, None)) visited: Set[str] = set() depths: Dict[str, int] = {start_url: 0} while not queue.empty() and not self._cancel_event.is_set(): # Stop if we've reached the max pages limit if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl") break # Calculate how many more URLs we can process in this batch remaining = self.max_pages - self._pages_crawled batch_size = min(BATCH_SIZE, remaining) if batch_size <= 0: # No more pages to crawl self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl") break batch: List[Tuple[float, int, str, Optional[str]]] = [] # Retrieve up to BATCH_SIZE items from the priority queue. for _ in range(BATCH_SIZE): if queue.empty(): break item = await queue.get() score, depth, url, parent_url = item if url in visited: continue visited.add(url) batch.append(item) if not batch: continue # Process the current batch of URLs. urls = [item[2] for item in batch] batch_config = config.clone(deep_crawl_strategy=None, stream=True) stream_gen = await crawler.arun_many(urls=urls, config=batch_config) async for result in stream_gen: result_url = result.url # Find the corresponding tuple from the batch. 
corresponding = next((item for item in batch if item[2] == result_url), None) if not corresponding: continue score, depth, url, parent_url = corresponding result.metadata = result.metadata or {} result.metadata["depth"] = depth result.metadata["parent_url"] = parent_url result.metadata["score"] = -score # Count only successful crawls toward max_pages limit if result.success: self._pages_crawled += 1 # Check if we've reached the limit during batch processing if self._pages_crawled >= self.max_pages: self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl") break # Exit the generator yield result # Only discover links from successful crawls if result.success: # Discover new links from this result new_links: List[Tuple[str, Optional[str]]] = [] await self.link_discovery(result, result_url, depth, visited, new_links, depths) for new_url, new_parent in new_links: new_depth = depths.get(new_url, depth + 1) new_score = self.url_scorer.score(new_url) if self.url_scorer else 0 await queue.put((-new_score, new_depth, new_url, new_parent)) # End of crawl. async def _arun_batch( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> List[CrawlResult]: """ Best-first crawl in batch mode. Aggregates all CrawlResults into a list. """ results: List[CrawlResult] = [] async for result in self._arun_best_first(start_url, crawler, config): results.append(result) return results async def _arun_stream( self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig, ) -> AsyncGenerator[CrawlResult, None]: """ Best-first crawl in streaming mode. Yields CrawlResults as they become available. """ async for result in self._arun_best_first(start_url, crawler, config): yield result async def arun( self, start_url: str, crawler: AsyncWebCrawler, config: Optional[CrawlerRunConfig] = None, ) -> "RunManyReturn": """ Main entry point for best-first crawling. 
Returns either a list (batch mode) or an async generator (stream mode) of CrawlResults. """ if config is None: raise ValueError("CrawlerRunConfig must be provided") if config.stream: return self._arun_stream(start_url, crawler, config) else: return await self._arun_batch(start_url, crawler, config) async def shutdown(self) -> None: """ Signal cancellation and clean up resources. """ self._cancel_event.set() self.stats.end_time = datetime.now()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/components/crawler_monitor.py
crawl4ai/components/crawler_monitor.py
import time import uuid import threading import psutil from datetime import datetime, timedelta from typing import Dict, Optional, List import threading from rich.console import Console from rich.layout import Layout from rich.panel import Panel from rich.table import Table from rich.text import Text from rich.live import Live from rich import box from ..models import CrawlStatus class TerminalUI: """Terminal user interface for CrawlerMonitor using rich library.""" def __init__(self, refresh_rate: float = 1.0, max_width: int = 120): """ Initialize the terminal UI. Args: refresh_rate: How often to refresh the UI (in seconds) max_width: Maximum width of the UI in characters """ self.console = Console(width=max_width) self.layout = Layout() self.refresh_rate = refresh_rate self.stop_event = threading.Event() self.ui_thread = None self.monitor = None # Will be set by CrawlerMonitor self.max_width = max_width # Setup layout - vertical layout (top to bottom) self.layout.split( Layout(name="header", size=3), Layout(name="pipeline_status", size=10), Layout(name="task_details", ratio=1), Layout(name="footer", size=3) # Increased footer size to fit all content ) def start(self, monitor): """Start the UI thread.""" self.monitor = monitor self.stop_event.clear() self.ui_thread = threading.Thread(target=self._ui_loop) self.ui_thread.daemon = True self.ui_thread.start() def stop(self): """Stop the UI thread.""" if self.ui_thread and self.ui_thread.is_alive(): self.stop_event.set() # Only try to join if we're not in the UI thread # This prevents "cannot join current thread" errors if threading.current_thread() != self.ui_thread: self.ui_thread.join(timeout=5.0) def _ui_loop(self): """Main UI rendering loop.""" import sys import select import termios import tty # Setup terminal for non-blocking input old_settings = termios.tcgetattr(sys.stdin) try: tty.setcbreak(sys.stdin.fileno()) # Use Live display to render the UI with Live(self.layout, refresh_per_second=1/self.refresh_rate, 
screen=True) as live: self.live = live # Store the live display for updates # Main UI loop while not self.stop_event.is_set(): self._update_display() # Check for key press (non-blocking) if select.select([sys.stdin], [], [], 0)[0]: key = sys.stdin.read(1) # Check for 'q' to quit if key == 'q': # Signal stop but don't call monitor.stop() from UI thread # as it would cause the thread to try to join itself self.stop_event.set() self.monitor.is_running = False break time.sleep(self.refresh_rate) # Just check if the monitor was stopped if not self.monitor.is_running: break finally: # Restore terminal settings termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) def _update_display(self): """Update the terminal display with current statistics.""" if not self.monitor: return # Update crawler status panel self.layout["header"].update(self._create_status_panel()) # Update pipeline status panel and task details panel self.layout["pipeline_status"].update(self._create_pipeline_panel()) self.layout["task_details"].update(self._create_task_details_panel()) # Update footer self.layout["footer"].update(self._create_footer()) def _create_status_panel(self) -> Panel: """Create the crawler status panel.""" summary = self.monitor.get_summary() # Format memory status with icon memory_status = self.monitor.get_memory_status() memory_icon = "🟒" # Default NORMAL if memory_status == "PRESSURE": memory_icon = "🟠" elif memory_status == "CRITICAL": memory_icon = "πŸ”΄" # Get current memory usage current_memory = psutil.Process().memory_info().rss / (1024 * 1024) # MB memory_percent = (current_memory / psutil.virtual_memory().total) * 100 # Format runtime runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0) # Create the status text status_text = Text() status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n") status_text.append(f"Status: {memory_status} | URLs: 
{summary['urls_completed']}/{summary['urls_total']} | ") status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}") return Panel(status_text, title="Crawler Status", border_style="blue") def _create_pipeline_panel(self) -> Panel: """Create the pipeline status panel.""" summary = self.monitor.get_summary() queue_stats = self.monitor.get_queue_stats() # Create a table for status counts table = Table(show_header=True, box=None) table.add_column("Status", style="cyan") table.add_column("Count", justify="right") table.add_column("Percentage", justify="right") table.add_column("Stat", style="cyan") table.add_column("Value", justify="right") # Calculate overall progress progress = f"{summary['urls_completed']}/{summary['urls_total']}" progress_percent = f"{summary['completion_percentage']:.1f}%" # Add rows for each status table.add_row( "Overall Progress", progress, progress_percent, "Est. Completion", summary.get('estimated_completion_time', "N/A") ) # Add rows for each status status_counts = summary['status_counts'] total = summary['urls_total'] or 1 # Avoid division by zero # Status rows table.add_row( "Completed", str(status_counts.get(CrawlStatus.COMPLETED.name, 0)), f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%", "Avg. 
Time/URL", f"{summary.get('avg_task_duration', 0):.2f}s" ) table.add_row( "Failed", str(status_counts.get(CrawlStatus.FAILED.name, 0)), f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%", "Concurrent Tasks", str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)) ) table.add_row( "In Progress", str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)), f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%", "Queue Size", str(queue_stats['total_queued']) ) table.add_row( "Queued", str(status_counts.get(CrawlStatus.QUEUED.name, 0)), f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%", "Max Wait Time", f"{queue_stats['highest_wait_time']:.1f}s" ) # Requeued is a special case as it's not a status requeued_count = summary.get('requeued_count', 0) table.add_row( "Requeued", str(requeued_count), f"{summary.get('requeue_rate', 0):.1f}%", "Avg Wait Time", f"{queue_stats['avg_wait_time']:.1f}s" ) # Add empty row for spacing table.add_row( "", "", "", "Requeue Rate", f"{summary.get('requeue_rate', 0):.1f}%" ) return Panel(table, title="Pipeline Status", border_style="green") def _create_task_details_panel(self) -> Panel: """Create the task details panel.""" # Create a table for task details table = Table(show_header=True, expand=True) table.add_column("Task ID", style="cyan", no_wrap=True, width=10) table.add_column("URL", style="blue", ratio=3) table.add_column("Status", style="green", width=15) table.add_column("Memory", justify="right", width=8) table.add_column("Peak", justify="right", width=8) table.add_column("Duration", justify="right", width=10) # Get all task stats task_stats = self.monitor.get_all_task_stats() # Add summary row active_tasks = sum(1 for stats in task_stats.values() if stats['status'] == CrawlStatus.IN_PROGRESS.name) total_memory = sum(stats['memory_usage'] for stats in task_stats.values()) total_peak = sum(stats['peak_memory'] for stats in task_stats.values()) # Summary row with separators 
table.add_row( "SUMMARY", f"Total: {len(task_stats)}", f"Active: {active_tasks}", f"{total_memory:.1f}", f"{total_peak:.1f}", "N/A" ) # Add a separator table.add_row("β€”" * 10, "β€”" * 20, "β€”" * 10, "β€”" * 8, "β€”" * 8, "β€”" * 10) # Status icons status_icons = { CrawlStatus.QUEUED.name: "⏳", CrawlStatus.IN_PROGRESS.name: "πŸ”„", CrawlStatus.COMPLETED.name: "βœ…", CrawlStatus.FAILED.name: "❌" } # Calculate how many rows we can display based on available space # We can display more rows now that we have a dedicated panel display_count = min(len(task_stats), 20) # Display up to 20 tasks # Add rows for each task for task_id, stats in sorted( list(task_stats.items())[:display_count], # Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency key=lambda x: ( 0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else 1 if x[1]['status'] == CrawlStatus.QUEUED.name else 2, -1 * (x[1].get('end_time', 0) or 0) # Most recent first ) ): # Truncate task_id and URL for display short_id = task_id[:8] url = stats['url'] if len(url) > 50: # Allow longer URLs in the dedicated panel url = url[:47] + "..." 
# Format status with icon status = f"{status_icons.get(stats['status'], '?')} {stats['status']}" # Add row table.add_row( short_id, url, status, f"{stats['memory_usage']:.1f}", f"{stats['peak_memory']:.1f}", stats['duration'] if 'duration' in stats else "0:00" ) return Panel(table, title="Task Details", border_style="yellow") def _create_footer(self) -> Panel: """Create the footer panel.""" from rich.columns import Columns from rich.align import Align memory_status = self.monitor.get_memory_status() memory_icon = "🟒" # Default NORMAL if memory_status == "PRESSURE": memory_icon = "🟠" elif memory_status == "CRITICAL": memory_icon = "πŸ”΄" # Left section - memory status left_text = Text() left_text.append("Memory Status: ", style="bold") status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold" left_text.append(f"{memory_icon} {memory_status}", style=status_style) # Center section - copyright center_text = Text("Β© Crawl4AI 2025 | Made by UnclecCode", style="cyan italic") # Right section - quit instruction right_text = Text() right_text.append("Press ", style="bold") right_text.append("q", style="white on blue") right_text.append(" to quit", style="bold") # Create columns with the three sections footer_content = Columns( [ Align.left(left_text), Align.center(center_text), Align.right(right_text) ], expand=True ) # Create a more visible footer panel return Panel( footer_content, border_style="white", padding=(0, 1) # Add padding for better visibility ) class CrawlerMonitor: """ Comprehensive monitoring and visualization system for tracking web crawler operations in real-time. Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics, and performance metrics. """ def __init__( self, urls_total: int = 0, refresh_rate: float = 1.0, enable_ui: bool = True, max_width: int = 120 ): """ Initialize the CrawlerMonitor. 
Args: urls_total: Total number of URLs to be crawled refresh_rate: How often to refresh the UI (in seconds) enable_ui: Whether to display the terminal UI max_width: Maximum width of the UI in characters """ # Core monitoring attributes self.stats = {} # Task ID -> stats dict self.memory_status = "NORMAL" self.start_time = None self.end_time = None self.is_running = False self.queue_stats = { "total_queued": 0, "highest_wait_time": 0.0, "avg_wait_time": 0.0 } self.urls_total = urls_total self.urls_completed = 0 self.peak_memory_percent = 0.0 self.peak_memory_time = 0.0 # Status counts self.status_counts = { CrawlStatus.QUEUED.name: 0, CrawlStatus.IN_PROGRESS.name: 0, CrawlStatus.COMPLETED.name: 0, CrawlStatus.FAILED.name: 0 } # Requeue tracking self.requeued_count = 0 # Thread-safety self._lock = threading.RLock() # Terminal UI self.enable_ui = enable_ui self.terminal_ui = TerminalUI( refresh_rate=refresh_rate, max_width=max_width ) if enable_ui else None def start(self): """ Start the monitoring session. - Initializes the start_time - Sets is_running to True - Starts the terminal UI if enabled """ with self._lock: self.start_time = time.time() self.is_running = True # Start the terminal UI if self.enable_ui and self.terminal_ui: self.terminal_ui.start(self) def stop(self): """ Stop the monitoring session. - Records end_time - Sets is_running to False - Stops the terminal UI - Generates final summary statistics """ with self._lock: self.end_time = time.time() self.is_running = False # Stop the terminal UI if self.enable_ui and self.terminal_ui: self.terminal_ui.stop() def add_task(self, task_id: str, url: str): """ Register a new task with the monitor. 
Args: task_id: Unique identifier for the task url: URL being crawled The task is initialized with: - status: QUEUED - url: The URL to crawl - enqueue_time: Current time - memory_usage: 0 - peak_memory: 0 - wait_time: 0 - retry_count: 0 """ with self._lock: self.stats[task_id] = { "task_id": task_id, "url": url, "status": CrawlStatus.QUEUED.name, "enqueue_time": time.time(), "start_time": None, "end_time": None, "memory_usage": 0.0, "peak_memory": 0.0, "error_message": "", "wait_time": 0.0, "retry_count": 0, "duration": "0:00", "counted_requeue": False } # Update status counts self.status_counts[CrawlStatus.QUEUED.name] += 1 def update_task( self, task_id: str, status: Optional[CrawlStatus] = None, start_time: Optional[float] = None, end_time: Optional[float] = None, memory_usage: Optional[float] = None, peak_memory: Optional[float] = None, error_message: Optional[str] = None, retry_count: Optional[int] = None, wait_time: Optional[float] = None ): """ Update statistics for a specific task. Args: task_id: Unique identifier for the task status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED) start_time: When task execution started end_time: When task execution ended memory_usage: Current memory usage in MB peak_memory: Maximum memory usage in MB error_message: Error description if failed retry_count: Number of retry attempts wait_time: Time spent in queue Updates task statistics and updates status counts. If status changes, decrements old status count and increments new status count. 
""" with self._lock: # Check if task exists if task_id not in self.stats: return task_stats = self.stats[task_id] # Update status counts if status is changing old_status = task_stats["status"] if status and status.name != old_status: self.status_counts[old_status] -= 1 self.status_counts[status.name] += 1 # Track completion if status == CrawlStatus.COMPLETED: self.urls_completed += 1 # Track requeues if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False): self.requeued_count += 1 task_stats["counted_requeue"] = True # Update task statistics if status: task_stats["status"] = status.name if start_time is not None: task_stats["start_time"] = start_time if end_time is not None: task_stats["end_time"] = end_time if memory_usage is not None: task_stats["memory_usage"] = memory_usage # Update peak memory if necessary current_percent = (memory_usage / psutil.virtual_memory().total) * 100 if current_percent > self.peak_memory_percent: self.peak_memory_percent = current_percent self.peak_memory_time = time.time() if peak_memory is not None: task_stats["peak_memory"] = peak_memory if error_message is not None: task_stats["error_message"] = error_message if retry_count is not None: task_stats["retry_count"] = retry_count if wait_time is not None: task_stats["wait_time"] = wait_time # Calculate duration if task_stats["start_time"]: end = task_stats["end_time"] or time.time() duration = end - task_stats["start_time"] task_stats["duration"] = self._format_time(duration) def update_memory_status(self, status: str): """ Update the current memory status. Args: status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom) Also updates the UI to reflect the new status. """ with self._lock: self.memory_status = status def update_queue_statistics( self, total_queued: int, highest_wait_time: float, avg_wait_time: float ): """ Update statistics related to the task queue. 
Args: total_queued: Number of tasks currently in queue highest_wait_time: Longest wait time of any queued task avg_wait_time: Average wait time across all queued tasks """ with self._lock: self.queue_stats = { "total_queued": total_queued, "highest_wait_time": highest_wait_time, "avg_wait_time": avg_wait_time } def get_task_stats(self, task_id: str) -> Dict: """ Get statistics for a specific task. Args: task_id: Unique identifier for the task Returns: Dictionary containing all task statistics """ with self._lock: return self.stats.get(task_id, {}).copy() def get_all_task_stats(self) -> Dict[str, Dict]: """ Get statistics for all tasks. Returns: Dictionary mapping task_ids to their statistics """ with self._lock: return self.stats.copy() def get_memory_status(self) -> str: """ Get the current memory status. Returns: Current memory status string """ with self._lock: return self.memory_status def get_queue_stats(self) -> Dict: """ Get current queue statistics. Returns: Dictionary with queue statistics including: - total_queued: Number of tasks in queue - highest_wait_time: Longest wait time - avg_wait_time: Average wait time """ with self._lock: return self.queue_stats.copy() def get_summary(self) -> Dict: """ Get a summary of all crawler statistics. 
Returns: Dictionary containing: - runtime: Total runtime in seconds - urls_total: Total URLs to process - urls_completed: Number of completed URLs - completion_percentage: Percentage complete - status_counts: Count of tasks in each status - memory_status: Current memory status - peak_memory_percent: Highest memory usage - peak_memory_time: When peak memory occurred - avg_task_duration: Average task processing time - estimated_completion_time: Projected finish time - requeue_rate: Percentage of tasks requeued """ with self._lock: # Calculate runtime current_time = time.time() runtime = current_time - (self.start_time or current_time) # Calculate completion percentage completion_percentage = 0 if self.urls_total > 0: completion_percentage = (self.urls_completed / self.urls_total) * 100 # Calculate average task duration for completed tasks completed_tasks = [ task for task in self.stats.values() if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time") ] avg_task_duration = 0 if completed_tasks: total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks) avg_task_duration = total_duration / len(completed_tasks) # Calculate requeue rate requeue_rate = 0 if len(self.stats) > 0: requeue_rate = (self.requeued_count / len(self.stats)) * 100 # Calculate estimated completion time estimated_completion_time = "N/A" if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0: remaining_tasks = self.urls_total - self.urls_completed estimated_seconds = remaining_tasks * avg_task_duration estimated_completion_time = self._format_time(estimated_seconds) return { "runtime": runtime, "urls_total": self.urls_total, "urls_completed": self.urls_completed, "completion_percentage": completion_percentage, "status_counts": self.status_counts.copy(), "memory_status": self.memory_status, "peak_memory_percent": self.peak_memory_percent, "peak_memory_time": self.peak_memory_time, "avg_task_duration": 
avg_task_duration, "estimated_completion_time": estimated_completion_time, "requeue_rate": requeue_rate, "requeued_count": self.requeued_count } def render(self): """ Render the terminal UI. This is the main UI rendering loop that: 1. Updates all statistics 2. Formats the display 3. Renders the ASCII interface 4. Handles keyboard input Note: The actual rendering is handled by the TerminalUI class which uses the rich library's Live display. """ if self.enable_ui and self.terminal_ui: # Force an update of the UI if hasattr(self.terminal_ui, '_update_display'): self.terminal_ui._update_display() def _format_time(self, seconds: float) -> str: """ Format time in hours:minutes:seconds. Args: seconds: Time in seconds Returns: Formatted time string (e.g., "1:23:45") """ delta = timedelta(seconds=int(seconds)) hours, remainder = divmod(delta.seconds, 3600) minutes, seconds = divmod(remainder, 60) if hours > 0: return f"{hours}:{minutes:02}:{seconds:02}" else: return f"{minutes}:{seconds:02}" def _calculate_estimated_completion(self) -> str: """ Calculate estimated completion time based on current progress. 
Returns: Formatted time string """ summary = self.get_summary() return summary.get("estimated_completion_time", "N/A") # Example code for testing if __name__ == "__main__": # Initialize the monitor monitor = CrawlerMonitor(urls_total=100) # Start monitoring monitor.start() try: # Simulate some tasks for i in range(20): task_id = str(uuid.uuid4()) url = f"https://example.com/page{i}" monitor.add_task(task_id, url) # Simulate 20% of tasks are already running if i < 4: monitor.update_task( task_id=task_id, status=CrawlStatus.IN_PROGRESS, start_time=time.time() - 30, # Started 30 seconds ago memory_usage=10.5 ) # Simulate 10% of tasks are completed if i >= 4 and i < 6: start_time = time.time() - 60 end_time = time.time() - 15 monitor.update_task( task_id=task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time, memory_usage=8.2 ) monitor.update_task( task_id=task_id, status=CrawlStatus.COMPLETED, end_time=end_time, memory_usage=0, peak_memory=15.7 ) # Simulate 5% of tasks fail if i >= 6 and i < 7: start_time = time.time() - 45 end_time = time.time() - 20 monitor.update_task( task_id=task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time, memory_usage=12.3 ) monitor.update_task( task_id=task_id, status=CrawlStatus.FAILED, end_time=end_time, memory_usage=0, peak_memory=18.2, error_message="Connection timeout" ) # Simulate memory pressure monitor.update_memory_status("PRESSURE") # Simulate queue statistics monitor.update_queue_statistics( total_queued=16, # 20 - 4 (in progress) highest_wait_time=120.5, avg_wait_time=60.2 ) # Keep the monitor running for a demonstration print("Crawler Monitor is running. Press 'q' to exit.") while monitor.is_running: time.sleep(0.1) except KeyboardInterrupt: print("\nExiting crawler monitor...") finally: # Stop the monitor monitor.stop() print("Crawler monitor exited successfully.")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/crawlers/__init__.py
crawl4ai/crawlers/__init__.py
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/crawlers/google_search/__init__.py
crawl4ai/crawlers/google_search/__init__.py
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/crawlers/google_search/crawler.py
crawl4ai/crawlers/google_search/crawler.py
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict


class GoogleSearchCrawler(BaseCrawler):
    """Hub crawler for Google Search result pages.

    Supports two modes: ``text`` (organic results, top stories, suggested
    queries extracted via CSS schemas) and ``image`` (results collected by an
    in-page JS script). ``run`` returns a JSON string in both modes.
    """

    __meta__ = {
        "version": "1.0.0",
        "tested_on": ["google.com/search*"],
        "rate_limit": "10 RPM",
        "description": "Crawls Google Search results (text + images)",
    }

    def __init__(self):
        super().__init__()
        # JS payload executed in-page for image search; shipped next to this
        # module as script.js and read once at construction time.
        self.js_script = (Path(__file__).parent / "script.js").read_text()

    async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path = None, **kwargs) -> str:
        """Crawl Google Search results for a query.

        Args:
            url: Ignored; the search URL is always built from ``query``.
            query: Search terms to submit to Google.
            search_type: ``"text"`` or ``"image"``.
            schema_cache_path: Optional directory for cached extraction
                schemas (defaults to the crawl4ai home folder).
            **kwargs: Optional ``page_start``, ``page_length``, ``delay``,
                ``cache_mode``.

        Returns:
            JSON string with extracted results, or ``{"error": ...}``.
        """
        # Locale is pinned to Singapore/English; image mode adds qdr:d + udm=2.
        url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
        if kwargs.get("page_start", 1) > 1:
            # NOTE(review): Google's `start` is a 0-based result offset, so
            # page_start=2 yields start=20 (page 3). Looks like an off-by-one
            # — confirm intended pagination before changing.
            url = f"{url}&start={kwargs['page_start'] * 10}"
        if kwargs.get("page_length", 1) > 1:
            url = f"{url}&num={kwargs['page_length']}"
        browser_config = BrowserConfig(headless=True, verbose=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            config = CrawlerRunConfig(
                cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
                keep_attrs=["id", "class"],
                keep_data_attributes=True,
                # Image pages need extra settle time for the JS scraper.
                delay_before_return_html=kwargs.get(
                    "delay", 2 if search_type == "image" else 1),
                js_code=self.js_script if search_type == "image" else None,
            )
            result = await crawler.arun(url=url, config=config)
            if not result.success:
                # NOTE(review): elsewhere in the codebase the failure text is
                # exposed as `error_message`; confirm `result.error` exists.
                return json.dumps({"error": result.error})
            if search_type == "image":
                # The JS script reports success/error per execution and per
                # result entry; surface the first error found.
                if result.js_execution_result.get("success", False) is False:
                    return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
                if "results" in result.js_execution_result:
                    image_result = result.js_execution_result['results'][0]
                    if image_result.get("success", False) is False:
                        return json.dumps({"error": image_result.get("error", "Unknown error")})
                    return json.dumps(image_result["result"], indent=4)
            # For text search, extract structured data
            schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
            extracted = {
                key: JsonCssExtractionStrategy(schema=schemas[key]).run(
                    url=url, sections=[result.html]
                )
                for key in schemas
            }
            return json.dumps(extracted, indent=4)

    async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
        """Build extraction schemas (organic, top stories, suggested queries).

        Schemas are generated once via LLM (``generate_schema``) and cached as
        JSON files under ``<home>/schema``; subsequent runs load the cache.

        Args:
            html: Raw page HTML used to derive the schemas.
            schema_cache_path: Optional cache directory override.

        Returns:
            Dict mapping schema name -> schema dict.
        """
        home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
        os.makedirs(f"{home_dir}/schema", exist_ok=True)

        # cleaned_html = optimize_html(html, threshold=100)
        cleaned_html = preprocess_html_for_schema(html)

        organic_schema = None
        if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
            with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
                organic_schema = json.load(f)
        else:
            organic_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
    "title": "...",
    "link": "...",
    "snippet": "...",
    "date": "1 hour ago",
}""",
                query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html, I am interested in title, link, snippet text. date."""
            )
            with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
                f.write(json.dumps(organic_schema))

        top_stories_schema = None
        if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
            with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
                top_stories_schema = json.load(f)
        else:
            top_stories_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
    "title": "...",
    "link": "...",
    "source": "Insider Monkey",
    "date": "1 hour ago",
}""",
                query="""The given html is the crawled html from Google search result. Please find the schema for Top Story item int he given html, I am interested in title, link, source. date and imageUrl."""
            )
            with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
                f.write(json.dumps(top_stories_schema))

        suggested_query_schema = None
        if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
            with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
                suggested_query_schema = json.load(f)
        else:
            suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
    "query": "A for Apple",
}""",
                query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
            )
            with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
                f.write(json.dumps(suggested_query_schema))

        return {
            "organic_schema": organic_schema,
            "top_stories_schema": top_stories_schema,
            "suggested_query_schema": suggested_query_schema,
        }
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/crawlers/amazon_product/__init__.py
crawl4ai/crawlers/amazon_product/__init__.py
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/crawlers/amazon_product/crawler.py
crawl4ai/crawlers/amazon_product/crawler.py
import json

from crawl4ai.hub import BaseCrawler

# Crawler metadata advertised to the hub registry.
__meta__ = {
    "version": "1.2.0",
    "tested_on": ["amazon.com"],
    "rate_limit": "50 RPM",
    "schema": {"product": ["name", "price"]}
}


class AmazonProductCrawler(BaseCrawler):
    """Stub crawler for Amazon product pages.

    Currently returns a canned JSON payload; real extraction is not yet
    implemented.
    """

    async def run(self, url: str, **kwargs) -> str:
        """Crawl ``url`` and return a JSON string describing the product.

        Args:
            url: Product page URL to crawl.
            **kwargs: Accepted for interface compatibility; ignored.

        Returns:
            JSON string with a ``product`` object, or an ``error`` object if
            the crawl raised.
        """
        try:
            self.logger.info(f"Crawling {url}")
            return '{"product": {"name": "Test Amazon Product"}}'
        except Exception as e:
            self.logger.error(f"Crawl failed: {str(e)}")
            # Fix: `json` was referenced here without being imported, so this
            # error path raised NameError instead of reporting the failure.
            # NOTE(review): assumes BaseCrawler exposes a `meta` attribute
            # mirroring `__meta__` — confirm against the hub base class.
            return json.dumps({
                "error": str(e),
                "metadata": self.meta  # Include meta in error response
            })
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/_typing.py
crawl4ai/html2text/_typing.py
class OutCallback:
    """Typing stub for an output callback: a callable accepting one string.

    Used to annotate the ``out`` parameter of ``HTML2Text`` — any callable
    that takes a text chunk and returns nothing satisfies this shape.
    """

    def __call__(self, s: str) -> None: ...
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/cli.py
crawl4ai/html2text/cli.py
"""Command-line interface for the vendored html2text converter."""
import argparse
import sys

from . import HTML2Text, __version__, config


def main() -> None:
    """Parse CLI flags, read HTML from a file or stdin, and print Markdown."""
    baseurl = ""

    # ANSI escape codes for colorized terminal warnings.
    class bcolors:
        HEADER = "\033[95m"
        OKBLUE = "\033[94m"
        OKGREEN = "\033[92m"
        WARNING = "\033[93m"
        FAIL = "\033[91m"
        ENDC = "\033[0m"
        BOLD = "\033[1m"
        UNDERLINE = "\033[4m"

    p = argparse.ArgumentParser()
    p.add_argument(
        "--default-image-alt",
        dest="default_image_alt",
        default=config.DEFAULT_IMAGE_ALT,
        help="The default alt string for images with missing ones",
    )
    p.add_argument(
        "--pad-tables",
        dest="pad_tables",
        action="store_true",
        default=config.PAD_TABLES,
        help="pad the cells to equal column width in tables",
    )
    p.add_argument(
        "--no-wrap-links",
        dest="wrap_links",
        action="store_false",
        default=config.WRAP_LINKS,
        help="don't wrap links during conversion",
    )
    p.add_argument(
        "--wrap-list-items",
        dest="wrap_list_items",
        action="store_true",
        default=config.WRAP_LIST_ITEMS,
        help="wrap list items during conversion",
    )
    p.add_argument(
        "--wrap-tables",
        dest="wrap_tables",
        action="store_true",
        default=config.WRAP_TABLES,
        help="wrap tables",
    )
    p.add_argument(
        "--ignore-emphasis",
        dest="ignore_emphasis",
        action="store_true",
        default=config.IGNORE_EMPHASIS,
        help="don't include any formatting for emphasis",
    )
    p.add_argument(
        "--reference-links",
        dest="inline_links",
        action="store_false",
        default=config.INLINE_LINKS,
        help="use reference style links instead of inline links",
    )
    p.add_argument(
        "--ignore-links",
        dest="ignore_links",
        action="store_true",
        default=config.IGNORE_ANCHORS,
        help="don't include any formatting for links",
    )
    p.add_argument(
        "--ignore-mailto-links",
        action="store_true",
        dest="ignore_mailto_links",
        default=config.IGNORE_MAILTO_LINKS,
        help="don't include mailto: links",
    )
    p.add_argument(
        "--protect-links",
        dest="protect_links",
        action="store_true",
        default=config.PROTECT_LINKS,
        help="protect links from line breaks surrounding them with angle brackets",
    )
    p.add_argument(
        "--ignore-images",
        dest="ignore_images",
        action="store_true",
        default=config.IGNORE_IMAGES,
        help="don't include any formatting for images",
    )
    p.add_argument(
        "--images-as-html",
        dest="images_as_html",
        action="store_true",
        default=config.IMAGES_AS_HTML,
        help=(
            "Always write image tags as raw html; preserves `height`, `width` and "
            "`alt` if possible."
        ),
    )
    p.add_argument(
        "--images-to-alt",
        dest="images_to_alt",
        action="store_true",
        default=config.IMAGES_TO_ALT,
        help="Discard image data, only keep alt text",
    )
    p.add_argument(
        "--images-with-size",
        dest="images_with_size",
        action="store_true",
        default=config.IMAGES_WITH_SIZE,
        help=(
            "Write image tags with height and width attrs as raw html to retain "
            "dimensions"
        ),
    )
    p.add_argument(
        "-g",
        "--google-doc",
        action="store_true",
        dest="google_doc",
        default=False,
        help="convert an html-exported Google Document",
    )
    p.add_argument(
        "-d",
        "--dash-unordered-list",
        action="store_true",
        dest="ul_style_dash",
        default=False,
        help="use a dash rather than a star for unordered list items",
    )
    p.add_argument(
        "-e",
        "--asterisk-emphasis",
        action="store_true",
        dest="em_style_asterisk",
        default=False,
        help="use an asterisk rather than an underscore for emphasized text",
    )
    p.add_argument(
        "-b",
        "--body-width",
        dest="body_width",
        type=int,
        default=config.BODY_WIDTH,
        help="number of characters per output line, 0 for no wrap",
    )
    p.add_argument(
        "-i",
        "--google-list-indent",
        dest="list_indent",
        type=int,
        default=config.GOOGLE_LIST_INDENT,
        help="number of pixels Google indents nested lists",
    )
    p.add_argument(
        "-s",
        "--hide-strikethrough",
        action="store_true",
        dest="hide_strikethrough",
        default=False,
        help="hide strike-through text. only relevant when -g is "
        "specified as well",
    )
    p.add_argument(
        "--escape-all",
        action="store_true",
        dest="escape_snob",
        default=False,
        help=(
            "Escape all special characters.  Output is less readable, but avoids "
            "corner case formatting issues."
        ),
    )
    p.add_argument(
        "--bypass-tables",
        action="store_true",
        dest="bypass_tables",
        default=config.BYPASS_TABLES,
        help="Format tables in HTML rather than Markdown syntax.",
    )
    p.add_argument(
        "--ignore-tables",
        action="store_true",
        dest="ignore_tables",
        default=config.IGNORE_TABLES,
        help="Ignore table-related tags (table, th, td, tr) "
        "while keeping rows.",
    )
    p.add_argument(
        "--single-line-break",
        action="store_true",
        dest="single_line_break",
        default=config.SINGLE_LINE_BREAK,
        help=(
            "Use a single line break after a block element rather than two line "
            "breaks.  NOTE: Requires --body-width=0"
        ),
    )
    p.add_argument(
        "--unicode-snob",
        action="store_true",
        dest="unicode_snob",
        default=config.UNICODE_SNOB,
        help="Use unicode throughout document",
    )
    p.add_argument(
        "--no-automatic-links",
        action="store_false",
        dest="use_automatic_links",
        default=config.USE_AUTOMATIC_LINKS,
        help="Do not use automatic links wherever applicable",
    )
    p.add_argument(
        "--no-skip-internal-links",
        action="store_false",
        dest="skip_internal_links",
        default=config.SKIP_INTERNAL_LINKS,
        help="Do not skip internal links",
    )
    p.add_argument(
        "--links-after-para",
        action="store_true",
        dest="links_each_paragraph",
        default=config.LINKS_EACH_PARAGRAPH,
        help="Put links after each paragraph instead of document",
    )
    p.add_argument(
        "--mark-code",
        action="store_true",
        dest="mark_code",
        default=config.MARK_CODE,
        help="Mark program code blocks with [code]...[/code]",
    )
    p.add_argument(
        "--decode-errors",
        dest="decode_errors",
        default=config.DECODE_ERRORS,
        help=(
            "What to do in case of decode errors.'ignore', 'strict' and 'replace' are "
            "acceptable values"
        ),
    )
    p.add_argument(
        "--open-quote",
        dest="open_quote",
        default=config.OPEN_QUOTE,
        help="The character used to open quotes",
    )
    p.add_argument(
        "--close-quote",
        dest="close_quote",
        default=config.CLOSE_QUOTE,
        help="The character used to close quotes",
    )
    p.add_argument(
        "--version", action="version", version=".".join(map(str, __version__))
    )
    p.add_argument("filename", nargs="?")
    p.add_argument("encoding", nargs="?", default="utf-8")
    p.add_argument(
        "--include-sup-sub",
        dest="include_sup_sub",
        action="store_true",
        default=config.INCLUDE_SUP_SUB,
        help="Include the sup and sub tags",
    )
    args = p.parse_args()

    # "-" (or no filename) means read raw bytes from stdin.
    if args.filename and args.filename != "-":
        with open(args.filename, "rb") as fp:
            data = fp.read()
    else:
        data = sys.stdin.buffer.read()

    try:
        html = data.decode(args.encoding, args.decode_errors)
    except UnicodeDecodeError as err:
        # Point the user at --decode-errors=ignore, then re-raise the original.
        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
        warning += " Use the " + bcolors.OKGREEN
        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
        print(warning)
        raise err

    h = HTML2Text(baseurl=baseurl)
    # handle options
    if args.ul_style_dash:
        h.ul_item_mark = "-"
    if args.em_style_asterisk:
        h.emphasis_mark = "*"
        h.strong_mark = "__"

    # Copy every parsed flag onto the converter instance.
    h.body_width = args.body_width
    h.google_list_indent = args.list_indent
    h.ignore_emphasis = args.ignore_emphasis
    h.ignore_links = args.ignore_links
    h.ignore_mailto_links = args.ignore_mailto_links
    h.protect_links = args.protect_links
    h.ignore_images = args.ignore_images
    h.images_as_html = args.images_as_html
    h.images_to_alt = args.images_to_alt
    h.images_with_size = args.images_with_size
    h.google_doc = args.google_doc
    h.hide_strikethrough = args.hide_strikethrough
    h.escape_snob = args.escape_snob
    h.bypass_tables = args.bypass_tables
    h.ignore_tables = args.ignore_tables
    h.single_line_break = args.single_line_break
    h.inline_links = args.inline_links
    h.unicode_snob = args.unicode_snob
    h.use_automatic_links = args.use_automatic_links
    h.skip_internal_links = args.skip_internal_links
    h.links_each_paragraph = args.links_each_paragraph
    h.mark_code = args.mark_code
    h.wrap_links = args.wrap_links
    h.wrap_list_items = args.wrap_list_items
    h.wrap_tables = args.wrap_tables
    h.pad_tables = args.pad_tables
    h.default_image_alt = args.default_image_alt
    h.open_quote = args.open_quote
    h.close_quote = args.close_quote
    h.include_sup_sub = args.include_sup_sub

    sys.stdout.write(h.handle(html))
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/__main__.py
crawl4ai/html2text/__main__.py
"""Enable ``python -m crawl4ai.html2text`` to invoke the CLI."""
from .cli import main

# Fix: main() used to run unconditionally at import time, so merely importing
# this module programmatically executed the CLI (reading stdin / parsing
# sys.argv). `python -m` still works: runpy sets __name__ to "__main__".
if __name__ == "__main__":
    main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/utils.py
crawl4ai/html2text/utils.py
import html.entities
from typing import Dict, List, Optional

from . import config

# Map of Unicode codepoints -> ascii replacements derived from config.UNIFIABLE;
# nbsp is excluded because the converter handles it via a placeholder string.
unifiable_n = {
    html.entities.name2codepoint[k]: v
    for k, v in config.UNIFIABLE.items()
    if k != "nbsp"
}


def hn(tag: str) -> int:
    """Return the heading level for ``h1``..``h9`` tags, else 0."""
    if tag[0] == "h" and len(tag) == 2:
        n = tag[1]
        # String comparison: accepts exactly the single digits "1".."9".
        if "0" < n <= "9":
            return int(n)
    return 0


def dumb_property_dict(style: str) -> Dict[str, str]:
    """
    :returns: A hash of css attributes
    """
    return {
        x.strip().lower(): y.strip().lower()
        for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
    }


def dumb_css_parser(data: str) -> Dict[str, Dict[str, str]]:
    """
    :type data: str

    :returns: A hash of css selectors, each of which contains a hash of
        css attributes.
    :rtype: dict
    """
    # remove @import sentences
    data += ";"
    importIndex = data.find("@import")
    while importIndex != -1:
        data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
        importIndex = data.find("@import")

    # parse the css. reverted from dictionary comprehension in order to
    # support older pythons
    pairs = [x.split("{") for x in data.split("}") if "{" in x.strip()]
    try:
        elements = {a.strip(): dumb_property_dict(b) for a, b in pairs}
    except ValueError:
        elements = {}  # not that important

    return elements


def element_style(
    attrs: Dict[str, Optional[str]],
    style_def: Dict[str, Dict[str, str]],
    parent_style: Dict[str, str],
) -> Dict[str, str]:
    """
    :type attrs: dict
    :type style_def: dict
    :type parent_style: dict

    :returns: A hash of the 'final' style attributes of the element
    :rtype: dict
    """
    # Inheritance order: parent style < class-based CSS < inline style attr.
    style = parent_style.copy()
    if "class" in attrs:
        assert attrs["class"] is not None
        for css_class in attrs["class"].split():
            css_style = style_def.get("." + css_class, {})
            style.update(css_style)
    if "style" in attrs:
        assert attrs["style"] is not None
        immediate_style = dumb_property_dict(attrs["style"])
        style.update(immediate_style)

    return style


def google_list_style(style: Dict[str, str]) -> str:
    """
    Finds out whether this is an ordered or unordered list

    :type style: dict

    :rtype: str
    """
    if "list-style-type" in style:
        list_style = style["list-style-type"]
        if list_style in ["disc", "circle", "square", "none"]:
            return "ul"

    return "ol"


def google_has_height(style: Dict[str, str]) -> bool:
    """
    Check if the style of the element has the 'height' attribute
    explicitly defined

    :type style: dict

    :rtype: bool
    """
    return "height" in style


def google_text_emphasis(style: Dict[str, str]) -> List[str]:
    """
    :type style: dict

    :returns: A list of all emphasis modifiers of the element
    :rtype: list
    """
    emphasis = []
    if "text-decoration" in style:
        emphasis.append(style["text-decoration"])
    if "font-style" in style:
        emphasis.append(style["font-style"])
    if "font-weight" in style:
        emphasis.append(style["font-weight"])

    return emphasis


def google_fixed_width_font(style: Dict[str, str]) -> bool:
    """
    Check if the css of the current element defines a fixed width font

    :type style: dict

    :rtype: bool
    """
    font_family = ""
    if "font-family" in style:
        font_family = style["font-family"]
    return "courier new" == font_family or "consolas" == font_family


def list_numbering_start(attrs: Dict[str, Optional[str]]) -> int:
    """
    Extract numbering from list element attributes

    :type attrs: dict

    :rtype: int or None
    """
    if "start" in attrs:
        assert attrs["start"] is not None
        try:
            # Return the value minus one so the caller can pre-increment.
            return int(attrs["start"]) - 1
        except ValueError:
            pass

    return 0


def skipwrap(
    para: str, wrap_links: bool, wrap_list_items: bool, wrap_tables: bool
) -> bool:
    """Decide whether a paragraph must be exempted from line wrapping."""
    # If it appears to contain a link
    # don't wrap
    if not wrap_links and config.RE_LINK.search(para):
        return True
    # If the text begins with four spaces or one tab, it's a code block;
    # don't wrap
    if para[0:4] == "    " or para[0] == "\t":
        return True

    # If the text begins with only two "--", possibly preceded by
    # whitespace, that's an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False

    # I'm not sure what this is for; I thought it was to detect lists,
    # but there's a <br>-inside-<span> case in one of the tests that
    # also depends upon it.
    if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
        return not wrap_list_items

    # If text contains a pipe character it is likely a table
    if not wrap_tables and config.RE_TABLE.search(para):
        return True

    # If the text begins with a single -, *, or +, followed by a space,
    # or an integer, followed by a ., followed by a space (in either
    # case optionally proceeded by whitespace), it's a list; don't wrap.
    return bool(
        config.RE_ORDERED_LIST_MATCHER.match(stripped)
        or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
    )


def escape_md(text: str) -> str:
    """
    Escapes markdown-sensitive characters within other markdown
    constructs.
    """
    return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)


def escape_md_section(
    text: str,
    escape_backslash: bool = True,
    snob: bool = False,
    escape_dot: bool = True,
    escape_plus: bool = True,
    escape_dash: bool = True,
) -> str:
    """
    Escapes markdown-sensitive characters across whole document sections.
    Each escaping operation can be controlled individually.
    """
    # Backslashes must be escaped first so later substitutions don't double up.
    if escape_backslash:
        text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)

    if snob:
        text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)

    if escape_dot:
        text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)

    if escape_plus:
        text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)

    if escape_dash:
        text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)

    return text


def reformat_table(lines: List[str], right_margin: int) -> List[str]:
    """
    Given the lines of a table
    padds the cells and returns the new lines
    """
    # find the maximum width of the columns
    max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
    max_cols = len(max_width)
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        num_cols = len(cols)

        # don't drop any data if colspan attributes result in unequal lengths
        if num_cols < max_cols:
            cols += [""] * (max_cols - num_cols)
        elif max_cols < num_cols:
            max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
            max_cols = num_cols

        max_width = [
            max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)
        ]

    # reformat
    new_lines = []
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        # A line consisting only of "-" and "|" is the header separator row.
        if set(line.strip()) == set("-|"):
            filler = "-"
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("|-" + "|".join(new_cols) + "|")
        else:
            filler = " "
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("| " + "|".join(new_cols) + "|")
    return new_lines


def pad_tables_in_text(text: str, right_margin: int = 1) -> str:
    """
    Provide padding for tables in the text
    """
    lines = text.split("\n")
    table_buffer = []  # type: List[str]
    table_started = False
    new_lines = []
    for line in lines:
        # Toggle table started — tables are delimited by marker lines
        # inserted earlier by the converter (config.TABLE_MARKER_FOR_PAD).
        if config.TABLE_MARKER_FOR_PAD in line:
            table_started = not table_started
            if not table_started:
                table = reformat_table(table_buffer, right_margin)
                new_lines.extend(table)
                table_buffer = []
                new_lines.append("")
            continue
        # Process lines
        if table_started:
            table_buffer.append(line)
        else:
            new_lines.append(line)
    return "\n".join(new_lines)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/config.py
crawl4ai/html2text/config.py
"""Default option values and compiled regexes for the html2text converter."""
import re

# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = False

# Marker to use for marking tables for padding post processing
TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"
# Escape all special characters.  Output is less readable, but avoids
# corner case formatting issues.
ESCAPE_SNOB = False
ESCAPE_BACKSLASH = False
ESCAPE_DOT = False
ESCAPE_PLUS = False
ESCAPE_DASH = False

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = False

# Wrap long lines at position. 0 for no wrapping.
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link
# targets won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True

# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True

# Protect links from line breaks surrounding them with angle brackets (in
# addition to their square brackets)
PROTECT_LINKS = False
# WRAP_LINKS = True
WRAP_LINKS = True

# Wrap list items.
WRAP_LIST_ITEMS = False

# Wrap tables
WRAP_TABLES = False

# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36

# Values Google and others may use to indicate bold text
BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")

IGNORE_ANCHORS = False
IGNORE_MAILTO_LINKS = False
IGNORE_IMAGES = False
IMAGES_AS_HTML = False
IMAGES_TO_ALT = False
IMAGES_WITH_SIZE = False
IGNORE_EMPHASIS = False
MARK_CODE = False
DECODE_ERRORS = "strict"
DEFAULT_IMAGE_ALT = ""
PAD_TABLES = False

# Convert links with same href and text to <href> format
# if they are absolute links
USE_AUTOMATIC_LINKS = True

# For checking space-only lines on line 771
# NOTE(review): this pattern matches one whitespace char followed by a
# literal "+" — looks like it was meant to be r"\s+"; confirm before changing.
RE_SPACE = re.compile(r"\s\+")

RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")

# to find links in the text
RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")

# to find table separators
RE_TABLE = re.compile(r" \| ")

RE_MD_DOT_MATCHER = re.compile(
    r"""
    ^             # start of line
    (\s*\d+)      # optional whitespace and a number
    (\.)          # dot
    (?=\s)        # lookahead assert whitespace
    """,
    re.MULTILINE | re.VERBOSE,
)
RE_MD_PLUS_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (\+)
    (?=\s)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_MD_DASH_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (-)
    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
                  # or another dash (header or hr)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
RE_MD_BACKSLASH_MATCHER = re.compile(
    r"""
    (\\)          # match one slash
    (?=[%s])      # followed by a char that requires escaping
    """
    % re.escape(RE_SLASH_CHARS),
    flags=re.VERBOSE,
)

# HTML entity name -> plain-ascii replacement used when UNICODE_SNOB is off.
UNIFIABLE = {
    "rsquo": "'",
    "lsquo": "'",
    "rdquo": '"',
    "ldquo": '"',
    "copy": "(C)",
    "mdash": "--",
    "nbsp": " ",
    "rarr": "->",
    "larr": "<-",
    "middot": "*",
    "ndash": "-",
    "oelig": "oe",
    "aelig": "ae",
    "agrave": "a",
    "aacute": "a",
    "acirc": "a",
    "atilde": "a",
    "auml": "a",
    "aring": "a",
    "egrave": "e",
    "eacute": "e",
    "ecirc": "e",
    "euml": "e",
    "igrave": "i",
    "iacute": "i",
    "icirc": "i",
    "iuml": "i",
    "ograve": "o",
    "oacute": "o",
    "ocirc": "o",
    "otilde": "o",
    "ouml": "o",
    "ugrave": "u",
    "uacute": "u",
    "ucirc": "u",
    "uuml": "u",
    "lrm": "",
    "rlm": "",
}

# Format tables in HTML rather than Markdown syntax
BYPASS_TABLES = False
# Ignore table-related tags (table, th, td, tr) while keeping rows
IGNORE_TABLES = False


# Use a single line break after a block element rather than two line breaks.
# NOTE: Requires body width setting to be 0.
SINGLE_LINE_BREAK = False


# Use double quotation marks when converting the <q> tag.
OPEN_QUOTE = '"'
CLOSE_QUOTE = '"'

# Include the <sup> and <sub> tags
INCLUDE_SUP_SUB = False
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/__init__.py
crawl4ai/html2text/__init__.py
"""html2text: Turn HTML into equivalent Markdown-structured text.""" import html.entities import html.parser import re import string import urllib.parse as urlparse from textwrap import wrap from typing import Dict, List, Optional, Tuple, Union from . import config from ._typing import OutCallback from .elements import AnchorElement, ListElement from .utils import ( dumb_css_parser, element_style, escape_md, escape_md_section, google_fixed_width_font, google_has_height, google_list_style, google_text_emphasis, hn, list_numbering_start, pad_tables_in_text, skipwrap, unifiable_n, ) __version__ = (2024, 2, 26) # TODO: # Support decoded entities with UNIFIABLE. class HTML2Text(html.parser.HTMLParser): def __init__( self, out: Optional[OutCallback] = None, baseurl: str = "", bodywidth: int = config.BODY_WIDTH, ) -> None: """ Input parameters: out: possible custom replacement for self.outtextf (which appends lines of text). baseurl: base URL of the document we process """ super().__init__(convert_charrefs=False) # Config options self.split_next_td = False self.td_count = 0 self.table_start = False self.unicode_snob = config.UNICODE_SNOB # covered in cli self.escape_snob = config.ESCAPE_SNOB # covered in cli self.escape_backslash = config.ESCAPE_BACKSLASH # covered in cli self.escape_dot = config.ESCAPE_DOT # covered in cli self.escape_plus = config.ESCAPE_PLUS # covered in cli self.escape_dash = config.ESCAPE_DASH # covered in cli self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH self.body_width = bodywidth # covered in cli self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli self.inline_links = config.INLINE_LINKS # covered in cli self.protect_links = config.PROTECT_LINKS # covered in cli self.google_list_indent = config.GOOGLE_LIST_INDENT # covered in cli self.ignore_links = config.IGNORE_ANCHORS # covered in cli self.ignore_mailto_links = config.IGNORE_MAILTO_LINKS # covered in cli self.ignore_images = config.IGNORE_IMAGES # covered in cli 
self.images_as_html = config.IMAGES_AS_HTML # covered in cli self.images_to_alt = config.IMAGES_TO_ALT # covered in cli self.images_with_size = config.IMAGES_WITH_SIZE # covered in cli self.ignore_emphasis = config.IGNORE_EMPHASIS # covered in cli self.bypass_tables = config.BYPASS_TABLES # covered in cli self.ignore_tables = config.IGNORE_TABLES # covered in cli self.google_doc = False # covered in cli self.ul_item_mark = "*" # covered in cli self.emphasis_mark = "_" # covered in cli self.strong_mark = "**" self.single_line_break = config.SINGLE_LINE_BREAK # covered in cli self.use_automatic_links = config.USE_AUTOMATIC_LINKS # covered in cli self.hide_strikethrough = False # covered in cli self.mark_code = config.MARK_CODE self.wrap_list_items = config.WRAP_LIST_ITEMS # covered in cli self.wrap_links = config.WRAP_LINKS # covered in cli self.wrap_tables = config.WRAP_TABLES self.pad_tables = config.PAD_TABLES # covered in cli self.default_image_alt = config.DEFAULT_IMAGE_ALT # covered in cli self.tag_callback = None self.open_quote = config.OPEN_QUOTE # covered in cli self.close_quote = config.CLOSE_QUOTE # covered in cli self.include_sup_sub = config.INCLUDE_SUP_SUB # covered in cli if out is None: self.out = self.outtextf else: self.out = out # empty list to store output characters before they are "joined" self.outtextlist: List[str] = [] self.quiet = 0 self.p_p = 0 # number of newline character to print before next output self.outcount = 0 self.start = True self.space = False self.a: List[AnchorElement] = [] self.astack: List[Optional[Dict[str, Optional[str]]]] = [] self.maybe_automatic_link: Optional[str] = None self.empty_link = False self.absolute_url_matcher = re.compile(r"^[a-zA-Z+]+://") self.acount = 0 self.list: List[ListElement] = [] self.blockquote = 0 self.pre = False self.startpre = False self.code = False self.quote = False self.br_toggle = "" self.lastWasNL = False self.lastWasList = False self.style = 0 self.style_def: Dict[str, Dict[str, str]] 
= {} self.tag_stack: List[Tuple[str, Dict[str, Optional[str]], Dict[str, str]]] = [] self.emphasis = 0 self.drop_white_space = 0 self.inheader = False # Current abbreviation definition self.abbr_title: Optional[str] = None # Last inner HTML (for abbr being defined) self.abbr_data: Optional[str] = None # Stack of abbreviations to write later self.abbr_list: Dict[str, str] = {} self.baseurl = baseurl self.stressed = False self.preceding_stressed = False self.preceding_data = "" self.current_tag = "" config.UNIFIABLE["nbsp"] = "&nbsp_place_holder;" def update_params(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def feed(self, data: str) -> None: data = data.replace("</' + 'script>", "</ignore>") super().feed(data) def handle(self, data: str) -> str: self.start = True self.feed(data) self.feed("") markdown = self.optwrap(self.finish()) if self.pad_tables: return pad_tables_in_text(markdown) else: return markdown def outtextf(self, s: str) -> None: self.outtextlist.append(s) if s: self.lastWasNL = s[-1] == "\n" def finish(self) -> str: self.close() self.pbr() self.o("", force="end") outtext = "".join(self.outtextlist) if self.unicode_snob: nbsp = html.entities.html5["nbsp;"] else: nbsp = " " outtext = outtext.replace("&nbsp_place_holder;", nbsp) # Clear self.outtextlist to avoid memory leak of its content to # the next handling. self.outtextlist = [] return outtext def handle_charref(self, c: str) -> None: self.handle_data(self.charref(c), True) def handle_entityref(self, c: str) -> None: ref = self.entityref(c) # ref may be an empty string (e.g. for &lrm;/&rlm; markers that should # not contribute to the final output). # self.handle_data cannot handle a zero-length string right after a # stressed tag or mid-text within a stressed tag (text get split and # self.stressed/self.preceding_stressed gets switched after the first # part of that text). 
if ref: self.handle_data(ref, True) def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None: self.handle_tag(tag, dict(attrs), start=True) def handle_endtag(self, tag: str) -> None: self.handle_tag(tag, {}, start=False) def previousIndex(self, attrs: Dict[str, Optional[str]]) -> Optional[int]: """ :type attrs: dict :returns: The index of certain set of attributes (of a link) in the self.a list. If the set of attributes is not found, returns None :rtype: int """ if "href" not in attrs: return None match = False for i, a in enumerate(self.a): if "href" in a.attrs and a.attrs["href"] == attrs["href"]: if "title" in a.attrs or "title" in attrs: if ( "title" in a.attrs and "title" in attrs and a.attrs["title"] == attrs["title"] ): match = True else: match = True if match: return i return None def handle_emphasis( self, start: bool, tag_style: Dict[str, str], parent_style: Dict[str, str] ) -> None: """ Handles various text emphases """ tag_emphasis = google_text_emphasis(tag_style) parent_emphasis = google_text_emphasis(parent_style) # handle Google's text emphasis strikethrough = "line-through" in tag_emphasis and self.hide_strikethrough # google and others may mark a font's weight as `bold` or `700` bold = False for bold_marker in config.BOLD_TEXT_STYLE_VALUES: bold = bold_marker in tag_emphasis and bold_marker not in parent_emphasis if bold: break italic = "italic" in tag_emphasis and "italic" not in parent_emphasis fixed = ( google_fixed_width_font(tag_style) and not google_fixed_width_font(parent_style) and not self.pre ) if start: # crossed-out text must be handled before other attributes # in order not to output qualifiers unnecessarily if bold or italic or fixed: self.emphasis += 1 if strikethrough: self.quiet += 1 if italic: self.o(self.emphasis_mark) self.drop_white_space += 1 if bold: self.o(self.strong_mark) self.drop_white_space += 1 if fixed: self.o("`") self.drop_white_space += 1 self.code = True else: if bold or italic or 
fixed: # there must not be whitespace before closing emphasis mark self.emphasis -= 1 self.space = False if fixed: if self.drop_white_space: # empty emphasis, drop it self.drop_white_space -= 1 else: self.o("`") self.code = False if bold: if self.drop_white_space: # empty emphasis, drop it self.drop_white_space -= 1 else: self.o(self.strong_mark) if italic: if self.drop_white_space: # empty emphasis, drop it self.drop_white_space -= 1 else: self.o(self.emphasis_mark) # space is only allowed after *all* emphasis marks if (bold or italic) and not self.emphasis: self.o(" ") if strikethrough: self.quiet -= 1 def handle_tag( self, tag: str, attrs: Dict[str, Optional[str]], start: bool ) -> None: self.current_tag = tag if self.tag_callback is not None: if self.tag_callback(self, tag, attrs, start) is True: return # first thing inside the anchor tag is another tag # that produces some output if ( start and self.maybe_automatic_link is not None and tag not in ["p", "div", "style", "dl", "dt"] and (tag != "img" or self.ignore_images) ): self.o("[") self.maybe_automatic_link = None self.empty_link = False if self.google_doc: # the attrs parameter is empty for a closing tag. in addition, we # need the attributes of the parent nodes in order to get a # complete style description for the current element. we assume # that google docs export well formed html. 
parent_style: Dict[str, str] = {} if start: if self.tag_stack: parent_style = self.tag_stack[-1][2] tag_style = element_style(attrs, self.style_def, parent_style) self.tag_stack.append((tag, attrs, tag_style)) else: dummy, attrs, tag_style = ( self.tag_stack.pop() if self.tag_stack else (None, {}, {}) ) if self.tag_stack: parent_style = self.tag_stack[-1][2] if hn(tag): # check if nh is inside of an 'a' tag (incorrect but found in the wild) if self.astack: if start: self.inheader = True # are inside link name, so only add '#' if it can appear before '[' if self.outtextlist and self.outtextlist[-1] == "[": self.outtextlist.pop() self.space = False self.o(hn(tag) * "#" + " ") self.o("[") else: self.p_p = 0 # don't break up link name self.inheader = False return # prevent redundant emphasis marks on headers else: self.p() if start: self.inheader = True self.o(hn(tag) * "#" + " ") else: self.inheader = False return # prevent redundant emphasis marks on headers if tag in ["p", "div"]: if self.google_doc: if start and google_has_height(tag_style): self.p() else: self.soft_br() elif self.astack: pass elif self.split_next_td: pass else: self.p() if tag == "br" and start: if self.blockquote > 0: self.o(" \n> ") else: self.o(" \n") if tag == "hr" and start: self.p() self.o("* * *") self.p() if tag in ["head", "style", "script"]: if start: self.quiet += 1 else: self.quiet -= 1 if tag == "style": if start: self.style += 1 else: self.style -= 1 if tag in ["body"]: self.quiet = 0 # sites like 9rules.com never close <head> if tag == "blockquote": if start: self.p() self.o("> ", force=True) self.start = True self.blockquote += 1 else: self.blockquote -= 1 self.p() if tag in ["em", "i", "u"] and not self.ignore_emphasis: # Separate with a space if we immediately follow an alphanumeric # character, since otherwise Markdown won't render the emphasis # marks, and we'll be left with eg 'foo_bar_' visible. 
# (Don't add a space otherwise, though, since there isn't one in the # original HTML.) if ( start and self.preceding_data and self.preceding_data[-1] not in string.whitespace and self.preceding_data[-1] not in string.punctuation ): emphasis = " " + self.emphasis_mark self.preceding_data += " " else: emphasis = self.emphasis_mark self.o(emphasis) if start: self.stressed = True if tag in ["strong", "b"] and not self.ignore_emphasis: # Separate with space if we immediately follow an * character, since # without it, Markdown won't render the resulting *** correctly. # (Don't add a space otherwise, though, since there isn't one in the # original HTML.) if ( start and self.preceding_data # When `self.strong_mark` is set to empty, the next condition # will cause IndexError since it's trying to match the data # with the first character of the `self.strong_mark`. and len(self.strong_mark) > 0 and self.preceding_data[-1] == self.strong_mark[0] ): strong = " " + self.strong_mark self.preceding_data += " " else: strong = self.strong_mark self.o(strong) if start: self.stressed = True if tag in ["del", "strike", "s"]: if start and self.preceding_data and self.preceding_data[-1] == "~": strike = " ~~" self.preceding_data += " " else: strike = "~~" self.o(strike) if start: self.stressed = True if self.google_doc: if not self.inheader: # handle some font attributes, but leave headers clean self.handle_emphasis(start, tag_style, parent_style) if tag in ["kbd", "code", "tt"] and not self.pre: self.o("`") # TODO: `` `this` `` self.code = not self.code if tag == "abbr": if start: self.abbr_title = None self.abbr_data = "" if "title" in attrs: self.abbr_title = attrs["title"] else: if self.abbr_title is not None: assert self.abbr_data is not None self.abbr_list[self.abbr_data] = self.abbr_title self.abbr_title = None self.abbr_data = None if tag == "q": if not self.quote: self.o(self.open_quote) else: self.o(self.close_quote) self.quote = not self.quote def link_url(self: HTML2Text, 
link: str, title: str = "") -> None: url = urlparse.urljoin(self.baseurl, link) title = ' "{}"'.format(title) if title.strip() else "" self.o("]({url}{title})".format(url=escape_md(url), title=title)) if tag == "a" and not self.ignore_links: if start: self.inside_link = True if ( "href" in attrs and attrs["href"] is not None and not (self.skip_internal_links and attrs["href"].startswith("#")) and not ( self.ignore_mailto_links and attrs["href"].startswith("mailto:") ) ): self.astack.append(attrs) self.maybe_automatic_link = attrs["href"] self.empty_link = True if self.protect_links: attrs["href"] = "<" + attrs["href"] + ">" else: self.astack.append(None) else: self.inside_link = False if self.astack: a = self.astack.pop() if self.maybe_automatic_link and not self.empty_link: self.maybe_automatic_link = None elif a: assert a["href"] is not None if self.empty_link: self.o("[") self.empty_link = False self.maybe_automatic_link = None if self.inline_links: self.p_p = 0 title = a.get("title") or "" title = escape_md(title) link_url(self, a["href"], title) else: i = self.previousIndex(a) if i is not None: a_props = self.a[i] else: self.acount += 1 a_props = AnchorElement(a, self.acount, self.outcount) self.a.append(a_props) self.o("][" + str(a_props.count) + "]") if tag == "img" and start and not self.ignore_images: if "src" in attrs and attrs["src"] is not None: if not self.images_to_alt: attrs["href"] = attrs["src"] alt = attrs.get("alt") or self.default_image_alt # If we have images_with_size, write raw html including width, # height, and alt attributes if self.images_as_html or ( self.images_with_size and ("width" in attrs or "height" in attrs) ): self.o("<img src='" + attrs["src"] + "' ") if "width" in attrs and attrs["width"] is not None: self.o("width='" + attrs["width"] + "' ") if "height" in attrs and attrs["height"] is not None: self.o("height='" + attrs["height"] + "' ") if alt: self.o("alt='" + alt + "' ") self.o("/>") return # If we have a link to create, 
output the start if self.maybe_automatic_link is not None: href = self.maybe_automatic_link if ( self.images_to_alt and escape_md(alt) == href and self.absolute_url_matcher.match(href) ): self.o("<" + escape_md(alt) + ">") self.empty_link = False return else: self.o("[") self.maybe_automatic_link = None self.empty_link = False # If we have images_to_alt, we discard the image itself, # considering only the alt text. if self.images_to_alt: self.o(escape_md(alt)) else: self.o("![" + escape_md(alt) + "]") if self.inline_links: href = attrs.get("href") or "" self.o( "(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")" ) else: i = self.previousIndex(attrs) if i is not None: a_props = self.a[i] else: self.acount += 1 a_props = AnchorElement(attrs, self.acount, self.outcount) self.a.append(a_props) self.o("[" + str(a_props.count) + "]") if tag == "dl" and start: self.p() # Add paragraph break before list starts self.p_p = 0 # Reset paragraph state elif tag == "dt" and start: if self.p_p == 0: # If not first term self.o("\n\n") # Add spacing before new term-definition pair self.p_p = 0 # Reset paragraph state elif tag == "dt" and not start: self.o("\n") # Single newline between term and definition elif tag == "dd" and start: self.o(" ") # Indent definition elif tag == "dd" and not start: self.p_p = 0 if tag in ["ol", "ul"]: # Google Docs create sub lists as top level lists if not self.list and not self.lastWasList: self.p() if start: if self.google_doc: list_style = google_list_style(tag_style) else: list_style = tag numbering_start = list_numbering_start(attrs) self.list.append(ListElement(list_style, numbering_start)) else: if self.list: self.list.pop() if not self.google_doc and not self.list: self.o("\n") self.lastWasList = True else: self.lastWasList = False if tag == "li": self.pbr() if start: if self.list: li = self.list[-1] else: li = ListElement("ul", 0) if self.google_doc: self.o(" " * self.google_nest_count(tag_style)) else: # Indent two spaces per list, 
except use three spaces for an # unordered list inside an ordered list. # https://spec.commonmark.org/0.28/#motivation # TODO: line up <ol><li>s > 9 correctly. parent_list = None for list in self.list: self.o( " " if parent_list == "ol" and list.name == "ul" else " " ) parent_list = list.name if li.name == "ul": self.o(self.ul_item_mark + " ") elif li.name == "ol": li.num += 1 self.o(str(li.num) + ". ") self.start = True if tag in ["table", "tr", "td", "th"]: if self.ignore_tables: if tag == "tr": if start: pass else: self.soft_br() else: pass elif self.bypass_tables: if start: self.soft_br() if tag in ["td", "th"]: if start: self.o("<{}>\n\n".format(tag)) else: self.o("\n</{}>".format(tag)) else: if start: self.o("<{}>".format(tag)) else: self.o("</{}>".format(tag)) else: if tag == "table": if start: self.table_start = True if self.pad_tables: self.o("<" + config.TABLE_MARKER_FOR_PAD + ">") self.o(" \n") else: if self.pad_tables: # add break in case the table is empty or its 1 row table self.soft_br() self.o("</" + config.TABLE_MARKER_FOR_PAD + ">") self.o(" \n") if tag in ["td", "th"] and start: if self.split_next_td: self.o("| ") self.split_next_td = True if tag == "tr" and start: self.td_count = 0 if tag == "tr" and not start: self.split_next_td = False self.soft_br() if tag == "tr" and not start and self.table_start: # Underline table header self.o("|".join(["---"] * self.td_count)) self.soft_br() self.table_start = False if tag in ["td", "th"] and start: self.td_count += 1 if tag == "pre": if start: self.startpre = True self.pre = True else: self.pre = False if self.mark_code: self.out("\n[/code]") self.p() if tag in ["sup", "sub"] and self.include_sup_sub: if start: self.o("<{}>".format(tag)) else: self.o("</{}>".format(tag)) # TODO: Add docstring for these one letter functions def pbr(self) -> None: "Pretty print has a line break" if self.p_p == 0: self.p_p = 1 def p(self) -> None: "Set pretty print to 1 or 2 lines" self.p_p = 1 if self.single_line_break 
else 2 def soft_br(self) -> None: "Soft breaks" self.pbr() self.br_toggle = " " def o( self, data: str, puredata: bool = False, force: Union[bool, str] = False ) -> None: """ Deal with indentation and whitespace """ if self.abbr_data is not None: self.abbr_data += data if not self.quiet: if self.google_doc: # prevent white space immediately after 'begin emphasis' # marks ('**' and '_') lstripped_data = data.lstrip() if self.drop_white_space and not (self.pre or self.code): data = lstripped_data if lstripped_data != "": self.drop_white_space = 0 if puredata and not self.pre: # This is a very dangerous call ... it could mess up # all handling of &nbsp; when not handled properly # (see entityref) data = re.sub(r"\s+", r" ", data) if data and data[0] == " ": self.space = True data = data[1:] if not data and not force: return if self.startpre: # self.out(" :") #TODO: not output when already one there if not data.startswith("\n") and not data.startswith("\r\n"): # <pre>stuff... data = "\n" + data if self.mark_code: self.out("\n[code]") self.p_p = 0 bq = ">" * self.blockquote if not (force and data and data[0] == ">") and self.blockquote: bq += " " if self.pre: if not self.list: bq += " " # else: list content is already partially indented bq += " " * len(self.list) data = data.replace("\n", "\n" + bq) if self.startpre: self.startpre = False if self.list: # use existing initial indentation data = data.lstrip("\n") if self.start: self.space = False self.p_p = 0 self.start = False if force == "end": # It's the end. 
self.p_p = 0 self.out("\n") self.space = False if self.p_p: self.out((self.br_toggle + "\n" + bq) * self.p_p) self.space = False self.br_toggle = "" if self.space: if not self.lastWasNL: self.out(" ") self.space = False if self.a and ( (self.p_p == 2 and self.links_each_paragraph) or force == "end" ): if force == "end": self.out("\n") newa = [] for link in self.a: if self.outcount > link.outcount: self.out( " [" + str(link.count) + "]: " + urlparse.urljoin(self.baseurl, link.attrs["href"]) ) if "title" in link.attrs and link.attrs["title"] is not None: self.out(" (" + link.attrs["title"] + ")") self.out("\n") else: newa.append(link) # Don't need an extra line when nothing was done. if self.a != newa: self.out("\n") self.a = newa if self.abbr_list and force == "end": for abbr, definition in self.abbr_list.items(): self.out(" *[" + abbr + "]: " + definition + "\n") self.p_p = 0 self.out(data) self.outcount += 1 def handle_data(self, data: str, entity_char: bool = False) -> None: if not data:
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/html2text/elements.py
crawl4ai/html2text/elements.py
from typing import Dict, Optional


class AnchorElement:
    """Bookkeeping record for one reference-style link.

    Holds the anchor tag's raw attributes together with the running link
    number (used as the ``[n]`` reference label) and the output position
    at which the link was first emitted.
    """

    __slots__ = ["attrs", "count", "outcount"]

    def __init__(self, attrs: Dict[str, Optional[str]], count: int, outcount: int):
        # Raw HTML attributes of the <a> tag; values may be None.
        self.attrs = attrs
        # Sequence number used as the reference label when rendering.
        self.count = count
        # Output-chunk index recorded when the anchor was encountered.
        self.outcount = outcount


class ListElement:
    """State for one open HTML list: the tag kind and its item counter."""

    __slots__ = ["name", "num"]

    def __init__(self, name: str, num: int):
        # List tag name (e.g. "ul" or "ol").
        self.name = name
        # Running item number; incremented per item for ordered lists.
        self.num = num
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/js_snippet/__init__.py
crawl4ai/js_snippet/__init__.py
import os


def load_js_script(script_name):
    """Load a JavaScript snippet stored alongside this module.

    Args:
        script_name: Base name of the script, without the ``.js`` extension.

    Returns:
        The script's source code as a string.

    Raises:
        ValueError: If ``<script_name>.js`` does not exist in this
            package folder.
    """
    # Resolve the directory containing this module so the lookup works
    # regardless of the process's current working directory.
    current_script_path = os.path.dirname(os.path.realpath(__file__))
    script_path = os.path.join(current_script_path, script_name + ".js")
    if not os.path.exists(script_path):
        raise ValueError(
            f"Script {script_name} not found in the folder {current_script_path}"
        )
    # Pin the encoding (snippets are assumed UTF-8 — the safe web default)
    # so reads do not depend on the platform's locale default encoding.
    with open(script_path, "r", encoding="utf-8") as f:
        script_content = f.read()
    return script_content
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/script/c4a_result.py
crawl4ai/script/c4a_result.py
"""
Result classes for C4A-Script compilation
Clean API design with no exceptions
"""

from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Any, Optional
import json


class ErrorType(Enum):
    # Broad category of a compilation problem.
    SYNTAX = "syntax"
    SEMANTIC = "semantic"
    RUNTIME = "runtime"


class Severity(Enum):
    # How serious a reported issue is.
    ERROR = "error"
    WARNING = "warning"
    INFO = "info"


@dataclass
class Suggestion:
    """A suggestion for fixing an error"""
    message: str
    # Optional concrete replacement text for the problem span.
    fix: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "message": self.message,
            "fix": self.fix
        }


@dataclass
class ErrorDetail:
    """Detailed information about a compilation error"""
    # Core info
    type: ErrorType
    code: str  # E001, E002, etc.
    severity: Severity
    message: str
    # Location: line/column appear to be 1-based (marker math below uses
    # column - 1 for the 0-based caret position).
    line: int
    column: int
    # Context
    source_line: str
    # Optional fields with defaults
    end_line: Optional[int] = None
    end_column: Optional[int] = None
    line_before: Optional[str] = None
    line_after: Optional[str] = None
    # Help
    suggestions: List[Suggestion] = field(default_factory=list)
    documentation_url: Optional[str] = None

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization"""
        return {
            "type": self.type.value,
            "code": self.code,
            "severity": self.severity.value,
            "message": self.message,
            "location": {
                "line": self.line,
                "column": self.column,
                "endLine": self.end_line,
                "endColumn": self.end_column
            },
            "context": {
                "sourceLine": self.source_line,
                "lineBefore": self.line_before,
                "lineAfter": self.line_after,
                # Marker length defaults to a single character when no
                # end column is known.
                "marker": {
                    "start": self.column - 1,
                    "length": (self.end_column - self.column) if self.end_column else 1
                }
            },
            "suggestions": [s.to_dict() for s in self.suggestions],
            "documentationUrl": self.documentation_url
        }

    def to_json(self) -> str:
        """Convert to JSON string"""
        return json.dumps(self.to_dict(), indent=2)

    @property
    def formatted_message(self) -> str:
        """Returns the nice text format for terminals"""
        lines = []
        lines.append(f"\n{'='*60}")
        lines.append(f"{self.type.value.title()} Error [{self.code}]")
        lines.append(f"{'='*60}")
        lines.append(f"Location: Line {self.line}, Column {self.column}")
        lines.append(f"Error: {self.message}")

        if self.source_line:
            # Caret under the offending column, extended with tildes up
            # to end_column when a span is known.
            marker = " " * (self.column - 1) + "^"
            if self.end_column:
                marker += "~" * (self.end_column - self.column - 1)

            lines.append(f"\nCode:")
            if self.line_before:
                lines.append(f" {self.line - 1: >3} | {self.line_before}")
            lines.append(f" {self.line: >3} | {self.source_line}")
            lines.append(f" | {marker}")
            if self.line_after:
                lines.append(f" {self.line + 1: >3} | {self.line_after}")

        if self.suggestions:
            lines.append("\nSuggestions:")
            for i, suggestion in enumerate(self.suggestions, 1):
                lines.append(f" {i}. {suggestion.message}")
                if suggestion.fix:
                    lines.append(f" Fix: {suggestion.fix}")

        lines.append("="*60)
        return "\n".join(lines)

    @property
    def simple_message(self) -> str:
        """Returns just the error message without formatting"""
        return f"Line {self.line}: {self.message}"


@dataclass
class WarningDetail:
    """Information about a compilation warning"""
    code: str
    message: str
    line: int
    column: int

    def to_dict(self) -> dict:
        return {
            "code": self.code,
            "message": self.message,
            "line": self.line,
            "column": self.column
        }


@dataclass
class CompilationResult:
    """Result of C4A-Script compilation"""
    success: bool
    # Generated JavaScript statements, one string per statement.
    js_code: Optional[List[str]] = None
    errors: List[ErrorDetail] = field(default_factory=list)
    warnings: List[WarningDetail] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization"""
        return {
            "success": self.success,
            "jsCode": self.js_code,
            "errors": [e.to_dict() for e in self.errors],
            "warnings": [w.to_dict() for w in self.warnings],
            "metadata": self.metadata
        }

    def to_json(self) -> str:
        """Convert to JSON string"""
        return json.dumps(self.to_dict(), indent=2)

    @property
    def has_errors(self) -> bool:
        """Check if there are any errors"""
        return len(self.errors) > 0

    @property
    def has_warnings(self) -> bool:
        """Check if there are any warnings"""
        return len(self.warnings) > 0

    @property
    def first_error(self) -> Optional[ErrorDetail]:
        """Get the first error if any"""
        return self.errors[0] if self.errors else None

    def __str__(self) -> str:
        """String representation for debugging"""
        if self.success:
            msg = f"βœ“ Compilation successful"
            if self.js_code:
                msg += f" - {len(self.js_code)} statements generated"
            if self.warnings:
                msg += f" ({len(self.warnings)} warnings)"
            return msg
        else:
            return f"βœ— Compilation failed - {len(self.errors)} error(s)"


@dataclass
class ValidationResult:
    """Result of script validation"""
    valid: bool
    errors: List[ErrorDetail] = field(default_factory=list)
    warnings: List[WarningDetail] = field(default_factory=list)

    def to_dict(self) -> dict:
        return {
            "valid": self.valid,
            "errors": [e.to_dict() for e in self.errors],
            "warnings": [w.to_dict() for w in self.warnings]
        }

    def to_json(self) -> str:
        return json.dumps(self.to_dict(), indent=2)

    @property
    def first_error(self) -> Optional[ErrorDetail]:
        return self.errors[0] if self.errors else None
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/script/__init__.py
crawl4ai/script/__init__.py
""" C4A-Script: A domain-specific language for web automation in Crawl4AI """ from .c4a_compile import C4ACompiler, compile, validate, compile_file from .c4a_result import ( CompilationResult, ValidationResult, ErrorDetail, WarningDetail, ErrorType, Severity, Suggestion ) __all__ = [ # Main compiler "C4ACompiler", # Convenience functions "compile", "validate", "compile_file", # Result types "CompilationResult", "ValidationResult", "ErrorDetail", "WarningDetail", # Enums "ErrorType", "Severity", "Suggestion" ]
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/script/c4ai_script.py
crawl4ai/script/c4ai_script.py
""" 2025-06-03 By Unclcode: C4A-Script Language Documentation Feeds Crawl4AI via CrawlerRunConfig(js_code=[ ... ]) – no core modifications. """ from __future__ import annotations import pathlib, re, sys, textwrap from dataclasses import dataclass from typing import Any, Dict, List, Union from lark import Lark, Transformer, v_args from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError # --------------------------------------------------------------------------- # # Custom Error Classes # --------------------------------------------------------------------------- # class C4AScriptError(Exception): """Custom error class for C4A-Script compilation errors""" def __init__(self, message: str, line: int = None, column: int = None, error_type: str = "Syntax Error", details: str = None): self.message = message self.line = line self.column = column self.error_type = error_type self.details = details super().__init__(self._format_message()) def _format_message(self) -> str: """Format a clear error message""" lines = [f"\n{'='*60}"] lines.append(f"C4A-Script {self.error_type}") lines.append(f"{'='*60}") if self.line: lines.append(f"Location: Line {self.line}" + (f", Column {self.column}" if self.column else "")) lines.append(f"Error: {self.message}") if self.details: lines.append(f"\nDetails: {self.details}") lines.append("="*60) return "\n".join(lines) @classmethod def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError': """Create C4AScriptError from another exception""" script_text = script if isinstance(script, str) else '\n'.join(script) script_lines = script_text.split('\n') if isinstance(exc, UnexpectedToken): # Extract line and column from UnexpectedToken line = exc.line column = exc.column # Get the problematic line if 0 < line <= len(script_lines): problem_line = script_lines[line - 1] marker = " " * (column - 1) + "^" details = f"\nCode:\n {problem_line}\n {marker}\n" # Improve error message based on context if 
exc.token.type == 'CLICK' and 'THEN' in str(exc.expected): message = "Missing 'THEN' keyword after IF condition" elif exc.token.type == '$END': message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands" elif 'RPAR' in str(exc.expected): message = "Missing closing parenthesis ')'" elif 'COMMA' in str(exc.expected): message = "Missing comma ',' in command" else: message = f"Unexpected '{exc.token}'" if exc.expected: expected_list = [str(e) for e in exc.expected if not e.startswith('_')] if expected_list: message += f". Expected: {', '.join(expected_list[:3])}" details += f"Token: {exc.token.type} ('{exc.token.value}')" else: message = str(exc) details = None return cls(message, line, column, "Syntax Error", details) elif isinstance(exc, UnexpectedCharacters): # Extract line and column line = exc.line column = exc.column if 0 < line <= len(script_lines): problem_line = script_lines[line - 1] marker = " " * (column - 1) + "^" details = f"\nCode:\n {problem_line}\n {marker}\n" message = f"Invalid character or unexpected text at position {column}" else: message = str(exc) details = None return cls(message, line, column, "Syntax Error", details) elif isinstance(exc, ValueError): # Handle runtime errors like undefined procedures message = str(exc) # Try to find which line caused the error if "Unknown procedure" in message: proc_name = re.search(r"'([^']+)'", message) if proc_name: proc_name = proc_name.group(1) for i, line in enumerate(script_lines, 1): if proc_name in line and not line.strip().startswith('PROC'): details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC" return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details) return cls(message, None, None, "Runtime Error", None) else: # Generic error return cls(str(exc), None, None, "Compilation Error", None) # --------------------------------------------------------------------------- # # 1. 
Grammar # --------------------------------------------------------------------------- # GRAMMAR = r""" start : line* ?line : command | proc_def | include | comment command : wait | nav | click_cmd | double_click | right_click | move | drag | scroll | type | clear | set_input | press | key_down | key_up | eval_cmd | setvar | proc_call | if_cmd | repeat_cmd wait : "WAIT" (ESCAPED_STRING|BACKTICK_STRING|NUMBER) NUMBER? -> wait_cmd nav : "GO" URL -> go | "RELOAD" -> reload | "BACK" -> back | "FORWARD" -> forward click_cmd : "CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> click double_click : "DOUBLE_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> double_click right_click : "RIGHT_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> right_click move : "MOVE" coords -> move drag : "DRAG" coords coords -> drag scroll : "SCROLL" DIR NUMBER? -> scroll type : "TYPE" (ESCAPED_STRING | NAME) -> type clear : "CLEAR" BACKTICK_STRING -> clear set_input : "SET" BACKTICK_STRING (ESCAPED_STRING | BACKTICK_STRING | NAME) -> set_input press : "PRESS" WORD -> press key_down : "KEY_DOWN" WORD -> key_down key_up : "KEY_UP" WORD -> key_up eval_cmd : "EVAL" BACKTICK_STRING -> eval_cmd setvar : "SETVAR" NAME "=" value -> setvar proc_call : NAME -> proc_call proc_def : "PROC" NAME line* "ENDPROC" -> proc_def include : "USE" ESCAPED_STRING -> include comment : /#.*/ -> comment if_cmd : "IF" "(" condition ")" "THEN" command ("ELSE" command)? 
-> if_cmd repeat_cmd : "REPEAT" "(" command "," repeat_count ")" -> repeat_cmd condition : not_cond | exists_cond | js_cond not_cond : "NOT" condition -> not_cond exists_cond : "EXISTS" BACKTICK_STRING -> exists_cond js_cond : BACKTICK_STRING -> js_cond repeat_count : NUMBER | BACKTICK_STRING coords : NUMBER NUMBER value : ESCAPED_STRING | BACKTICK_STRING | NUMBER DIR : /(UP|DOWN|LEFT|RIGHT)/i REST : /[^\n]+/ URL : /(http|https):\/\/[^\s]+/ NAME : /\$?[A-Za-z_][A-Za-z0-9_]*/ WORD : /[A-Za-z0-9+]+/ BACKTICK_STRING : /`[^`]*`/ %import common.NUMBER %import common.ESCAPED_STRING %import common.WS_INLINE %import common.NEWLINE %ignore WS_INLINE %ignore NEWLINE """ # --------------------------------------------------------------------------- # # 2. IR dataclasses # --------------------------------------------------------------------------- # @dataclass class Cmd: op: str args: List[Any] @dataclass class Proc: name: str body: List[Cmd] # --------------------------------------------------------------------------- # # 3. 
# AST -> IR
# --------------------------------------------------------------------------- #


@v_args(inline=True)
class ASTBuilder(Transformer):
    """Lark Transformer that lowers the C4A-Script parse tree into flat IR.

    Each method name corresponds to a grammar rule; Lark invokes it with the
    rule's children passed inline (via ``@v_args(inline=True)``) and the
    return value replaces the node.  Rules produce either a ``Cmd``, a
    ``Proc``, or a plain tuple for sub-values (conditions, coordinates).
    NOTE: method names must stay in sync with the GRAMMAR rule names.
    """

    # helpers
    def _strip(self, s):
        """Remove one pair of surrounding double quotes or backticks, if present."""
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        elif s.startswith('`') and s.endswith('`'):
            return s[1:-1]
        return s

    # Pass-through rules: `start` collects every parsed line into a list;
    # `line` and `command` simply forward their single child.
    def start(self, *i):
        return list(i)

    def line(self, i):
        return i

    def command(self, i):
        return i

    # WAIT
    def wait_cmd(self, rest, timeout=None):
        """Build a WAIT command.

        The payload is a ``(value, kind)`` pair where kind is "seconds" for a
        numeric argument, "text" for a double-quoted string, or "selector"
        for a backticked (or bare) argument.  ``timeout`` is the optional
        second grammar argument, stored as an int or ``None``.
        """
        rest_str = str(rest)
        # Check if it's a number (including floats)
        try:
            num_val = float(rest_str)
            payload = (num_val, "seconds")
        except ValueError:
            if rest_str.startswith('"') and rest_str.endswith('"'):
                payload = (self._strip(rest_str), "text")
            elif rest_str.startswith('`') and rest_str.endswith('`'):
                payload = (self._strip(rest_str), "selector")
            else:
                # Bare words fall back to being treated as a selector.
                payload = (rest_str, "selector")
        return Cmd("WAIT", [payload, int(timeout) if timeout else None])

    # NAV
    def go(self, u):
        return Cmd("GO", [str(u)])

    def reload(self):
        return Cmd("RELOAD", [])

    def back(self):
        return Cmd("BACK", [])

    def forward(self):
        return Cmd("FORWARD", [])

    # CLICK, DOUBLE_CLICK, RIGHT_CLICK
    def click(self, *args):
        return self._handle_click("CLICK", args)

    def double_click(self, *args):
        return self._handle_click("DBLCLICK", args)

    def right_click(self, *args):
        return self._handle_click("RIGHTCLICK", args)

    def _handle_click(self, op, args):
        """Shared builder for the three click variants.

        One argument means a backtick selector; two arguments are x/y
        coordinates.  The payload tuple's first element tags which form
        was used ("selector" vs "coords") for the JS emitter.
        """
        if len(args) == 1:
            # Single argument - backtick string
            target = self._strip(str(args[0]))
            return Cmd(op, [("selector", target)])
        else:
            # Two arguments - coordinates
            x, y = args
            return Cmd(op, [("coords", int(x), int(y))])

    # MOVE / DRAG / SCROLL
    def coords(self, x, y):
        # Tagged tuple consumed by move/drag and by the JS emitter.
        return ("coords", int(x), int(y))

    def move(self, c):
        return Cmd("MOVE", [c])

    def drag(self, c1, c2):
        return Cmd("DRAG", [c1, c2])

    def scroll(self, dir_tok, amt=None):
        # Direction is normalised to upper case; scroll amount defaults to
        # 500 pixels when omitted.
        return Cmd("SCROLL", [dir_tok.upper(), int(amt) if amt else 500])

    # KEYS
    def type(self, tok):
        return Cmd("TYPE", [self._strip(str(tok))])

    def clear(self, sel):
        return Cmd("CLEAR", [self._strip(str(sel))])

    def set_input(self, sel, val):
        return Cmd("SET", [self._strip(str(sel)), self._strip(str(val))])

    def press(self, w):
        return Cmd("PRESS", [str(w)])

    def key_down(self, w):
        return Cmd("KEYDOWN", [str(w)])

    def key_up(self, w):
        return Cmd("KEYUP", [str(w)])

    # FLOW
    def eval_cmd(self, txt):
        return Cmd("EVAL", [self._strip(str(txt))])

    def setvar(self, n, v):
        # v might be a Token or a Tree, extract value properly
        if hasattr(v, 'value'):
            value = v.value
        elif hasattr(v, 'children') and len(v.children) > 0:
            value = v.children[0].value
        else:
            value = str(v)
        return Cmd("SETVAR", [str(n), self._strip(value)])

    def proc_call(self, n):
        return Cmd("CALL", [str(n)])

    def proc_def(self, n, *body):
        # Non-Cmd children (e.g. stray tokens) are filtered out of the body.
        return Proc(str(n), [b for b in body if isinstance(b, Cmd)])

    def include(self, p):
        # p is a lark Token; Token subclasses str, so _strip applies directly.
        return Cmd("INCLUDE", [self._strip(p)])

    def comment(self, *_):
        # Comments compile to NOP and are dropped by the compiler.
        return Cmd("NOP", [])

    # IF-THEN-ELSE and EXISTS
    def if_cmd(self, condition, then_cmd, else_cmd=None):
        return Cmd("IF", [condition, then_cmd, else_cmd])

    def condition(self, cond):
        return cond

    def not_cond(self, cond):
        # Tagged tuple; unwrapped recursively by Compiler._emit_condition.
        return ("NOT", cond)

    def exists_cond(self, selector):
        return ("EXISTS", self._strip(str(selector)))

    def js_cond(self, expr):
        return ("JS", self._strip(str(expr)))

    # REPEAT
    def repeat_cmd(self, cmd, count):
        return Cmd("REPEAT", [cmd, count])

    def repeat_count(self, value):
        # Returned as raw text: a plain number stays digits, a JS expression
        # keeps its backticks so the emitter can tell the two apart.
        return str(value)


# --------------------------------------------------------------------------- #
# 4.
# Compiler
# --------------------------------------------------------------------------- #
class Compiler:
    """Compiles C4A-Script source into a list of executable JavaScript snippets.

    Pipeline: parse (resolving INCLUDEs) -> collect PROC definitions ->
    inline CALLs -> apply SETVAR substitutions -> emit one JS string per
    remaining command.
    """

    def __init__(self, root: pathlib.Path | None = None):
        # LALR parser built from the module-level GRAMMAR definition.
        self.parser = Lark(GRAMMAR, start="start", parser="lalr")
        # Base directory against which INCLUDE paths are resolved.
        self.root = pathlib.Path(root or ".").resolve()
        self.vars: Dict[str, Any] = {}    # SETVAR name -> value
        self.procs: Dict[str, Proc] = {}  # PROC name -> definition

    def compile(self, text: Union[str, List[str]]) -> List[str]:
        """Run the full pipeline and return the emitted JS statements.

        Accepts either the whole script as a string or a list of lines.
        NOP commands (comments) are dropped from the output.
        """
        # Handle list input by joining with newlines
        if isinstance(text, list):
            text = '\n'.join(text)
        ir = self._parse_with_includes(text)
        ir = self._collect_procs(ir)
        ir = self._inline_calls(ir)
        ir = self._apply_set_vars(ir)
        return [self._emit_js(c) for c in ir if isinstance(c, Cmd) and c.op != "NOP"]

    # passes
    def _parse_with_includes(self, txt, seen=None):
        """Parse `txt` and recursively splice in INCLUDEd files.

        `seen` tracks resolved include paths to detect circular includes;
        a fresh set is created on the top-level call.
        """
        seen = seen or set()
        cmds = ASTBuilder().transform(self.parser.parse(txt))
        out = []
        for c in cmds:
            if isinstance(c, Cmd) and c.op == "INCLUDE":
                p = (self.root / c.args[0]).resolve()
                if p in seen:
                    raise ValueError(f"Circular include {p}")
                seen.add(p)
                out += self._parse_with_includes(p.read_text(), seen)
            else:
                out.append(c)
        return out

    def _collect_procs(self, ir):
        """Pull Proc definitions out of the stream into self.procs."""
        out = []
        for i in ir:
            if isinstance(i, Proc):
                self.procs[i.name] = i
            else:
                out.append(i)
        return out

    def _inline_calls(self, ir):
        """Replace CALL commands with the (recursively inlined) proc body."""
        out = []
        for c in ir:
            if isinstance(c, Cmd) and c.op == "CALL":
                if c.args[0] not in self.procs:
                    raise ValueError(f"Unknown procedure {c.args[0]!r}")
                out += self._inline_calls(self.procs[c.args[0]].body)
            else:
                out.append(c)
        return out

    def _apply_set_vars(self, ir):
        """Record SETVAR values and substitute $name references.

        SETVAR commands are consumed here (they emit no JS).  Substitution
        is applied in stream order, and only to the commands that carry
        user text: TYPE, EVAL and SET.
        """
        def sub(s):
            # Unknown $names are left untouched (m.group(0) fallback).
            return re.sub(r"\$(\w+)", lambda m: str(self.vars.get(m.group(1), m.group(0))), s) if isinstance(s, str) else s
        out = []
        for c in ir:
            if isinstance(c, Cmd):
                if c.op == "SETVAR":
                    # Store variable
                    self.vars[c.args[0].lstrip('$')] = c.args[1]
                else:
                    # Apply variable substitution to commands that use them
                    if c.op in ("TYPE", "EVAL", "SET"):
                        c.args = [sub(a) for a in c.args]
                    out.append(c)
        return out

    # JS emitter
    def _emit_js(self, cmd: Cmd) -> str:
        """Translate one IR command into a JavaScript snippet string.

        Raises ValueError for any op without an emitter branch.
        NOTE(review): inner indentation of the dedent templates below was
        reconstructed; emitted JS is whitespace-insensitive either way.
        """
        op, a = cmd.op, cmd.args
        if op == "GO":
            return f"window.location.href = '{a[0]}';"
        if op == "RELOAD":
            return "window.location.reload();"
        if op == "BACK":
            return "window.history.back();"
        if op == "FORWARD":
            return "window.history.forward();"
        if op == "WAIT":
            arg, kind = a[0]
            timeout = a[1] or 10  # default 10s when no timeout was given
            if kind == "seconds":
                return f"await new Promise(r=>setTimeout(r,{arg}*1000));"
            if kind == "selector":
                # Escape backslashes then quotes for the single-quoted JS literal.
                sel = arg.replace("\\", "\\\\").replace("'", "\\'")
                return textwrap.dedent(f"""
                    await new Promise((res,rej)=>{{
                      const max = {timeout*1000}, t0 = performance.now();
                      const id = setInterval(()=>{{
                        if(document.querySelector('{sel}')){{clearInterval(id);res();}}
                        else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT selector timeout');}}
                      }},100);
                    }});
                """).strip()
            if kind == "text":
                txt = arg.replace('`', '\\`')
                return textwrap.dedent(f"""
                    await new Promise((res,rej)=>{{
                      const max={timeout*1000},t0=performance.now();
                      const id=setInterval(()=>{{
                        if(document.body.innerText.includes(`{txt}`)){{clearInterval(id);res();}}
                        else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT text timeout');}}
                      }},100);
                    }});
                """).strip()

        # click-style helpers
        def _js_click(sel, evt="click", button=0, detail=1):
            # Dispatch a synthetic MouseEvent on the element matching `sel`.
            sel = sel.replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                  const el=document.querySelector('{sel}');
                  if(el){{
                    el.focus&&el.focus();
                    el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
                  }}
                }})();
            """).strip()

        def _js_click_xy(x, y, evt="click", button=0, detail=1):
            # Same, but target whatever element sits at viewport point (x, y).
            return textwrap.dedent(f"""
                (()=>{{
                  const el=document.elementFromPoint({x},{y});
                  if(el){{
                    el.focus&&el.focus();
                    el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
                  }}
                }})();
            """).strip()

        if op in ("CLICK", "DBLCLICK", "RIGHTCLICK"):
            evt = {"CLICK": "click", "DBLCLICK": "dblclick", "RIGHTCLICK": "contextmenu"}[op]
            btn = 2 if op == "RIGHTCLICK" else 0  # button 2 = right mouse button
            det = 2 if op == "DBLCLICK" else 1    # detail = click count
            kind, *rest = a[0]
            return _js_click_xy(*rest) if kind == "coords" else _js_click(rest[0], evt, btn, det)
        if op == "MOVE":
            _, x, y = a[0]
            return textwrap.dedent(f"""
                document.dispatchEvent(new MouseEvent('mousemove',{{clientX:{x},clientY:{y},bubbles:true}}));
            """).strip()
        if op == "DRAG":
            # Simulate drag as mousedown at source, mousemove, mouseup at target.
            (_, x1, y1), (_, x2, y2) = a
            return textwrap.dedent(f"""
                (()=>{{
                  const s=document.elementFromPoint({x1},{y1});
                  if(!s) return;
                  s.dispatchEvent(new MouseEvent('mousedown',{{bubbles:true,clientX:{x1},clientY:{y1}}}));
                  document.dispatchEvent(new MouseEvent('mousemove',{{bubbles:true,clientX:{x2},clientY:{y2}}}));
                  document.dispatchEvent(new MouseEvent('mouseup', {{bubbles:true,clientX:{x2},clientY:{y2}}}));
                }})();
            """).strip()
        if op == "SCROLL":
            dir_, amt = a
            dx, dy = {"UP": (0, -amt), "DOWN": (0, amt), "LEFT": (-amt, 0), "RIGHT": (amt, 0)}[dir_]
            return f"window.scrollBy({dx},{dy});"
        if op == "TYPE":
            # Appends to the currently focused element's value.
            txt = a[0].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                  const el=document.activeElement;
                  if(el){{
                    el.value += '{txt}';
                    el.dispatchEvent(new Event('input',{{bubbles:true}}));
                  }}
                }})();
            """).strip()
        if op == "CLEAR":
            sel = a[0].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                  const el=document.querySelector('{sel}');
                  if(el && 'value' in el){{
                    el.value = '';
                    el.dispatchEvent(new Event('input',{{bubbles:true}}));
                    el.dispatchEvent(new Event('change',{{bubbles:true}}));
                  }}
                }})();
            """).strip()
        if op == "SET" and len(a) == 2:
            # This is SET for input fields (SET `#field` "value")
            sel = a[0].replace("'", "\\'")
            val = a[1].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                  const el=document.querySelector('{sel}');
                  if(el && 'value' in el){{
                    el.value = '';
                    el.focus&&el.focus();
                    el.value = '{val}';
                    el.dispatchEvent(new Event('input',{{bubbles:true}}));
                    el.dispatchEvent(new Event('change',{{bubbles:true}}));
                  }}
                }})();
            """).strip()
        if op in ("PRESS", "KEYDOWN", "KEYUP"):
            key = a[0]
            # PRESS expands to a keydown followed by a keyup.
            evs = {"PRESS": ("keydown", "keyup"), "KEYDOWN": ("keydown",), "KEYUP": ("keyup",)}[op]
            return ";".join([f"document.dispatchEvent(new KeyboardEvent('{e}',{{key:'{key}',bubbles:true}}))" for e in evs]) + ";"
        if op == "EVAL":
            # User expression is wrapped in an IIFE with try/catch so a
            # throwing snippet cannot abort the whole generated script.
            return textwrap.dedent(f"""
                (()=>{{
                  try {{
                    {a[0]};
                  }} catch (e) {{
                    console.error('C4A-Script EVAL error:', e);
                  }}
                }})();
            """).strip()
        if op == "IF":
            condition, then_cmd, else_cmd = a
            # Generate condition JavaScript
            js_condition = self._emit_condition(condition)
            # Generate commands - handle both regular commands and procedure calls
            then_js = self._handle_cmd_or_proc(then_cmd)
            else_js = self._handle_cmd_or_proc(else_cmd) if else_cmd else ""
            if else_cmd:
                return textwrap.dedent(f"""
                    if ({js_condition}) {{
                      {then_js}
                    }} else {{
                      {else_js}
                    }}
                """).strip()
            else:
                return textwrap.dedent(f"""
                    if ({js_condition}) {{
                      {then_js}
                    }}
                """).strip()
        if op == "REPEAT":
            cmd, count = a
            # Handle the count - could be number or JS expression
            if count.isdigit():
                # Simple number
                repeat_js = self._handle_cmd_or_proc(cmd)
                return textwrap.dedent(f"""
                    for (let _i = 0; _i < {count}; _i++) {{
                      {repeat_js}
                    }}
                """).strip()
            else:
                # JS expression (from backticks)
                count_expr = count[1:-1] if count.startswith('`') and count.endswith('`') else count
                repeat_js = self._handle_cmd_or_proc(cmd)
                # A numeric expression drives a for-loop; any other truthy
                # value runs the body once.
                return textwrap.dedent(f"""
                    (()=>{{
                      const _count = {count_expr};
                      if (typeof _count === 'number') {{
                        for (let _i = 0; _i < _count; _i++) {{
                          {repeat_js}
                        }}
                      }} else if (_count) {{
                        {repeat_js}
                      }}
                    }})();
                """).strip()
        raise ValueError(f"Unhandled op {op}")

    def _emit_condition(self, condition):
        """Convert a condition tuple ((EXISTS|NOT|JS), payload) to JavaScript."""
        cond_type = condition[0]
        if cond_type == "EXISTS":
            return f"!!document.querySelector('{condition[1]}')"
        elif cond_type == "NOT":
            # Recursively handle the negated condition
            inner_condition = self._emit_condition(condition[1])
            return f"!({inner_condition})"
        else:
            # JS condition
            return condition[1]

    def _handle_cmd_or_proc(self, cmd):
        """Handle a command that might be a regular command or a procedure call.

        CALLs are inlined by emitting each command of the proc body; other
        commands are emitted directly.  Returns "" for a missing command.
        """
        if not cmd:
            return ""
        if isinstance(cmd, Cmd):
            if cmd.op == "CALL":
                # Inline the procedure
                if cmd.args[0] not in self.procs:
                    raise ValueError(f"Unknown procedure {cmd.args[0]!r}")
                proc_body = self.procs[cmd.args[0]].body
                return "\n".join([self._emit_js(c) for c in proc_body if c.op != "NOP"])
            else:
                return self._emit_js(cmd)
        return ""


# --------------------------------------------------------------------------- #
# 5. Helpers + demo
# --------------------------------------------------------------------------- #
def compile_string(script: Union[str, List[str]], *, root: Union[pathlib.Path, None] = None) -> List[str]:
    """Compile C4A-Script from string or list of strings to JavaScript.

    Args:
        script: C4A-Script as a string or list of command strings
        root: Root directory for resolving includes (optional)

    Returns:
        List of JavaScript command strings

    Raises:
        C4AScriptError: When compilation fails with detailed error information
    """
    try:
        return Compiler(root).compile(script)
    except Exception as e:
        # Wrap the error with better formatting
        # (C4AScriptError is defined/imported earlier in this module)
        raise C4AScriptError.from_exception(e, script)


def compile_file(path: pathlib.Path) -> List[str]:
    """Compile C4A-Script from file to JavaScript.

    Args:
        path: Path to C4A-Script file

    Returns:
        List of JavaScript command strings
    """
    # Includes are resolved relative to the file's own directory.
    return compile_string(path.read_text(), root=path.parent)


def compile_lines(lines: List[str], *, root: Union[pathlib.Path, None] = None) -> List[str]:
    """Compile C4A-Script from list of lines to JavaScript.

    Args:
        lines: List of C4A-Script command lines
        root: Root directory for resolving includes (optional)

    Returns:
        List of JavaScript command strings
    """
    return compile_string(lines, root=root)


# NOTE(review): line breaks inside this runtime string were reconstructed
# from a collapsed dump — one command per line; grammar ignores inline WS.
DEMO = """
# quick sanity demo
PROC login
  SET `input[name="username"]` $user
  SET `input[name="password"]` $pass
  CLICK `button.submit`
ENDPROC
SETVAR user = "tom@crawl4ai.com"
SETVAR pass = "hunter2"
GO https://example.com/login
WAIT `input[name="username"]` 10
login
WAIT 3
EVAL `console.log('logged in')`
"""

if __name__ == "__main__":
    # With a file argument, compile that file; otherwise run the demo script.
    if len(sys.argv) == 2:
        for js in compile_file(pathlib.Path(sys.argv[1])):
            print(js)
    else:
        print("=== DEMO ===")
        for js in compile_string(DEMO):
            print(js)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/script/c4a_compile.py
crawl4ai/script/c4a_compile.py
""" Clean C4A-Script API with Result pattern No exceptions - always returns results """ from __future__ import annotations import pathlib import re from typing import Union, List, Optional # JSON_SCHEMA_BUILDER is still used elsewhere, # but we now also need the new script-builder prompt. from ..prompts import GENERATE_JS_SCRIPT_PROMPT, GENERATE_SCRIPT_PROMPT import logging import re from .c4a_result import ( CompilationResult, ValidationResult, ErrorDetail, WarningDetail, ErrorType, Severity, Suggestion ) from .c4ai_script import Compiler from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError from ..async_configs import LLMConfig from ..utils import perform_completion_with_backoff class C4ACompiler: """Main compiler with result-based API""" # Error code mapping ERROR_CODES = { "missing_then": "E001", "missing_paren": "E002", "missing_comma": "E003", "missing_endproc": "E004", "undefined_proc": "E005", "missing_backticks": "E006", "invalid_command": "E007", "syntax_error": "E999" } @classmethod def compile(cls, script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult: """ Compile C4A-Script to JavaScript Args: script: C4A-Script as string or list of lines root: Root directory for includes Returns: CompilationResult with success status and JS code or errors """ # Normalize input if isinstance(script, list): script_text = '\n'.join(script) script_lines = script else: script_text = script script_lines = script.split('\n') try: # Try compilation compiler = Compiler(root) js_code = compiler.compile(script_text) # Success! 
result = CompilationResult( success=True, js_code=js_code, metadata={ "lineCount": len(script_lines), "statementCount": len(js_code) } ) # Add any warnings (future feature) # result.warnings = cls._check_warnings(script_text) return result except Exception as e: # Convert exception to ErrorDetail error = cls._exception_to_error(e, script_lines) return CompilationResult( success=False, errors=[error], metadata={ "lineCount": len(script_lines) } ) @classmethod def validate(cls, script: Union[str, List[str]]) -> ValidationResult: """ Validate script syntax without generating code Args: script: C4A-Script to validate Returns: ValidationResult with validity status and any errors """ result = cls.compile(script) return ValidationResult( valid=result.success, errors=result.errors, warnings=result.warnings ) @classmethod def compile_file(cls, path: Union[str, pathlib.Path]) -> CompilationResult: """ Compile a C4A-Script file Args: path: Path to the file Returns: CompilationResult """ path = pathlib.Path(path) if not path.exists(): error = ErrorDetail( type=ErrorType.RUNTIME, code="E100", severity=Severity.ERROR, message=f"File not found: {path}", line=0, column=0, source_line="" ) return CompilationResult(success=False, errors=[error]) try: script = path.read_text() return cls.compile(script, root=path.parent) except Exception as e: error = ErrorDetail( type=ErrorType.RUNTIME, code="E101", severity=Severity.ERROR, message=f"Error reading file: {str(e)}", line=0, column=0, source_line="" ) return CompilationResult(success=False, errors=[error]) @classmethod def _exception_to_error(cls, exc: Exception, script_lines: List[str]) -> ErrorDetail: """Convert an exception to ErrorDetail""" if isinstance(exc, UnexpectedToken): return cls._handle_unexpected_token(exc, script_lines) elif isinstance(exc, UnexpectedCharacters): return cls._handle_unexpected_chars(exc, script_lines) elif isinstance(exc, ValueError): return cls._handle_value_error(exc, script_lines) else: # Generic error 
return ErrorDetail( type=ErrorType.SYNTAX, code=cls.ERROR_CODES["syntax_error"], severity=Severity.ERROR, message=str(exc), line=1, column=1, source_line=script_lines[0] if script_lines else "" ) @classmethod def _handle_unexpected_token(cls, exc: UnexpectedToken, script_lines: List[str]) -> ErrorDetail: """Handle UnexpectedToken errors""" line = exc.line column = exc.column # Get context lines source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else "" line_before = script_lines[line - 2] if line > 1 and line <= len(script_lines) + 1 else None line_after = script_lines[line] if 0 < line < len(script_lines) else None # Determine error type and suggestions if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected): code = cls.ERROR_CODES["missing_then"] message = "Missing 'THEN' keyword after IF condition" suggestions = [ Suggestion( "Add 'THEN' after the condition", source_line.replace("CLICK", "THEN CLICK") if source_line else None ) ] elif exc.token.type == '$END': code = cls.ERROR_CODES["missing_endproc"] message = "Unexpected end of script" suggestions = [ Suggestion("Check for missing ENDPROC"), Suggestion("Ensure all procedures are properly closed") ] elif 'RPAR' in str(exc.expected): code = cls.ERROR_CODES["missing_paren"] message = "Missing closing parenthesis ')'" suggestions = [ Suggestion("Add closing parenthesis at the end of the condition") ] elif 'COMMA' in str(exc.expected): code = cls.ERROR_CODES["missing_comma"] message = "Missing comma ',' in command" suggestions = [ Suggestion("Add comma between arguments") ] else: # Check if this might be missing backticks if exc.token.type == 'NAME' and 'BACKTICK_STRING' in str(exc.expected): code = cls.ERROR_CODES["missing_backticks"] message = "Selector must be wrapped in backticks" suggestions = [ Suggestion( "Wrap the selector in backticks", f"`{exc.token.value}`" ) ] else: code = cls.ERROR_CODES["syntax_error"] message = f"Unexpected '{exc.token.value}'" if exc.expected: expected_list = 
[str(e) for e in exc.expected if not str(e).startswith('_')][:3] if expected_list: message += f". Expected: {', '.join(expected_list)}" suggestions = [] return ErrorDetail( type=ErrorType.SYNTAX, code=code, severity=Severity.ERROR, message=message, line=line, column=column, source_line=source_line, line_before=line_before, line_after=line_after, suggestions=suggestions ) @classmethod def _handle_unexpected_chars(cls, exc: UnexpectedCharacters, script_lines: List[str]) -> ErrorDetail: """Handle UnexpectedCharacters errors""" line = exc.line column = exc.column source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else "" # Check for missing backticks if "CLICK" in source_line and column > source_line.find("CLICK"): code = cls.ERROR_CODES["missing_backticks"] message = "Selector must be wrapped in backticks" suggestions = [ Suggestion( "Wrap the selector in backticks", re.sub(r'CLICK\s+([^\s]+)', r'CLICK `\1`', source_line) ) ] else: code = cls.ERROR_CODES["syntax_error"] message = f"Invalid character at position {column}" suggestions = [] return ErrorDetail( type=ErrorType.SYNTAX, code=code, severity=Severity.ERROR, message=message, line=line, column=column, source_line=source_line, suggestions=suggestions ) @classmethod def _handle_value_error(cls, exc: ValueError, script_lines: List[str]) -> ErrorDetail: """Handle ValueError (runtime errors)""" message = str(exc) # Check for undefined procedure if "Unknown procedure" in message: proc_match = re.search(r"'([^']+)'", message) if proc_match: proc_name = proc_match.group(1) # Find the line with the procedure call for i, line in enumerate(script_lines): if proc_name in line and not line.strip().startswith('PROC'): return ErrorDetail( type=ErrorType.RUNTIME, code=cls.ERROR_CODES["undefined_proc"], severity=Severity.ERROR, message=f"Undefined procedure '{proc_name}'", line=i + 1, column=line.find(proc_name) + 1, source_line=line, suggestions=[ Suggestion( f"Define the procedure before using it", f"PROC 
{proc_name}\n # commands here\nENDPROC" ) ] ) # Generic runtime error return ErrorDetail( type=ErrorType.RUNTIME, code="E999", severity=Severity.ERROR, message=message, line=1, column=1, source_line=script_lines[0] if script_lines else "" ) @staticmethod def generate_script( html: str, query: str | None = None, mode: str = "c4a", llm_config: LLMConfig | None = None, **completion_kwargs, ) -> str: """ One-shot helper that calls the LLM exactly once to convert a natural-language goal + HTML snippet into either: 1. raw JavaScript (`mode="js"`) 2. Crawl4ai DSL (`mode="c4a"`) The returned string is guaranteed to be free of markdown wrappers or explanatory text, ready for direct execution. """ if llm_config is None: llm_config = LLMConfig() # falls back to env vars / defaults # Build the user chunk user_prompt = "\n".join( [ "## GOAL", "<<goal>>", (query or "Prepare the page for crawling."), "<</goal>>", "", "## HTML", "<<html>>", html[:100000], # guardrail against token blast "<</html>>", "", "## MODE", mode, ] ) # Call the LLM with retry/back-off logic full_prompt = f"{GENERATE_SCRIPT_PROMPT}\n\n{user_prompt}" if mode == "c4a" else f"{GENERATE_JS_SCRIPT_PROMPT}\n\n{user_prompt}" response = perform_completion_with_backoff( provider=llm_config.provider, prompt_with_variables=full_prompt, api_token=llm_config.api_token, json_response=False, base_url=getattr(llm_config, 'base_url', None), **completion_kwargs, ) # Extract content from the response raw_response = response.choices[0].message.content.strip() # Strip accidental markdown fences (```js ... ```) clean = re.sub(r"^```(?:[a-zA-Z0-9_-]+)?\s*|```$", "", raw_response, flags=re.MULTILINE).strip() if not clean: raise RuntimeError("LLM returned empty script.") return clean # Convenience functions for direct use def compile(script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult: """Compile C4A-Script to JavaScript""" return C4ACompiler.compile(script, root) def validate(script: Union[str,
List[str]]) -> ValidationResult: """Validate C4A-Script syntax""" return C4ACompiler.validate(script) def compile_file(path: Union[str, pathlib.Path]) -> CompilationResult: """Compile C4A-Script file""" return C4ACompiler.compile_file(path)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/llmtxt.py
crawl4ai/legacy/llmtxt.py
import os from pathlib import Path import re from typing import Dict, List, Tuple, Optional, Any import json from tqdm import tqdm import time import psutil import numpy as np from rank_bm25 import BM25Okapi from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from litellm import batch_completion from .async_logger import AsyncLogger import litellm import pickle import hashlib # <--- ADDED for file-hash import glob litellm.set_verbose = False def _compute_file_hash(file_path: Path) -> str: """Compute MD5 hash for the file's entire content.""" hash_md5 = hashlib.md5() with file_path.open("rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() class AsyncLLMTextManager: def __init__( self, docs_dir: Path, logger: Optional[AsyncLogger] = None, max_concurrent_calls: int = 5, batch_size: int = 3, ) -> None: self.docs_dir = docs_dir self.logger = logger self.max_concurrent_calls = max_concurrent_calls self.batch_size = batch_size self.bm25_index = None self.document_map: Dict[str, Any] = {} self.tokenized_facts: List[str] = [] self.bm25_index_file = self.docs_dir / "bm25_index.pkl" async def _process_document_batch(self, doc_batch: List[Path]) -> None: """Process a batch of documents in parallel""" contents = [] for file_path in doc_batch: try: with open(file_path, "r", encoding="utf-8") as f: contents.append(f.read()) except Exception as e: self.logger.error(f"Error reading {file_path}: {str(e)}") contents.append("") # Add empty content to maintain batch alignment prompt = """Given a documentation file, generate a list of atomic facts where each fact: 1. Represents a single piece of knowledge 2. Contains variations in terminology for the same concept 3. References relevant code patterns if they exist 4. 
Is written in a way that would match natural language queries Each fact should follow this format: <main_concept>: <fact_statement> | <related_terms> | <code_reference> Example Facts: browser_config: Configure headless mode and browser type for AsyncWebCrawler | headless, browser_type, chromium, firefox | BrowserConfig(browser_type="chromium", headless=True) redis_connection: Redis client connection requires host and port configuration | redis setup, redis client, connection params | Redis(host='localhost', port=6379, db=0) pandas_filtering: Filter DataFrame rows using boolean conditions | dataframe filter, query, boolean indexing | df[df['column'] > 5] Wrap your response in <index>...</index> tags. """ # Prepare messages for batch processing messages_list = [ [ { "role": "user", "content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}", } ] for content in contents if content ] try: responses = batch_completion( model="anthropic/claude-3-5-sonnet-latest", messages=messages_list, logger_fn=None, ) # Process responses and save index files for response, file_path in zip(responses, doc_batch): try: index_content_match = re.search( r"<index>(.*?)</index>", response.choices[0].message.content, re.DOTALL, ) if not index_content_match: self.logger.warning( f"No <index>...</index> content found for {file_path}" ) continue index_content = re.sub( r"\n\s*\n", "\n", index_content_match.group(1) ).strip() if index_content: index_file = file_path.with_suffix(".q.md") with open(index_file, "w", encoding="utf-8") as f: f.write(index_content) self.logger.info(f"Created index file: {index_file}") else: self.logger.warning( f"No index content found in response for {file_path}" ) except Exception as e: self.logger.error( f"Error processing response for {file_path}: {str(e)}" ) except Exception as e: self.logger.error(f"Error in batch completion: {str(e)}") def _validate_fact_line(self, line: str) -> Tuple[bool, Optional[str]]: if "|" not in line: return False, 
"Missing separator '|'" parts = [p.strip() for p in line.split("|")] if len(parts) != 3: return False, f"Expected 3 parts, got {len(parts)}" concept_part = parts[0] if ":" not in concept_part: return False, "Missing ':' in concept definition" return True, None def _load_or_create_token_cache(self, fact_file: Path) -> Dict: """ Load token cache from .q.tokens if present and matching file hash. Otherwise return a new structure with updated file-hash. """ cache_file = fact_file.with_suffix(".q.tokens") current_hash = _compute_file_hash(fact_file) if cache_file.exists(): try: with open(cache_file, "r") as f: cache = json.load(f) # If the hash matches, return it directly if cache.get("content_hash") == current_hash: return cache # Otherwise, we signal that it's changed self.logger.info(f"Hash changed for {fact_file}, reindex needed.") except json.JSONDecodeError: self.logger.warning(f"Corrupt token cache for {fact_file}, rebuilding.") except Exception as e: self.logger.warning(f"Error reading cache for {fact_file}: {str(e)}") # Return a fresh cache return {"facts": {}, "content_hash": current_hash} def _save_token_cache(self, fact_file: Path, cache: Dict) -> None: cache_file = fact_file.with_suffix(".q.tokens") # Always ensure we're saving the correct file-hash cache["content_hash"] = _compute_file_hash(fact_file) with open(cache_file, "w") as f: json.dump(cache, f) def preprocess_text(self, text: str) -> List[str]: parts = [x.strip() for x in text.split("|")] if "|" in text else [text] # Remove : after the first word of parts[0] parts[0] = re.sub(r"^(.*?):", r"\1", parts[0]) lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words("english")) - { "how", "what", "when", "where", "why", "which", } tokens = [] for part in parts: if "(" in part and ")" in part: code_tokens = re.findall( r'[\w_]+(?=\()|[\w_]+(?==[\'"]{1}[\w_]+[\'"]{1})', part ) tokens.extend(code_tokens) words = word_tokenize(part.lower()) tokens.extend( [ lemmatizer.lemmatize(token) for token in 
words if token not in stop_words ] ) return tokens def maybe_load_bm25_index(self, clear_cache=False) -> bool: """ Load existing BM25 index from disk, if present and clear_cache=False. """ if not clear_cache and os.path.exists(self.bm25_index_file): self.logger.info("Loading existing BM25 index from disk.") with open(self.bm25_index_file, "rb") as f: data = pickle.load(f) self.tokenized_facts = data["tokenized_facts"] self.bm25_index = data["bm25_index"] return True return False def build_search_index(self, clear_cache=False) -> None: """ Checks for new or modified .q.md files by comparing file-hash. If none need reindexing and clear_cache is False, loads existing index if available. Otherwise, reindexes only changed/new files and merges or creates a new index. """ # If clear_cache is True, we skip partial logic: rebuild everything from scratch if clear_cache: self.logger.info("Clearing cache and rebuilding full search index.") if self.bm25_index_file.exists(): self.bm25_index_file.unlink() process = psutil.Process() self.logger.info("Checking which .q.md files need (re)indexing...") # Gather all .q.md files q_files = [ self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md") ] # We'll store known (unchanged) facts in these lists existing_facts: List[str] = [] existing_tokens: List[List[str]] = [] # Keep track of invalid lines for logging invalid_lines = [] needSet = [] # files that must be (re)indexed for qf in q_files: token_cache_file = qf.with_suffix(".q.tokens") # If no .q.tokens or clear_cache is True β†’ definitely reindex if clear_cache or not token_cache_file.exists(): needSet.append(qf) continue # Otherwise, load the existing cache and compare hash cache = self._load_or_create_token_cache(qf) # If the .q.tokens was out of date (i.e. 
changed hash), we reindex if len(cache["facts"]) == 0 or cache.get( "content_hash" ) != _compute_file_hash(qf): needSet.append(qf) else: # File is unchanged β†’ retrieve cached token data for line, cache_data in cache["facts"].items(): existing_facts.append(line) existing_tokens.append(cache_data["tokens"]) self.document_map[line] = qf # track the doc for that fact if not needSet and not clear_cache: # If no file needs reindexing, try loading existing index if self.maybe_load_bm25_index(clear_cache=False): self.logger.info( "No new/changed .q.md files found. Using existing BM25 index." ) return else: # If there's no existing index, we must build a fresh index from the old caches self.logger.info( "No existing BM25 index found. Building from cached facts." ) if existing_facts: self.logger.info( f"Building BM25 index with {len(existing_facts)} cached facts." ) self.bm25_index = BM25Okapi(existing_tokens) self.tokenized_facts = existing_facts with open(self.bm25_index_file, "wb") as f: pickle.dump( { "bm25_index": self.bm25_index, "tokenized_facts": self.tokenized_facts, }, f, ) else: self.logger.warning("No facts found at all. Index remains empty.") return # ----------------------------------------------------- /Users/unclecode/.crawl4ai/docs/14_proxy_security.q.q.tokens '/Users/unclecode/.crawl4ai/docs/14_proxy_security.q.md' # If we reach here, we have new or changed .q.md files # We'll parse them, reindex them, and then combine with existing_facts # ----------------------------------------------------- self.logger.info(f"{len(needSet)} file(s) need reindexing. 
Parsing now...") # 1) Parse the new or changed .q.md files new_facts = [] new_tokens = [] with tqdm(total=len(needSet), desc="Indexing changed files") as file_pbar: for file in needSet: # We'll build up a fresh cache fresh_cache = {"facts": {}, "content_hash": _compute_file_hash(file)} try: with open(file, "r", encoding="utf-8") as f_obj: content = f_obj.read().strip() lines = [l.strip() for l in content.split("\n") if l.strip()] for line in lines: is_valid, error = self._validate_fact_line(line) if not is_valid: invalid_lines.append((file, line, error)) continue tokens = self.preprocess_text(line) fresh_cache["facts"][line] = { "tokens": tokens, "added": time.time(), } new_facts.append(line) new_tokens.append(tokens) self.document_map[line] = file # Save the new .q.tokens with updated hash self._save_token_cache(file, fresh_cache) mem_usage = process.memory_info().rss / 1024 / 1024 self.logger.debug( f"Memory usage after {file.name}: {mem_usage:.2f}MB" ) except Exception as e: self.logger.error(f"Error processing {file}: {str(e)}") file_pbar.update(1) if invalid_lines: self.logger.warning(f"Found {len(invalid_lines)} invalid fact lines:") for file, line, error in invalid_lines: self.logger.warning(f"{file}: {error} in line: {line[:50]}...") # 2) Merge newly tokenized facts with the existing ones all_facts = existing_facts + new_facts all_tokens = existing_tokens + new_tokens # 3) Build BM25 index from combined facts self.logger.info( f"Building BM25 index with {len(all_facts)} total facts (old + new)." ) self.bm25_index = BM25Okapi(all_tokens) self.tokenized_facts = all_facts # 4) Save the updated BM25 index to disk with open(self.bm25_index_file, "wb") as f: pickle.dump( { "bm25_index": self.bm25_index, "tokenized_facts": self.tokenized_facts, }, f, ) final_mem = process.memory_info().rss / 1024 / 1024 self.logger.info(f"Search index updated. 
Final memory usage: {final_mem:.2f}MB") async def generate_index_files( self, force_generate_facts: bool = False, clear_bm25_cache: bool = False ) -> None: """ Generate index files for all documents in parallel batches Args: force_generate_facts (bool): If True, regenerate indexes even if they exist clear_bm25_cache (bool): If True, clear existing BM25 index cache """ self.logger.info("Starting index generation for documentation files.") md_files = [ self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"]) ] # Filter out files that already have .q files unless force=True if not force_generate_facts: md_files = [ f for f in md_files if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists() ] if not md_files: self.logger.info("All index files exist. Use force=True to regenerate.") else: # Process documents in batches for i in range(0, len(md_files), self.batch_size): batch = md_files[i : i + self.batch_size] self.logger.info( f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}" ) await self._process_document_batch(batch) self.logger.info("Index generation complete, building/updating search index.") self.build_search_index(clear_cache=clear_bm25_cache) def generate(self, sections: List[str], mode: str = "extended") -> str: # Get all markdown files all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob( str(self.docs_dir / "[0-9]*.xs.md") ) # Extract base names without extensions base_docs = { Path(f).name.split(".")[0] for f in all_files if not Path(f).name.endswith(".q.md") } # Filter by sections if provided if sections: base_docs = { doc for doc in base_docs if any(section.lower() in doc.lower() for section in sections) } # Get file paths based on mode files = [] for doc in sorted( base_docs, key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999, ): if mode == "condensed": xs_file = self.docs_dir / f"{doc}.xs.md" 
regular_file = self.docs_dir / f"{doc}.md" files.append(str(xs_file if xs_file.exists() else regular_file)) else: files.append(str(self.docs_dir / f"{doc}.md")) # Read and format content content = [] for file in files: try: with open(file, "r", encoding="utf-8") as f: fname = Path(file).name content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}") except Exception as e: self.logger.error(f"Error reading {file}: {str(e)}") return "\n\n---\n\n".join(content) if content else "" def search(self, query: str, top_k: int = 5) -> str: if not self.bm25_index: return "No search index available. Call build_search_index() first." query_tokens = self.preprocess_text(query) doc_scores = self.bm25_index.get_scores(query_tokens) mean_score = np.mean(doc_scores) std_score = np.std(doc_scores) score_threshold = mean_score + (0.25 * std_score) file_data = self._aggregate_search_scores( doc_scores=doc_scores, score_threshold=score_threshold, query_tokens=query_tokens, ) ranked_files = sorted( file_data.items(), key=lambda x: ( x[1]["code_match_score"] * 2.0 + x[1]["match_count"] * 1.5 + x[1]["total_score"] ), reverse=True, )[:top_k] results = [] for file, _ in ranked_files: main_doc = str(file).replace(".q.md", ".md") if os.path.exists(self.docs_dir / main_doc): with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f: only_file_name = main_doc.split("/")[-1] content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()] results.append("\n".join(content)) return "\n\n---\n\n".join(results) def _aggregate_search_scores( self, doc_scores: List[float], score_threshold: float, query_tokens: List[str] ) -> Dict: file_data = {} for idx, score in enumerate(doc_scores): if score <= score_threshold: continue fact = self.tokenized_facts[idx] file_path = self.document_map[fact] if file_path not in file_data: file_data[file_path] = { "total_score": 0, "match_count": 0, "code_match_score": 0, "matched_facts": [], } components = fact.split("|") if "|" in fact else [fact] 
code_match_score = 0 if len(components) == 3: code_ref = components[2].strip() code_tokens = self.preprocess_text(code_ref) code_match_score = len(set(query_tokens) & set(code_tokens)) / len( query_tokens ) file_data[file_path]["total_score"] += score file_data[file_path]["match_count"] += 1 file_data[file_path]["code_match_score"] = max( file_data[file_path]["code_match_score"], code_match_score ) file_data[file_path]["matched_facts"].append(fact) return file_data def refresh_index(self) -> None: """Convenience method for a full rebuild.""" self.build_search_index(clear_cache=True)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/docs_manager.py
crawl4ai/legacy/docs_manager.py
import requests import shutil from pathlib import Path from crawl4ai.async_logger import AsyncLogger from crawl4ai.llmtxt import AsyncLLMTextManager class DocsManager: def __init__(self, logger=None): self.docs_dir = Path.home() / ".crawl4ai" / "docs" self.local_docs = Path(__file__).parent.parent / "docs" / "llm.txt" self.docs_dir.mkdir(parents=True, exist_ok=True) self.logger = logger or AsyncLogger(verbose=True) self.llm_text = AsyncLLMTextManager(self.docs_dir, self.logger) async def ensure_docs_exist(self): """Fetch docs if not present""" if not any(self.docs_dir.iterdir()): await self.fetch_docs() async def fetch_docs(self) -> bool: """Copy from local docs or download from GitHub""" try: # Try local first if self.local_docs.exists() and ( any(self.local_docs.glob("*.md")) or any(self.local_docs.glob("*.tokens")) ): # Empty the local docs directory for file_path in self.docs_dir.glob("*.md"): file_path.unlink() # for file_path in self.docs_dir.glob("*.tokens"): # file_path.unlink() for file_path in self.local_docs.glob("*.md"): shutil.copy2(file_path, self.docs_dir / file_path.name) # for file_path in self.local_docs.glob("*.tokens"): # shutil.copy2(file_path, self.docs_dir / file_path.name) return True # Fallback to GitHub response = requests.get( "https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt", headers={"Accept": "application/vnd.github.v3+json"}, ) response.raise_for_status() for item in response.json(): if item["type"] == "file" and item["name"].endswith(".md"): content = requests.get(item["download_url"]).text with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f: f.write(content) return True except Exception as e: self.logger.error(f"Failed to fetch docs: {str(e)}") raise def list(self) -> list[str]: """List available topics""" names = [file_path.stem for file_path in self.docs_dir.glob("*.md")] # Remove [0-9]+_ prefix names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names] # Exclude those 
end with .xs.md and .q.md names = [ name for name in names if not name.endswith(".xs") and not name.endswith(".q") ] return names def generate(self, sections, mode="extended"): return self.llm_text.generate(sections, mode) def search(self, query: str, top_k: int = 5): return self.llm_text.search(query, top_k)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/crawler_strategy.py
crawl4ai/legacy/crawler_strategy.py
from abc import ABC, abstractmethod from selenium import webdriver from selenium.webdriver.chrome.service import Service from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.chrome.options import Options from selenium.common.exceptions import InvalidArgumentException, WebDriverException # from selenium.webdriver.chrome.service import Service as ChromeService # from webdriver_manager.chrome import ChromeDriverManager # from urllib3.exceptions import MaxRetryError from .config import * import logging, time import base64 from PIL import Image, ImageDraw, ImageFont from io import BytesIO from typing import Callable import requests import os from pathlib import Path from .utils import * logger = logging.getLogger("selenium.webdriver.remote.remote_connection") logger.setLevel(logging.WARNING) logger_driver = logging.getLogger("selenium.webdriver.common.service") logger_driver.setLevel(logging.WARNING) urllib3_logger = logging.getLogger("urllib3.connectionpool") urllib3_logger.setLevel(logging.WARNING) # Disable http.client logging http_client_logger = logging.getLogger("http.client") http_client_logger.setLevel(logging.WARNING) # Disable driver_finder and service logging driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder") driver_finder_logger.setLevel(logging.WARNING) class CrawlerStrategy(ABC): @abstractmethod def crawl(self, url: str, **kwargs) -> str: pass @abstractmethod def take_screenshot(self, save_path: str): pass @abstractmethod def update_user_agent(self, user_agent: str): pass @abstractmethod def set_hook(self, hook_type: str, hook: Callable): pass class CloudCrawlerStrategy(CrawlerStrategy): def __init__(self, use_cached_html=False): super().__init__() self.use_cached_html = use_cached_html def crawl(self, url: str) -> str: data = { "urls": [url], "include_raw_html": True, "forced": True, 
"extract_blocks": False, } response = requests.post("http://crawl4ai.uccode.io/crawl", json=data) response = response.json() html = response["results"][0]["html"] return sanitize_input_encode(html) class LocalSeleniumCrawlerStrategy(CrawlerStrategy): def __init__(self, use_cached_html=False, js_code=None, **kwargs): super().__init__() print("[LOG] πŸš€ Initializing LocalSeleniumCrawlerStrategy") self.options = Options() self.options.headless = True if kwargs.get("proxy"): self.options.add_argument("--proxy-server={}".format(kwargs.get("proxy"))) if kwargs.get("user_agent"): self.options.add_argument("--user-agent=" + kwargs.get("user_agent")) else: user_agent = kwargs.get( "user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36", ) self.options.add_argument(f"--user-agent={user_agent}") self.options.add_argument( "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" ) self.options.headless = kwargs.get("headless", True) if self.options.headless: self.options.add_argument("--headless") self.options.add_argument("--disable-gpu") self.options.add_argument("--window-size=1920,1080") self.options.add_argument("--no-sandbox") self.options.add_argument("--disable-dev-shm-usage") self.options.add_argument("--disable-blink-features=AutomationControlled") # self.options.add_argument("--disable-dev-shm-usage") self.options.add_argument("--disable-gpu") # self.options.add_argument("--disable-extensions") # self.options.add_argument("--disable-infobars") # self.options.add_argument("--disable-logging") # self.options.add_argument("--disable-popup-blocking") # self.options.add_argument("--disable-translate") # self.options.add_argument("--disable-default-apps") # self.options.add_argument("--disable-background-networking") # self.options.add_argument("--disable-sync") # 
self.options.add_argument("--disable-features=NetworkService,NetworkServiceInProcess") # self.options.add_argument("--disable-browser-side-navigation") # self.options.add_argument("--dns-prefetch-disable") # self.options.add_argument("--disable-web-security") self.options.add_argument("--log-level=3") self.use_cached_html = use_cached_html self.use_cached_html = use_cached_html self.js_code = js_code self.verbose = kwargs.get("verbose", False) # Hooks self.hooks = { "on_driver_created": None, "on_user_agent_updated": None, "before_get_url": None, "after_get_url": None, "before_return_html": None, } # chromedriver_autoinstaller.install() # import chromedriver_autoinstaller # crawl4ai_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai") # driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=self.options) # chromedriver_path = chromedriver_autoinstaller.install() # chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver() # self.service = Service(chromedriver_autoinstaller.install()) # chromedriver_path = ChromeDriverManager().install() # self.service = Service(chromedriver_path) # self.service.log_path = "NUL" # self.driver = webdriver.Chrome(service=self.service, options=self.options) # Use selenium-manager (built into Selenium 4.10.0+) self.service = Service() self.driver = webdriver.Chrome(options=self.options) self.driver = self.execute_hook("on_driver_created", self.driver) if kwargs.get("cookies"): for cookie in kwargs.get("cookies"): self.driver.add_cookie(cookie) def set_hook(self, hook_type: str, hook: Callable): if hook_type in self.hooks: self.hooks[hook_type] = hook else: raise ValueError(f"Invalid hook type: {hook_type}") def execute_hook(self, hook_type: str, *args): hook = self.hooks.get(hook_type) if hook: result = hook(*args) if result is not None: if isinstance(result, webdriver.Chrome): return result else: raise TypeError( f"Hook {hook_type} must return an 
instance of webdriver.Chrome or None." ) # If the hook returns None or there is no hook, return self.driver return self.driver def update_user_agent(self, user_agent: str): self.options.add_argument(f"user-agent={user_agent}") self.driver.quit() self.driver = webdriver.Chrome(service=self.service, options=self.options) self.driver = self.execute_hook("on_user_agent_updated", self.driver) def set_custom_headers(self, headers: dict): # Enable Network domain for sending headers self.driver.execute_cdp_cmd("Network.enable", {}) # Set extra HTTP headers self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers}) def _ensure_page_load(self, max_checks=6, check_interval=0.01): initial_length = len(self.driver.page_source) for ix in range(max_checks): # print(f"Checking page load: {ix}") time.sleep(check_interval) current_length = len(self.driver.page_source) if current_length != initial_length: break return self.driver.page_source def crawl(self, url: str, **kwargs) -> str: # Create md5 hash of the URL import hashlib url_hash = hashlib.md5(url.encode()).hexdigest() if self.use_cached_html: cache_file_path = os.path.join( os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", "cache", url_hash, ) if os.path.exists(cache_file_path): with open(cache_file_path, "r") as f: return sanitize_input_encode(f.read()) try: self.driver = self.execute_hook("before_get_url", self.driver) if self.verbose: print(f"[LOG] πŸ•ΈοΈ Crawling {url} using LocalSeleniumCrawlerStrategy...") self.driver.get(url) # <html><head></head><body></body></html> WebDriverWait(self.driver, 20).until( lambda d: d.execute_script("return document.readyState") == "complete" ) WebDriverWait(self.driver, 10).until( EC.presence_of_all_elements_located((By.TAG_NAME, "body")) ) self.driver.execute_script( "window.scrollTo(0, document.body.scrollHeight);" ) self.driver = self.execute_hook("after_get_url", self.driver) html = sanitize_input_encode( self._ensure_page_load() ) # 
self.driver.page_source can_not_be_done_headless = ( False # Look at my creativity for naming variables ) # TODO: Very ugly approach, but promise to change it! if ( kwargs.get("bypass_headless", False) or html == "<html><head></head><body></body></html>" ): print( "[LOG] πŸ™Œ Page could not be loaded in headless mode. Trying non-headless mode..." ) can_not_be_done_headless = True options = Options() options.headless = False # set window size very small options.add_argument("--window-size=5,5") driver = webdriver.Chrome(service=self.service, options=options) driver.get(url) self.driver = self.execute_hook("after_get_url", driver) html = sanitize_input_encode(driver.page_source) driver.quit() # Execute JS code if provided self.js_code = kwargs.get("js_code", self.js_code) if self.js_code and type(self.js_code) == str: self.driver.execute_script(self.js_code) # Optionally, wait for some condition after executing the JS code WebDriverWait(self.driver, 10).until( lambda driver: driver.execute_script("return document.readyState") == "complete" ) elif self.js_code and type(self.js_code) == list: for js in self.js_code: self.driver.execute_script(js) WebDriverWait(self.driver, 10).until( lambda driver: driver.execute_script( "return document.readyState" ) == "complete" ) # Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky) wait_for = kwargs.get("wait_for", False) if wait_for: if callable(wait_for): print("[LOG] πŸ”„ Waiting for condition...") WebDriverWait(self.driver, 20).until(wait_for) else: print("[LOG] πŸ”„ Waiting for condition...") WebDriverWait(self.driver, 20).until( EC.presence_of_element_located((By.CSS_SELECTOR, wait_for)) ) if not can_not_be_done_headless: html = sanitize_input_encode(self.driver.page_source) self.driver = self.execute_hook("before_return_html", self.driver, html) # Store in cache cache_file_path = os.path.join( os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", 
"cache", url_hash, ) with open(cache_file_path, "w", encoding="utf-8") as f: f.write(html) if self.verbose: print(f"[LOG] βœ… Crawled {url} successfully!") return html except InvalidArgumentException as e: if not hasattr(e, "msg"): e.msg = sanitize_input_encode(str(e)) raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}") except WebDriverException as e: # If e does nlt have msg attribute create it and set it to str(e) if not hasattr(e, "msg"): e.msg = sanitize_input_encode(str(e)) raise WebDriverException(f"Failed to crawl {url}: {e.msg}") except Exception as e: if not hasattr(e, "msg"): e.msg = sanitize_input_encode(str(e)) raise Exception(f"Failed to crawl {url}: {e.msg}") def take_screenshot(self) -> str: try: # Get the dimensions of the page total_width = self.driver.execute_script("return document.body.scrollWidth") total_height = self.driver.execute_script( "return document.body.scrollHeight" ) # Set the window size to the dimensions of the page self.driver.set_window_size(total_width, total_height) # Take screenshot screenshot = self.driver.get_screenshot_as_png() # Open the screenshot with PIL image = Image.open(BytesIO(screenshot)) # Convert image to RGB mode (this will handle both RGB and RGBA images) rgb_image = image.convert("RGB") # Convert to JPEG and compress buffered = BytesIO() rgb_image.save(buffered, format="JPEG", quality=85) img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8") if self.verbose: print("[LOG] πŸ“Έ Screenshot taken and converted to base64") return img_base64 except Exception as e: error_message = sanitize_input_encode( f"Failed to take screenshot: {str(e)}" ) print(error_message) # Generate an image with black background img = Image.new("RGB", (800, 600), color="black") draw = ImageDraw.Draw(img) # Load a font try: font = ImageFont.truetype("arial.ttf", 40) except IOError: font = ImageFont.load_default() # Define text color and wrap the text text_color = (255, 255, 255) max_width = 780 wrapped_text = 
wrap_text(draw, error_message, font, max_width) # Calculate text position text_position = (10, 10) # Draw the text on the image draw.text(text_position, wrapped_text, fill=text_color, font=font) # Convert to base64 buffered = BytesIO() img.save(buffered, format="JPEG") img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8") return img_base64 def quit(self): self.driver.quit()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/cli.py
crawl4ai/legacy/cli.py
import click import sys import asyncio from typing import List from .docs_manager import DocsManager from .async_logger import AsyncLogger logger = AsyncLogger(verbose=True) docs_manager = DocsManager(logger) def print_table(headers: List[str], rows: List[List[str]], padding: int = 2): """Print formatted table with headers and rows""" widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)] border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+" def format_row(row): return ( "|" + "|".join( f"{' ' * padding}{str(cell):<{w}}{' ' * padding}" for cell, w in zip(row, widths) ) + "|" ) click.echo(border) click.echo(format_row(headers)) click.echo(border) for row in rows: click.echo(format_row(row)) click.echo(border) @click.group() def cli(): """Crawl4AI Command Line Interface""" pass @cli.group() def docs(): """Documentation operations""" pass @docs.command() @click.argument("sections", nargs=-1) @click.option( "--mode", type=click.Choice(["extended", "condensed"]), default="extended" ) def combine(sections: tuple, mode: str): """Combine documentation sections""" try: asyncio.run(docs_manager.ensure_docs_exist()) click.echo(docs_manager.generate(sections, mode)) except Exception as e: logger.error(str(e), tag="ERROR") sys.exit(1) @docs.command() @click.argument("query") @click.option("--top-k", "-k", default=5) @click.option("--build-index", is_flag=True, help="Build index if missing") def search(query: str, top_k: int, build_index: bool): """Search documentation""" try: result = docs_manager.search(query, top_k) if result == "No search index available. Call build_search_index() first.": if build_index or click.confirm("No search index found. 
Build it now?"): asyncio.run(docs_manager.llm_text.generate_index_files()) result = docs_manager.search(query, top_k) click.echo(result) except Exception as e: click.echo(f"Error: {str(e)}", err=True) sys.exit(1) @docs.command() def update(): """Update docs from GitHub""" try: asyncio.run(docs_manager.fetch_docs()) click.echo("Documentation updated successfully") except Exception as e: click.echo(f"Error: {str(e)}", err=True) sys.exit(1) @docs.command() @click.option("--force-facts", is_flag=True, help="Force regenerate fact files") @click.option("--clear-cache", is_flag=True, help="Clear BM25 cache") def index(force_facts: bool, clear_cache: bool): """Build or rebuild search indexes""" try: asyncio.run(docs_manager.ensure_docs_exist()) asyncio.run( docs_manager.llm_text.generate_index_files( force_generate_facts=force_facts, clear_bm25_cache=clear_cache ) ) click.echo("Search indexes built successfully") except Exception as e: click.echo(f"Error: {str(e)}", err=True) sys.exit(1) # Add docs list command @docs.command() def list(): """List available documentation sections""" try: sections = docs_manager.list() print_table(["Sections"], [[section] for section in sections]) except Exception as e: click.echo(f"Error: {str(e)}", err=True) sys.exit(1) if __name__ == "__main__": cli()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/version_manager.py
crawl4ai/legacy/version_manager.py
# version_manager.py from pathlib import Path from packaging import version from . import __version__ class VersionManager: def __init__(self): self.home_dir = Path.home() / ".crawl4ai" self.version_file = self.home_dir / "version.txt" def get_installed_version(self): """Get the version recorded in home directory""" if not self.version_file.exists(): return None try: return version.parse(self.version_file.read_text().strip()) except: return None def update_version(self): """Update the version file to current library version""" self.version_file.write_text(__version__.__version__) def needs_update(self): """Check if database needs update based on version""" installed = self.get_installed_version() current = version.parse(__version__.__version__) return installed is None or installed < current
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/database.py
crawl4ai/legacy/database.py
import os from pathlib import Path import sqlite3 from typing import Optional, Tuple DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai") os.makedirs(DB_PATH, exist_ok=True) DB_PATH = os.path.join(DB_PATH, "crawl4ai.db") def init_db(): global DB_PATH conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute( """ CREATE TABLE IF NOT EXISTS crawled_data ( url TEXT PRIMARY KEY, html TEXT, cleaned_html TEXT, markdown TEXT, extracted_content TEXT, success BOOLEAN, media TEXT DEFAULT "{}", links TEXT DEFAULT "{}", metadata TEXT DEFAULT "{}", screenshot TEXT DEFAULT "" ) """ ) conn.commit() conn.close() def alter_db_add_screenshot(new_column: str = "media"): check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute( f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""' ) conn.commit() conn.close() except Exception as e: print(f"Error altering database to add screenshot column: {e}") def check_db_path(): if not DB_PATH: raise ValueError("Database path is not set or is empty.") def get_cached_url( url: str, ) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]: check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute( "SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?", (url,), ) result = cursor.fetchone() conn.close() return result except Exception as e: print(f"Error retrieving cached URL: {e}") return None def cache_url( url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media: str = "{}", links: str = "{}", metadata: str = "{}", screenshot: str = "", ): check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute( """ INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 
?) ON CONFLICT(url) DO UPDATE SET html = excluded.html, cleaned_html = excluded.cleaned_html, markdown = excluded.markdown, extracted_content = excluded.extracted_content, success = excluded.success, media = excluded.media, links = excluded.links, metadata = excluded.metadata, screenshot = excluded.screenshot """, ( url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot, ), ) conn.commit() conn.close() except Exception as e: print(f"Error caching URL: {e}") def get_total_count() -> int: check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute("SELECT COUNT(*) FROM crawled_data") result = cursor.fetchone() conn.close() return result[0] except Exception as e: print(f"Error getting total count: {e}") return 0 def clear_db(): check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute("DELETE FROM crawled_data") conn.commit() conn.close() except Exception as e: print(f"Error clearing database: {e}") def flush_db(): check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute("DROP TABLE crawled_data") conn.commit() conn.close() except Exception as e: print(f"Error flushing database: {e}") def update_existing_records(new_column: str = "media", default_value: str = "{}"): check_db_path() try: conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute( f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL' ) conn.commit() conn.close() except Exception as e: print(f"Error updating existing records: {e}") if __name__ == "__main__": # Delete the existing database file if os.path.exists(DB_PATH): os.remove(DB_PATH) init_db() # alter_db_add_screenshot("COL_NAME")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/__init__.py
crawl4ai/legacy/__init__.py
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/legacy/web_crawler.py
crawl4ai/legacy/web_crawler.py
import os, time os.environ["TOKENIZERS_PARALLELISM"] = "false" from pathlib import Path from .models import UrlModel, CrawlResult from .database import init_db, get_cached_url, cache_url from .utils import * from .chunking_strategy import * from .extraction_strategy import * from .crawler_strategy import * from typing import List from concurrent.futures import ThreadPoolExecutor from ..content_scraping_strategy import LXMLWebScrapingStrategy as WebScrapingStrategy from .config import * import warnings import json warnings.filterwarnings( "ignore", message='Field "model_name" has conflict with protected namespace "model_".', ) class WebCrawler: def __init__( self, crawler_strategy: CrawlerStrategy = None, always_by_pass_cache: bool = False, verbose: bool = False, ): self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy( verbose=verbose ) self.always_by_pass_cache = always_by_pass_cache self.crawl4ai_folder = os.path.join( os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai" ) os.makedirs(self.crawl4ai_folder, exist_ok=True) os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True) init_db() self.ready = False def warmup(self): print("[LOG] 🌀️ Warming up the WebCrawler") self.run( url="https://google.com/", word_count_threshold=5, extraction_strategy=NoExtractionStrategy(), bypass_cache=False, verbose=False, ) self.ready = True print("[LOG] 🌞 WebCrawler is ready to crawl") def fetch_page( self, url_model: UrlModel, provider: str = DEFAULT_PROVIDER, api_token: str = None, extract_blocks_flag: bool = True, word_count_threshold=MIN_WORD_THRESHOLD, css_selector: str = None, screenshot: bool = False, use_cached_html: bool = False, extraction_strategy: ExtractionStrategy = None, chunking_strategy: ChunkingStrategy = RegexChunking(), **kwargs, ) -> CrawlResult: return self.run( url_model.url, word_count_threshold, extraction_strategy or NoExtractionStrategy(), chunking_strategy, bypass_cache=url_model.forced, css_selector=css_selector, 
screenshot=screenshot, **kwargs, ) pass def fetch_pages( self, url_models: List[UrlModel], provider: str = DEFAULT_PROVIDER, api_token: str = None, extract_blocks_flag: bool = True, word_count_threshold=MIN_WORD_THRESHOLD, use_cached_html: bool = False, css_selector: str = None, screenshot: bool = False, extraction_strategy: ExtractionStrategy = None, chunking_strategy: ChunkingStrategy = RegexChunking(), **kwargs, ) -> List[CrawlResult]: extraction_strategy = extraction_strategy or NoExtractionStrategy() def fetch_page_wrapper(url_model, *args, **kwargs): return self.fetch_page(url_model, *args, **kwargs) with ThreadPoolExecutor() as executor: results = list( executor.map( fetch_page_wrapper, url_models, [provider] * len(url_models), [api_token] * len(url_models), [extract_blocks_flag] * len(url_models), [word_count_threshold] * len(url_models), [css_selector] * len(url_models), [screenshot] * len(url_models), [use_cached_html] * len(url_models), [extraction_strategy] * len(url_models), [chunking_strategy] * len(url_models), *[kwargs] * len(url_models), ) ) return results def run( self, url: str, word_count_threshold=MIN_WORD_THRESHOLD, extraction_strategy: ExtractionStrategy = None, chunking_strategy: ChunkingStrategy = RegexChunking(), bypass_cache: bool = False, css_selector: str = None, screenshot: bool = False, user_agent: str = None, verbose=True, **kwargs, ) -> CrawlResult: try: extraction_strategy = extraction_strategy or NoExtractionStrategy() extraction_strategy.verbose = verbose if not isinstance(extraction_strategy, ExtractionStrategy): raise ValueError("Unsupported extraction strategy") if not isinstance(chunking_strategy, ChunkingStrategy): raise ValueError("Unsupported chunking strategy") word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD) cached = None screenshot_data = None extracted_content = None if not bypass_cache and not self.always_by_pass_cache: cached = get_cached_url(url) if kwargs.get("warmup", True) and not self.ready: 
return None if cached: html = sanitize_input_encode(cached[1]) extracted_content = sanitize_input_encode(cached[4]) if screenshot: screenshot_data = cached[9] if not screenshot_data: cached = None if not cached or not html: if user_agent: self.crawler_strategy.update_user_agent(user_agent) t1 = time.time() html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs)) t2 = time.time() if verbose: print( f"[LOG] πŸš€ Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds" ) if screenshot: screenshot_data = self.crawler_strategy.take_screenshot() crawl_result = self.process_html( url, html, extracted_content, word_count_threshold, extraction_strategy, chunking_strategy, css_selector, screenshot_data, verbose, bool(cached), **kwargs, ) crawl_result.success = bool(html) return crawl_result except Exception as e: if not hasattr(e, "msg"): e.msg = str(e) print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}") return CrawlResult(url=url, html="", success=False, error_message=e.msg) def process_html( self, url: str, html: str, extracted_content: str, word_count_threshold: int, extraction_strategy: ExtractionStrategy, chunking_strategy: ChunkingStrategy, css_selector: str, screenshot: bool, verbose: bool, is_cached: bool, **kwargs, ) -> CrawlResult: t = time.time() # Extract content from HTML try: t1 = time.time() scrapping_strategy = WebScrapingStrategy() extra_params = { k: v for k, v in kwargs.items() if k not in ["only_text", "image_description_min_word_threshold"] } result = scrapping_strategy.scrap( url, html, word_count_threshold=word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False), image_description_min_word_threshold=kwargs.get( "image_description_min_word_threshold", IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD, ), **extra_params, ) # result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False)) if verbose: print( 
f"[LOG] πŸš€ Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds" ) if result is None: raise ValueError(f"Failed to extract content from the website: {url}") except InvalidCSSSelectorError as e: raise ValueError(str(e)) cleaned_html = sanitize_input_encode(result.get("cleaned_html", "")) markdown = sanitize_input_encode(result.get("markdown", "")) media = result.get("media", []) links = result.get("links", []) metadata = result.get("metadata", {}) if extracted_content is None: if verbose: print( f"[LOG] πŸ”₯ Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}" ) sections = chunking_strategy.chunk(markdown) extracted_content = extraction_strategy.run(url, sections) extracted_content = json.dumps( extracted_content, indent=4, default=str, ensure_ascii=False ) if verbose: print( f"[LOG] πŸš€ Extraction done for {url}, time taken: {time.time() - t:.2f} seconds." ) screenshot = None if not screenshot else screenshot if not is_cached: cache_url( url, html, cleaned_html, markdown, extracted_content, True, json.dumps(media), json.dumps(links), json.dumps(metadata), screenshot=screenshot, ) return CrawlResult( url=url, html=html, cleaned_html=format_html(cleaned_html), markdown=markdown, media=media, links=links, metadata=metadata, screenshot=screenshot, extracted_content=extracted_content, success=True, error_message="", )
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/processors/pdf/processor.py
crawl4ai/processors/pdf/processor.py
import logging import re from abc import ABC, abstractmethod from datetime import datetime from pathlib import Path from time import time from dataclasses import dataclass, asdict, field from typing import Dict, List, Optional, Any, Union import base64 import tempfile from .utils import * from .utils import ( apply_png_predictor, clean_pdf_text, clean_pdf_text_to_html, ) # Remove direct pypdf imports from the top # import pypdf # from pypdf import PdfReader logger = logging.getLogger(__name__) @dataclass class PDFMetadata: title: Optional[str] = None author: Optional[str] = None producer: Optional[str] = None created: Optional[datetime] = None modified: Optional[datetime] = None pages: int = 0 encrypted: bool = False file_size: Optional[int] = None @dataclass class PDFPage: page_number: int raw_text: str = "" markdown: str = "" html: str = "" images: List[Dict] = field(default_factory=list) links: List[str] = field(default_factory=list) layout: List[Dict] = field(default_factory=list) @dataclass class PDFProcessResult: metadata: PDFMetadata pages: List[PDFPage] processing_time: float = 0.0 version: str = "1.0" class PDFProcessorStrategy(ABC): @abstractmethod def process(self, pdf_path: Path) -> PDFProcessResult: pass class NaivePDFProcessorStrategy(PDFProcessorStrategy): def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True, save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4): # Import check at initialization time try: import pypdf except ImportError: raise ImportError("pypdf is required for PDF processing. 
Install with 'pip install crawl4ai[pdf]'") self.image_dpi = image_dpi self.image_quality = image_quality self.current_page_number = 0 self.extract_images = extract_images self.save_images_locally = save_images_locally self.image_save_dir = image_save_dir self.batch_size = batch_size self._temp_dir = None def process(self, pdf_path: Path) -> PDFProcessResult: # Import inside method to allow dependency to be optional try: from pypdf import PdfReader except ImportError: raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'") start_time = time() result = PDFProcessResult( metadata=PDFMetadata(), pages=[], version="1.1" ) try: with pdf_path.open('rb') as file: reader = PdfReader(file) result.metadata = self._extract_metadata(pdf_path, reader) # Handle image directory image_dir = None if self.extract_images and self.save_images_locally: if self.image_save_dir: image_dir = Path(self.image_save_dir) image_dir.mkdir(exist_ok=True, parents=True) else: self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_') image_dir = Path(self._temp_dir) for page_num, page in enumerate(reader.pages): self.current_page_number = page_num + 1 pdf_page = self._process_page(page, image_dir) result.pages.append(pdf_page) except Exception as e: logger.error(f"Failed to process PDF: {str(e)}") raise finally: # Cleanup temp directory if it was created if self._temp_dir and not self.image_save_dir: import shutil try: shutil.rmtree(self._temp_dir) except Exception as e: logger.error(f"Failed to cleanup temp directory: {str(e)}") result.processing_time = time() - start_time return result def process_batch(self, pdf_path: Path) -> PDFProcessResult: """Like process() but processes PDF pages in parallel batches""" # Import inside method to allow dependency to be optional try: from pypdf import PdfReader import pypdf # For type checking except ImportError: raise ImportError("pypdf is required for PDF processing. 
Install with 'pip install crawl4ai[pdf]'") import concurrent.futures import threading # Initialize pypdf thread support if not hasattr(threading.current_thread(), "_children"): threading.current_thread()._children = set() start_time = time() result = PDFProcessResult( metadata=PDFMetadata(), pages=[], version="1.1" ) try: # Get metadata and page count from main thread with pdf_path.open('rb') as file: reader = PdfReader(file) result.metadata = self._extract_metadata(pdf_path, reader) total_pages = len(reader.pages) # Handle image directory setup image_dir = None if self.extract_images and self.save_images_locally: if self.image_save_dir: image_dir = Path(self.image_save_dir) image_dir.mkdir(exist_ok=True, parents=True) else: self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_') image_dir = Path(self._temp_dir) def process_page_safely(page_num: int): # Each thread opens its own file handle with pdf_path.open('rb') as file: thread_reader = PdfReader(file) page = thread_reader.pages[page_num] self.current_page_number = page_num + 1 return self._process_page(page, image_dir) # Process pages in parallel batches with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor: futures = [] for page_num in range(total_pages): future = executor.submit(process_page_safely, page_num) futures.append((page_num + 1, future)) # Collect results in order result.pages = [None] * total_pages for page_num, future in futures: try: pdf_page = future.result() result.pages[page_num - 1] = pdf_page except Exception as e: logger.error(f"Failed to process page {page_num}: {str(e)}") raise except Exception as e: logger.error(f"Failed to process PDF: {str(e)}") raise finally: # Cleanup temp directory if it was created if self._temp_dir and not self.image_save_dir: import shutil try: shutil.rmtree(self._temp_dir) except Exception as e: logger.error(f"Failed to cleanup temp directory: {str(e)}") result.processing_time = time() - start_time return result def 
_process_page(self, page, image_dir: Optional[Path]) -> PDFPage: pdf_page = PDFPage( page_number=self.current_page_number, ) # Text and font extraction def visitor_text(text, cm, tm, font_dict, font_size): pdf_page.raw_text += text pdf_page.layout.append({ "type": "text", "text": text, "x": tm[4], "y": tm[5], }) page.extract_text(visitor_text=visitor_text) # Image extraction if self.extract_images: pdf_page.images = self._extract_images(page, image_dir) # Link extraction pdf_page.links = self._extract_links(page) # Add markdown content pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text) pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text) return pdf_page def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]: # Import pypdf for type checking only when needed try: from pypdf.generic import IndirectObject except ImportError: raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'") if not self.extract_images: return [] images = [] try: resources = page.get("/Resources") if resources: # Check if resources exist resources = resources.get_object() # Resolve IndirectObject if '/XObject' in resources: xobjects = resources['/XObject'].get_object() img_count = 0 for obj_name in xobjects: xobj = xobjects[obj_name] if hasattr(xobj, 'get_object') and callable(xobj.get_object): xobj = xobj.get_object() if xobj.get('/Subtype') == '/Image': try: img_count += 1 img_filename = f"page_{self.current_page_number}_img_{img_count}" data = xobj.get_data() filters = xobj.get('/Filter', []) if not isinstance(filters, list): filters = [filters] # Resolve IndirectObjects in properties width = xobj.get('/Width', 0) height = xobj.get('/Height', 0) color_space = xobj.get('/ColorSpace', '/DeviceRGB') if isinstance(color_space, IndirectObject): color_space = color_space.get_object() # Handle different image encodings success = False image_format = 'bin' image_data = None if 
'/FlateDecode' in filters: try: decode_parms = xobj.get('/DecodeParms', {}) if isinstance(decode_parms, IndirectObject): decode_parms = decode_parms.get_object() predictor = decode_parms.get('/Predictor', 1) bits = xobj.get('/BitsPerComponent', 8) colors = 3 if color_space == '/DeviceRGB' else 1 if predictor >= 10: data = apply_png_predictor(data, width, bits, colors) # Create PIL Image from PIL import Image mode = 'RGB' if color_space == '/DeviceRGB' else 'L' img = Image.frombytes(mode, (width, height), data) if self.save_images_locally: final_path = (image_dir / img_filename).with_suffix('.png') img.save(final_path) image_data = str(final_path) else: import io img_byte_arr = io.BytesIO() img.save(img_byte_arr, format='PNG') image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8') success = True image_format = 'png' except Exception as e: logger.error(f"FlateDecode error: {str(e)}") elif '/DCTDecode' in filters: # JPEG image try: if self.save_images_locally: final_path = (image_dir / img_filename).with_suffix('.jpg') with open(final_path, 'wb') as f: f.write(data) image_data = str(final_path) else: image_data = base64.b64encode(data).decode('utf-8') success = True image_format = 'jpeg' except Exception as e: logger.error(f"JPEG save error: {str(e)}") elif '/CCITTFaxDecode' in filters: try: if data[:4] != b'II*\x00': # Add TIFF header if missing tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \ width.to_bytes(4, 'little') + \ b'\x01\x03\x00\x01\x00\x00\x00' + \ height.to_bytes(4, 'little') + \ b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00' data = tiff_header + data if self.save_images_locally: final_path = (image_dir / img_filename).with_suffix('.tiff') with open(final_path, 'wb') as f: f.write(data) image_data = str(final_path) else: image_data = 
base64.b64encode(data).decode('utf-8') success = True image_format = 'tiff' except Exception as e: logger.error(f"CCITT save error: {str(e)}") elif '/JPXDecode' in filters: # JPEG 2000 try: if self.save_images_locally: final_path = (image_dir / img_filename).with_suffix('.jp2') with open(final_path, 'wb') as f: f.write(data) image_data = str(final_path) else: image_data = base64.b64encode(data).decode('utf-8') success = True image_format = 'jpeg2000' except Exception as e: logger.error(f"JPEG2000 save error: {str(e)}") if success and image_data: image_info = { "format": image_format, "width": width, "height": height, "color_space": str(color_space), "bits_per_component": xobj.get('/BitsPerComponent', 1) } if self.save_images_locally: image_info["path"] = image_data else: image_info["data"] = image_data images.append(image_info) else: # Fallback: Save raw data if self.save_images_locally: final_path = (image_dir / img_filename).with_suffix('.bin') with open(final_path, 'wb') as f: f.write(data) logger.warning(f"Saved raw image data to {final_path}") else: image_data = base64.b64encode(data).decode('utf-8') images.append({ "format": "bin", "width": width, "height": height, "color_space": str(color_space), "bits_per_component": xobj.get('/BitsPerComponent', 1), "data": image_data }) except Exception as e: logger.error(f"Error processing image: {str(e)}") except Exception as e: logger.error(f"Image extraction error: {str(e)}") return images def _extract_links(self, page) -> List[str]: links = [] if '/Annots' in page: try: for annot in page['/Annots']: a = annot.get_object() if '/A' in a and '/URI' in a['/A']: links.append(a['/A']['/URI']) except Exception as e: print(f"Link error: {str(e)}") return links def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata: # Import inside method to allow dependency to be optional if reader is None: try: from pypdf import PdfReader reader = PdfReader(pdf_path) except ImportError: raise ImportError("pypdf is 
required for PDF processing. Install with 'pip install crawl4ai[pdf]'") meta = reader.metadata or {} created = self._parse_pdf_date(meta.get('/CreationDate', '')) modified = self._parse_pdf_date(meta.get('/ModDate', '')) return PDFMetadata( title=meta.get('/Title'), author=meta.get('/Author'), producer=meta.get('/Producer'), created=created, modified=modified, pages=len(reader.pages), encrypted=reader.is_encrypted, file_size=pdf_path.stat().st_size ) def _parse_pdf_date(self, date_str: str) -> Optional[datetime]: try: match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str) if not match: return None return datetime( year=int(match[1]), month=int(match[2]), day=int(match[3]), hour=int(match[4]), minute=int(match[5]), second=int(match[6]) ) except: return None # Usage example if __name__ == "__main__": import json from pathlib import Path try: # Import pypdf only when running the file directly import pypdf from pypdf import PdfReader except ImportError: print("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'") exit(1) current_dir = Path(__file__).resolve().parent pdf_path = f'{current_dir}/test.pdf' strategy = NaivePDFProcessorStrategy() result = strategy.process(Path(pdf_path)) # Convert to JSON json_output = asdict(result) print(json.dumps(json_output, indent=2, default=str)) with open(f'{current_dir}/test.html', 'w') as f: for page in result.pages: f.write(f'<h1>Page {page["page_number"]}</h1>') f.write(page['html']) with open(f'{current_dir}/test.md', 'w') as f: for page in result.pages: f.write(f'# Page {page["page_number"]}\n\n') f.write(clean_pdf_text(page["page_number"], page['raw_text'])) f.write('\n\n')
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/processors/pdf/utils.py
crawl4ai/processors/pdf/utils.py
import re
import html


def apply_png_predictor(data, width, bits, color_channels):
    """Decode PNG predictor-filtered data (PDF 1.5+ /Predictor >= 10 filter).

    Args:
        data: Raw bytes consisting of scanlines, each prefixed by one
            filter-type byte (0=None, 1=Sub, 2=Up, 3=Average, 4=Paeth).
        width: Image width in pixels.
        bits: Bits per component.
        color_channels: Number of colour channels per pixel.

    Returns:
        The de-filtered bytes: all scanlines concatenated, filter bytes removed.

    Raises:
        ValueError: If ``data`` does not divide into whole scanlines, or an
            unknown filter-type byte is encountered.
    """
    # Bytes per complete pixel, rounded up for sub-byte bit depths.
    bytes_per_pixel = (bits * color_channels) // 8
    if (bits * color_channels) % 8 != 0:
        bytes_per_pixel += 1
    stride = width * bytes_per_pixel
    scanline_length = stride + 1  # +1 for the leading filter-type byte
    if len(data) % scanline_length != 0:
        raise ValueError("Invalid scanline structure")
    num_lines = len(data) // scanline_length
    output = bytearray()
    # Per the PNG spec, the (virtual) row above the first row is all zeros.
    prev_line = b'\x00' * stride
    for i in range(num_lines):
        line = data[i * scanline_length:(i + 1) * scanline_length]
        filter_type = line[0]
        filtered = line[1:]
        if filter_type == 0:  # None: bytes pass through unchanged
            decoded = filtered
        elif filter_type == 1:  # Sub: add the byte one pixel to the left
            decoded = bytearray(filtered)
            for j in range(bytes_per_pixel, len(decoded)):
                decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
        elif filter_type == 2:  # Up: add the byte directly above
            decoded = bytearray(
                [(filtered[j] + prev_line[j]) % 256 for j in range(len(filtered))]
            )
        elif filter_type == 3:  # Average: add floor of mean(left, above)
            decoded = bytearray(filtered)
            for j in range(len(decoded)):
                left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                up = prev_line[j]
                avg = (left + up) // 2
                decoded[j] = (decoded[j] + avg) % 256
        elif filter_type == 4:  # Paeth: add the Paeth-predicted neighbour
            decoded = bytearray(filtered)
            for j in range(len(decoded)):
                left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                up = prev_line[j]
                up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                paeth = paeth_predictor(left, up, up_left)
                decoded[j] = (decoded[j] + paeth) % 256
        else:
            raise ValueError(f"Unsupported filter type: {filter_type}")
        output.extend(decoded)
        prev_line = decoded
    return bytes(output)


def paeth_predictor(a, b, c):
    """Return the PNG Paeth prediction for left ``a``, above ``b``, upper-left ``c``.

    Picks whichever of the three neighbours is closest to a + b - c,
    breaking ties in the order left, above, upper-left (per the PNG spec).
    """
    p = a + b - c
    pa = abs(p - a)
    pb = abs(p - b)
    pc = abs(p - c)
    if pa <= pb and pa <= pc:
        return a
    elif pb <= pc:
        return b
    else:
        return c


def clean_pdf_text_to_html(page_number, text):
    """Convert raw text extracted from one PDF page into simple HTML.

    Heuristically detects the article title (first page), numbered section
    headers, author lines, affiliations, e-mail lines, quotes, and
    hyphenated line breaks, emitting h2/h3/p/blockquote markup.

    Args:
        page_number: 1-based page number; author detection only runs on page 1.
        text: Raw extracted page text (may contain literal unicode escapes).

    Returns:
        An HTML fragment (one string, elements joined by newlines).
    """
    # Decode literal unicode escapes and repair surrogate pairs; extracted
    # PDF text frequently contains both.
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fallback: use the text as-is if decoding fails

    article_title_detected = False
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False

    email_pattern = re.compile(r'\{.*?\}')
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["β€œ]')
    # Matches "First Last, First Last and First Last" style author lines,
    # optionally suffixed with affiliation markers (†, *, digits).
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        # Emit the accumulated lines as one paragraph block, if non-empty.
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                escaped_para = para
                escaped_para = escaped_para.split('.\n\n')
                escaped_para = [f'<p>{part}</p>' for part in escaped_para]
                output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
            current_paragraph.clear()

    for i, line in enumerate(lines):
        line = line.strip()

        # Blank line: paragraph boundary.
        if not line:
            flush_paragraph()
            continue

        # Detect article title (first line with reasonable length).
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2>{escaped_line}</h2>')
            article_title_detected = True
            continue

        # Detect numbered headers like "2.1 Background" (only after a blank line).
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)  # 1 -> h2, 2 -> h3, ... capped at h6
            escaped_header = html.escape(header_text)
            output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
            in_header = True
            continue

        # Detect author lines (first page only).
        if page_number == 1 and author_pattern.match(line):
            authors = re.sub(r'[†Ò€]', '', line)  # strip affiliation markers
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    escaped_author = html.escape(formatted)
                    formatted_authors.append(f'<strong>{escaped_author}</strong>')
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]
            output.append(f'<p>{joined}</p>')
            continue

        # Detect affiliation lines (start with a dagger).
        if affiliation_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><em>{escaped_line}</em></p>')
            continue

        # Detect e-mail lines (brace-grouped addresses).
        if email_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><code>{escaped_line}</code></p>')
            continue

        # Detect well-known section headers.
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
            in_header = True
            continue

        # Quoted lines become blockquotes.
        if quote_pattern.match(line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
            continue

        # Re-join words hyphenated across line breaks.
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # A non-sentence line right after a header ends that header's block.
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-process HTML.
    html_output = '\n'.join(output)
    # Wrap common citation patterns, e.g. "(Smith et al. 2020)".
    html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)
    # Fix escaped characters left over from extraction.
    html_output = html_output.replace('\\ud835', '').replace('\\u2020', '†')
    # Remove leftover hyphens and fix spacing before punctuation.
    html_output = re.sub(r'\s+-\s+', '', html_output)
    html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)
    return html_output


def clean_pdf_text(page_number, text):
    """Convert raw text extracted from one PDF page into Markdown.

    Mirrors :func:`clean_pdf_text_to_html` but emits Markdown: "##" headers,
    "**bold**" authors, "> " quotes, and plain paragraphs separated by
    blank lines.

    Args:
        page_number: 1-based page number; author detection only runs on page 1.
        text: Raw extracted page text (may contain literal unicode escapes).

    Returns:
        A Markdown string for the page.
    """
    # Decode literal unicode escapes and repair surrogate pairs.
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fallback: use the text as-is if decoding fails

    article_title_detected = False
    # Force a paragraph break after sentence-final newlines.
    decoded = re.sub(r'\.\n', '.\n\n', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False

    email_pattern = re.compile(r'\{.*?\}')
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["β€œ]')
    # Same author heuristic as clean_pdf_text_to_html.
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        # Emit the accumulated lines as one Markdown paragraph, if non-empty.
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                output.append(para)
            current_paragraph.clear()

    for i, line in enumerate(lines):
        line = line.strip()

        # Blank line: paragraph boundary.
        if not line:
            flush_paragraph()
            continue

        # Detect article title (first line with reasonable length).
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and (len(lines) > 1):
            flush_paragraph()
            output.append(f'## {line}')
            article_title_detected = True
            continue

        # Detect numbered headers like "2.1 Background".
        # BUGFIX: guard with i > 0 — previously lines[i-1] at i == 0 wrapped
        # around to lines[-1]; the HTML variant already had this guard.
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1  # "2.1" -> level 2
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)  # 1 -> ##, 2 -> ###, ... capped at 6
            output.append(f'{"#" * md_level} {header_text}')
            in_header = True
            continue

        # Detect author lines (first page only).
        if page_number == 1 and author_pattern.match(line):
            authors = re.sub(r'[†Ò€]', '', line)  # strip affiliation markers
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    formatted_authors.append(f'**{formatted}**')
            # Join with commas and a final "and".
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]
            output.append(joined)
            continue

        # Detect affiliation lines (start with a dagger).
        if affiliation_pattern.match(line):
            output.append(f'*{line}*')
            continue

        # Detect e-mail lines (brace-grouped addresses).
        if email_pattern.match(line):
            output.append(f'`{line}`')
            continue

        # Detect well-known section headers.
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            output.append(f'_[{line}]_')
            in_header = True
            continue

        # Quoted lines become blockquotes.
        if quote_pattern.match(line):
            flush_paragraph()
            output.append(f'> {line}')
            continue

        # Re-join words hyphenated across line breaks.
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # A non-sentence line right after a header ends that header's block.
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-processing.
    markdown = '\n\n'.join(output)
    # Convert common citation patterns, e.g. "(Smith et al. 2020)".
    markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)
    # Fix escaped characters left over from extraction.
    markdown = markdown.replace('\\ud835', '').replace('\\u2020', '†')
    markdown = re.sub(r'\s+-\s+', '', markdown)      # Join hyphenated words
    markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown)  # Fix punctuation spacing
    return markdown
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/crawl4ai/processors/pdf/__init__.py
crawl4ai/processors/pdf/__init__.py
from pathlib import Path
import asyncio
from dataclasses import asdict

from crawl4ai.async_logger import AsyncLogger
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy

from .processor import NaivePDFProcessorStrategy


class PDFCrawlerStrategy(AsyncCrawlerStrategy):
    """Crawler strategy for PDFs that defers all real work to the scraper.

    ``crawl`` returns a placeholder response; the paired
    :class:`PDFContentScrapingStrategy` downloads and parses the PDF itself.

    Args:
        logger: Optional AsyncLogger for recording events and errors.
    """

    def __init__(self, logger: AsyncLogger = None):
        self.logger = logger

    async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
        # Pass through with placeholder HTML — the scraping strategy handles
        # the actual download/parsing of the PDF.
        return AsyncCrawlResponse(
            html="Scraper will handle the real work",
            response_headers={"Content-Type": "application/pdf"},
            status_code=200
        )

    async def close(self):
        # No resources held; present to satisfy the strategy interface.
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()


class PDFContentScrapingStrategy(ContentScrapingStrategy):
    """
    A content scraping strategy for PDF files.

    Attributes:
        logger (AsyncLogger): Logger instance for recording events and errors.
        pdf_processor (NaivePDFProcessorStrategy): Underlying PDF processor.

    Methods:
        scrap(url: str, html: str, **params) -> ScrapingResult:
            Scrap content from a PDF file.
        ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
            Asynchronous version of scrap.

    Usage:
        strategy = PDFContentScrapingStrategy(
            save_images_locally=False,
            extract_images=False,
            image_save_dir=None,
            logger=logger
        )
    """

    def __init__(self,
                 save_images_locally : bool = False,
                 extract_images : bool = False,
                 image_save_dir : str = None,
                 batch_size: int = 4,
                 logger: AsyncLogger = None):
        self.logger = logger
        self.pdf_processor = NaivePDFProcessorStrategy(
            save_images_locally=save_images_locally,
            extract_images=extract_images,
            image_save_dir=image_save_dir,
            batch_size=batch_size
        )
        self._temp_files = []  # Track downloaded temp files for cleanup

    def scrap(self, url: str, html: str, **params) -> ScrapingResult:
        """
        Scrap content from a PDF file.

        Args:
            url (str): The URL of the PDF file (http(s), file://, or local path).
            html (str): The HTML content of the page (ignored; PDF is the source).
            **params: Additional parameters.

        Returns:
            ScrapingResult: The scraped content (HTML per page, images, links,
            and PDF metadata).
        """
        # Download if URL, or resolve to a local path.
        pdf_path = self._get_pdf_path(url)
        try:
            # Process the PDF in page batches.
            result = self.pdf_processor.process_batch(Path(pdf_path))

            # Combine per-page HTML into a single document.
            cleaned_html = f"""
            <html>
                <head><meta name="pdf-pages" content="{len(result.pages)}"></head>
                <body>
                    {''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>' for i, page in enumerate(result.pages))}
                </body>
            </html>
            """

            # Accumulate media and links, tagging each with its page number.
            media = {"images": []}
            links = {"urls": []}
            for page in result.pages:
                for img in page.images:
                    img["page"] = page.page_number
                    media["images"].append(img)
                for link in page.links:
                    links["urls"].append({
                        "url": link,
                        "page": page.page_number
                    })

            return ScrapingResult(
                cleaned_html=cleaned_html,
                success=True,
                media=media,
                links=links,
                metadata=asdict(result.metadata)
            )
        finally:
            # Cleanup the temp file if we downloaded it.
            if url.startswith(("http://", "https://")):
                try:
                    Path(pdf_path).unlink(missing_ok=True)
                    if pdf_path in self._temp_files:
                        self._temp_files.remove(pdf_path)
                except Exception as e:
                    if self.logger:
                        self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")

    async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
        """Async wrapper: run the blocking ``scrap`` in a worker thread."""
        return await asyncio.to_thread(self.scrap, url, html, **kwargs)

    def _get_pdf_path(self, url: str) -> str:
        """Resolve ``url`` to a local file path, downloading if remote.

        Returns the path to a local PDF file. Remote downloads go to a temp
        file tracked in ``self._temp_files`` (removed after scraping).

        Raises:
            RuntimeError: If a remote download times out or fails.
        """
        if url.startswith(("http://", "https://")):
            import tempfile
            import requests

            # Create temp file with .pdf extension; close the handle right
            # away (we reopen by name below) so the descriptor is not leaked
            # and the file is writable on Windows.
            temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
            temp_file.close()
            self._temp_files.append(temp_file.name)

            try:
                if self.logger:
                    self.logger.info(f"Downloading PDF from {url}...")

                # Download PDF with streaming.
                # Connect timeout: 20s; read timeout: 600s (10 minutes, to
                # accommodate large PDFs).
                response = requests.get(url, stream=True, timeout=(20, 60 * 10))
                response.raise_for_status()

                # Get file size if available.
                total_size = int(response.headers.get('content-length', 0))
                downloaded = 0
                last_logged_decile = -1  # log each 10% milestone exactly once

                with open(temp_file.name, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
                        downloaded += len(chunk)
                        if self.logger and total_size > 0:
                            decile = int((downloaded / total_size) * 10)
                            if decile > last_logged_decile:
                                last_logged_decile = decile
                                self.logger.debug(f"PDF download progress: {decile * 10}%")

                if self.logger:
                    self.logger.info(f"PDF downloaded successfully: {temp_file.name}")
                return temp_file.name

            except requests.exceptions.Timeout as e:
                # Clean up temp file if download fails.
                Path(temp_file.name).unlink(missing_ok=True)
                self._temp_files.remove(temp_file.name)
                raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
            except Exception as e:
                # Clean up temp file if download fails.
                Path(temp_file.name).unlink(missing_ok=True)
                self._temp_files.remove(temp_file.name)
                raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")
        elif url.startswith("file://"):
            return url[7:]  # Strip file:// prefix
        return url  # Assume local path


__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/apps/linkdin/c4ai_discover.py
docs/apps/linkdin/c4ai_discover.py
#!/usr/bin/env python3 """ c4ai-discover β€” Stage‑1 Discovery CLI Scrapes LinkedIn company search + their people pages and dumps two newline‑delimited JSON files: companies.jsonl and people.jsonl. Key design rules ---------------- * No BeautifulSoup β€” Crawl4AI only for network + HTML fetch. * JsonCssExtractionStrategy for structured scraping; schema auto‑generated once from sample HTML provided by user and then cached under ./schemas/. * Defaults are embedded so the file runs inside VSΒ Code debugger without CLI args. * If executed as a console script (argv > 1), CLI flags win. * Lightweight deps: argparse + Crawl4AI stack. Author: Tom @ Kidocode 2025‑04‑26 """ from __future__ import annotations import warnings, re warnings.filterwarnings( "ignore", message=r"The pseudo class ':contains' is deprecated, ':-soup-contains' should be used.*", category=FutureWarning, module=r"soupsieve" ) # ─────────────────────────────────────────────────────────────────────────────── # Imports # ─────────────────────────────────────────────────────────────────────────────── import argparse import random import asyncio import json import logging import os import pathlib import sys # 3rd-party rich for pretty logging from rich.console import Console from rich.logging import RichHandler from datetime import datetime, UTC from textwrap import dedent from types import SimpleNamespace from typing import Dict, List, Optional from urllib.parse import quote from pathlib import Path from glob import glob from crawl4ai import ( AsyncWebCrawler, BrowserConfig, CacheMode, CrawlerRunConfig, JsonCssExtractionStrategy, BrowserProfiler, LLMConfig, ) # ─────────────────────────────────────────────────────────────────────────────── # Constants / paths # ─────────────────────────────────────────────────────────────────────────────── BASE_DIR = pathlib.Path(__file__).resolve().parent SCHEMA_DIR = BASE_DIR / "schemas" SCHEMA_DIR.mkdir(parents=True, exist_ok=True) COMPANY_SCHEMA_PATH = SCHEMA_DIR / 
"company_card.json" PEOPLE_SCHEMA_PATH = SCHEMA_DIR / "people_card.json" # ---------- deterministic target JSON examples ---------- _COMPANY_SCHEMA_EXAMPLE = { "handle": "/company/posify/", "profile_image": "https://media.licdn.com/dms/image/v2/.../logo.jpg", "name": "Management Research Services, Inc. (MRS, Inc)", "descriptor": "Insurance β€’ Milwaukee, Wisconsin", "about": "Insurance β€’ Milwaukee, Wisconsin", "followers": 1000 } _PEOPLE_SCHEMA_EXAMPLE = { "profile_url": "https://www.linkedin.com/in/lily-ng/", "name": "Lily Ng", "headline": "VP Product @ Posify", "followers": 890, "connection_degree": "2nd", "avatar_url": "https://media.licdn.com/dms/image/v2/.../lily.jpg" } # Provided sample HTML snippets (trimmed) β€” used exactly once to cold‑generate schema. _SAMPLE_COMPANY_HTML = (Path(__file__).resolve().parent / "snippets/company.html").read_text() _SAMPLE_PEOPLE_HTML = (Path(__file__).resolve().parent / "snippets/people.html").read_text() # --------- tighter schema prompts ---------- _COMPANY_SCHEMA_QUERY = dedent( """ Using the supplied <li> company-card HTML, build a JsonCssExtractionStrategy schema that, for every card, outputs *exactly* the keys shown in the example JSON below. JSON spec: β€’ handle – href of the outermost <a> that wraps the logo/title, e.g. "/company/posify/" β€’ profile_image – absolute URL of the <img> inside that link β€’ name – text of the <a> inside the <span class*='t-16'> β€’ descriptor – text line with industry β€’ location β€’ about – text of the <div class*='t-normal'> below the name (industry + geo) β€’ followers – integer parsed from the <div> containing 'followers' IMPORTANT: Do not use the base64 kind of classes to target element. It's not reliable. The main div parent contains these li element is "div.search-results-container" you can use this. The <ul> parent has "role" equal to "list". Using these two should be enough to target the <li> elements. 
IMPORTANT: Remember there might be multiple <a> tags that start with https://www.linkedin.com/company/[NAME], so in case you refer to them for different fields, make sure to be more specific. One has the image, and one has the person's name. IMPORTANT: Be very smart in selecting the correct and unique way to address the element. You should ensure your selector points to a single element and is unique to the place that contains the information. """ ) _PEOPLE_SCHEMA_QUERY = dedent( """ Using the supplied <li> people-card HTML, build a JsonCssExtractionStrategy schema that outputs exactly the keys in the example JSON below. Fields: β€’ profile_url – href of the outermost profile link β€’ name – text inside artdeco-entity-lockup__title β€’ headline – inner text of artdeco-entity-lockup__subtitle β€’ followers – integer parsed from the span inside lt-line-clamp--multi-line β€’ connection_degree – '1st', '2nd', etc. from artdeco-entity-lockup__badge β€’ avatar_url – src of the <img> within artdeco-entity-lockup__image IMPORTANT: Do not use the base64 kind of classes to target element. It's not reliable. The main div parent contains these li element is a "div" has these classes "artdeco-card org-people-profile-card__card-spacing org-people__card-margin-bottom". 
""" ) # --------------------------------------------------------------------------- # Utility helpers # --------------------------------------------------------------------------- def _load_or_build_schema( path: pathlib.Path, sample_html: str, query: str, example_json: Dict, force = False ) -> Dict: """Load schema from path, else call generate_schema once and persist.""" if path.exists() and not force: return json.loads(path.read_text()) logging.info("[SCHEMA] Generating schema %s", path.name) schema = JsonCssExtractionStrategy.generate_schema( html=sample_html, llm_config=LLMConfig( provider=os.getenv("C4AI_SCHEMA_PROVIDER", "openai/gpt-4o"), api_token=os.getenv("OPENAI_API_KEY", "env:OPENAI_API_KEY"), ), query=query, target_json_example=json.dumps(example_json, indent=2), ) path.write_text(json.dumps(schema, indent=2)) return schema def _openai_friendly_number(text: str) -> Optional[int]: """Extract first int from text like '1K followers' (returns 1000).""" import re m = re.search(r"(\d[\d,]*)", text.replace(",", "")) if not m: return None val = int(m.group(1)) if "k" in text.lower(): val *= 1000 if "m" in text.lower(): val *= 1_000_000 return val # --------------------------------------------------------------------------- # Core async workers # --------------------------------------------------------------------------- async def crawl_company_search(crawler: AsyncWebCrawler, url: str, schema: Dict, limit: int) -> List[Dict]: """Paginate 10-item company search pages until `limit` reached.""" extraction = JsonCssExtractionStrategy(schema) cfg = CrawlerRunConfig( extraction_strategy=extraction, cache_mode=CacheMode.BYPASS, wait_for = ".search-marvel-srp", session_id="company_search", delay_before_return_html=1, magic = True, verbose= False, ) companies, page = [], 1 while len(companies) < max(limit, 10): paged_url = f"{url}&page={page}" res = await crawler.arun(paged_url, config=cfg) batch = json.loads(res[0].extracted_content) if not batch: break for item in 
batch: name = item.get("name", "").strip() handle = item.get("handle", "").strip() if not handle or not name: continue descriptor = item.get("descriptor") about = item.get("about") followers = _openai_friendly_number(str(item.get("followers", ""))) companies.append( { "handle": handle, "name": name, "descriptor": descriptor, "about": about, "followers": followers, "people_url": f"{handle}people/", "captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z", } ) page += 1 logging.info( f"[dim]Page {page}[/] β€” running total: {len(companies)}/{limit} companies" ) return companies[:max(limit, 10)] async def crawl_people_page( crawler: AsyncWebCrawler, people_url: str, schema: Dict, limit: int, title_kw: str, ) -> List[Dict]: people_u = f"{people_url}?keywords={quote(title_kw)}" extraction = JsonCssExtractionStrategy(schema) cfg = CrawlerRunConfig( extraction_strategy=extraction, # scan_full_page=True, cache_mode=CacheMode.BYPASS, magic=True, wait_for=".org-people-profile-card__card-spacing", wait_for_images=5000, delay_before_return_html=1, session_id="people_search", ) res = await crawler.arun(people_u, config=cfg) if not res[0].success: return [] raw = json.loads(res[0].extracted_content) people = [] for p in raw[:limit]: followers = _openai_friendly_number(str(p.get("followers", ""))) people.append( { "profile_url": p.get("profile_url"), "name": p.get("name"), "headline": p.get("headline"), "followers": followers, "connection_degree": p.get("connection_degree"), "avatar_url": p.get("avatar_url"), } ) return people # --------------------------------------------------------------------------- # CLI + main # --------------------------------------------------------------------------- def build_arg_parser() -> argparse.ArgumentParser: ap = argparse.ArgumentParser("c4ai-discover β€” Crawl4AI LinkedIn discovery") sub = ap.add_subparsers(dest="cmd", required=False, help="run scope") def add_flags(parser: argparse.ArgumentParser): parser.add_argument("--query", 
required=False, help="query keyword(s)") parser.add_argument("--geo", required=False, type=int, help="LinkedIn geoUrn") parser.add_argument("--title-filters", default="Product,Engineering", help="comma list of job keywords") parser.add_argument("--max-companies", type=int, default=1000) parser.add_argument("--max-people", type=int, default=500) parser.add_argument("--profile-name", default=str(pathlib.Path.home() / ".crawl4ai/profiles/profile_linkedin_uc")) parser.add_argument("--outdir", default="./output") parser.add_argument("--concurrency", type=int, default=4) parser.add_argument("--log-level", default="info", choices=["debug", "info", "warn", "error"]) add_flags(sub.add_parser("full")) add_flags(sub.add_parser("companies")) add_flags(sub.add_parser("people")) # global flags ap.add_argument( "--debug", action="store_true", help="Use built-in demo defaults (same as C4AI_DEMO_DEBUG=1)", ) return ap def detect_debug_defaults(force = False) -> SimpleNamespace: if not force and sys.gettrace() is None and not os.getenv("C4AI_DEMO_DEBUG"): return SimpleNamespace() # ----- debug‑friendly defaults ----- return SimpleNamespace( cmd="full", query="health insurance management", geo=102713980, # title_filters="Product,Engineering", title_filters="", max_companies=10, max_people=5, profile_name="profile_linkedin_uc", outdir="./debug_out", concurrency=2, log_level="debug", ) async def async_main(opts): # ─────────── logging setup ─────────── console = Console() logging.basicConfig( level=opts.log_level.upper(), format="%(message)s", handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)], ) # ------------------------------------------------------------------- # Load or build schemas (one‑time LLM call each) # ------------------------------------------------------------------- company_schema = _load_or_build_schema( COMPANY_SCHEMA_PATH, _SAMPLE_COMPANY_HTML, _COMPANY_SCHEMA_QUERY, _COMPANY_SCHEMA_EXAMPLE, # True ) people_schema = _load_or_build_schema( 
PEOPLE_SCHEMA_PATH, _SAMPLE_PEOPLE_HTML, _PEOPLE_SCHEMA_QUERY, _PEOPLE_SCHEMA_EXAMPLE, # True ) outdir = BASE_DIR / pathlib.Path(opts.outdir) outdir.mkdir(parents=True, exist_ok=True) f_companies = (BASE_DIR / outdir / "companies.jsonl").open("a", encoding="utf-8") f_people = (BASE_DIR / outdir / "people.jsonl").open("a", encoding="utf-8") # ------------------------------------------------------------------- # Prepare crawler with cookie pool rotation # ------------------------------------------------------------------- profiler = BrowserProfiler() path = profiler.get_profile_path(opts.profile_name) bc = BrowserConfig( headless=False, verbose=False, user_data_dir=path, use_managed_browser=True, user_agent_mode = "random", user_agent_generator_config= { "platforms": "mobile", "os": "Android" } ) crawler = AsyncWebCrawler(config=bc) await crawler.start() # Single worker for simplicity; concurrency can be scaled by arun_many if needed. # crawler = await next_crawler().start() try: # Build LinkedIn search URL search_url = f'https://www.linkedin.com/search/results/companies/?keywords={quote(opts.query)}&companyHqGeo="{opts.geo}"' logging.info("Seed URL => %s", search_url) companies: List[Dict] = [] if opts.cmd in ("companies", "full"): companies = await crawl_company_search( crawler, search_url, company_schema, opts.max_companies ) for c in companies: f_companies.write(json.dumps(c, ensure_ascii=False) + "\n") logging.info(f"[bold green]βœ“[/] Companies scraped so far: {len(companies)}") if opts.cmd in ("people", "full"): if not companies: # load from previous run src = outdir / "companies.jsonl" if not src.exists(): logging.error("companies.jsonl missing β€” run companies/full first") return 10 companies = [json.loads(l) for l in src.read_text().splitlines()] total_people = 0 title_kw = " ".join([t.strip() for t in opts.title_filters.split(",") if t.strip()]) if opts.title_filters else "" for comp in companies: people = await crawl_people_page( crawler, 
comp["people_url"], people_schema, opts.max_people, title_kw, ) for p in people: rec = p | { "company_handle": comp["handle"], # "captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z", "captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z", } f_people.write(json.dumps(rec, ensure_ascii=False) + "\n") total_people += len(people) logging.info( f"{comp['name']} β€” [cyan]{len(people)}[/] people extracted" ) await asyncio.sleep(random.uniform(0.5, 1)) logging.info("Total people scraped: %d", total_people) finally: await crawler.close() f_companies.close() f_people.close() return 0 def main(): parser = build_arg_parser() cli_opts = parser.parse_args() # decide on debug defaults if cli_opts.debug: opts = detect_debug_defaults(force=True) cli_opts = opts else: env_defaults = detect_debug_defaults() opts = env_defaults if env_defaults else cli_opts if not getattr(opts, "cmd", None): opts.cmd = "full" exit_code = asyncio.run(async_main(cli_opts)) sys.exit(exit_code) if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/apps/linkdin/c4ai_insights.py
docs/apps/linkdin/c4ai_insights.py
#!/usr/bin/env python3 """ Stage-2 Insights builder ------------------------ Reads companies.jsonl & people.jsonl (Stage-1 output) and produces: β€’ company_graph.json β€’ org_chart_<handle>.json (one per company) β€’ decision_makers.csv β€’ graph_view.html (interactive visualisation) Run: python c4ai_insights.py --in ./stage1_out --out ./stage2_out Author : Tom @ Kidocode, 2025-04-28 """ from __future__ import annotations # ─────────────────────────────────────────────────────────────────────────────── # Imports & Third-party # ─────────────────────────────────────────────────────────────────────────────── import argparse, asyncio, json, pathlib, random from datetime import datetime, UTC from types import SimpleNamespace from pathlib import Path from typing import List, Dict, Any # Pretty CLI UX from rich.console import Console from rich.logging import RichHandler from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn # ─────────────────────────────────────────────────────────────────────────────── BASE_DIR = pathlib.Path(__file__).resolve().parent # ─────────────────────────────────────────────────────────────────────────────── # 3rd-party deps # ─────────────────────────────────────────────────────────────────────────────── import numpy as np # from sentence_transformers import SentenceTransformer # from sklearn.metrics.pairwise import cosine_similarity import pandas as pd import hashlib from litellm import completion #Support any LLM Provider # ─────────────────────────────────────────────────────────────────────────────── # Utils # ─────────────────────────────────────────────────────────────────────────────── def load_jsonl(path: Path) -> List[Dict[str, Any]]: with open(path, "r", encoding="utf-8") as f: return [json.loads(l) for l in f] def dump_json(obj, path: Path): with open(path, "w", encoding="utf-8") as f: json.dump(obj, f, ensure_ascii=False, indent=2) # 
─────────────────────────────────────────────────────────────────────────────── # Constants # ─────────────────────────────────────────────────────────────────────────────── BASE_DIR = pathlib.Path(__file__).resolve().parent # ─────────────────────────────────────────────────────────────────────────────── # Debug defaults (mirrors Stage-1 trick) # ─────────────────────────────────────────────────────────────────────────────── def dev_defaults() -> SimpleNamespace: return SimpleNamespace( in_dir="./samples", out_dir="./samples/insights", embed_model="all-MiniLM-L6-v2", top_k=10, llm_provider="openai/gpt-4.1", llm_api_key=None, max_llm_tokens=8000, llm_temperature=1.0, stub=False, # Set to True to use a stub for org-chart inference llm_base_url=None, # e.g., "https://api.openai.com/v1" for OpenAI workers=4 ) # ─────────────────────────────────────────────────────────────────────────────── # Graph builders # ─────────────────────────────────────────────────────────────────────────────── def embed_descriptions(companies, model_name:str, opts) -> np.ndarray: from sentence_transformers import SentenceTransformer console = Console() console.print(f"Using embedding model: [bold cyan]{model_name}[/]") cache_path = BASE_DIR / Path(opts.out_dir) / "embeds_cache.json" cache = {} if cache_path.exists(): with open(cache_path) as f: cache = json.load(f) # flush cache if model differs if cache.get("_model") != model_name: cache = {} model = SentenceTransformer(model_name) new_texts, new_indices = [], [] vectors = np.zeros((len(companies), 384), dtype=np.float32) for idx, comp in enumerate(companies): text = comp.get("about") or comp.get("descriptor","") h = hashlib.sha1(text.encode("utf-8")).hexdigest() cached = cache.get(comp["handle"]) if cached and cached["hash"] == h: vectors[idx] = np.array(cached["vector"], dtype=np.float32) else: new_texts.append(text) new_indices.append((idx, comp["handle"], h)) if new_texts: embeds = model.encode(new_texts, show_progress_bar=False, 
convert_to_numpy=True) for vec, (idx, handle, h) in zip(embeds, new_indices): vectors[idx] = vec cache[handle] = {"hash": h, "vector": vec.tolist()} cache["_model"] = model_name with open(cache_path, "w") as f: json.dump(cache, f) return vectors def build_company_graph(companies, embeds:np.ndarray, top_k:int) -> Dict[str,Any]: from sklearn.metrics.pairwise import cosine_similarity sims = cosine_similarity(embeds) nodes, edges = [], [] for i,c in enumerate(companies): node = dict( id=c["handle"].strip("/"), name=c["name"], handle=c["handle"], about=c.get("about",""), people_url=c.get("people_url",""), industry=c.get("descriptor","").split("β€’")[0].strip(), geoUrn=c.get("geoUrn"), followers=c.get("followers",0), # desc_embed=embeds[i].tolist(), desc_embed=[], ) nodes.append(node) # pick top-k most similar except itself top_idx = np.argsort(sims[i])[::-1][1:top_k+1] for j in top_idx: tgt = companies[j] weight = float(sims[i,j]) if node["industry"] == tgt.get("descriptor","").split("β€’")[0].strip(): weight += 0.10 if node["geoUrn"] == tgt.get("geoUrn"): weight += 0.05 tgt['followers'] = tgt.get("followers", None) or 1 node["followers"] = node.get("followers", None) or 1 follower_ratio = min(node["followers"], tgt.get("followers",1)) / max(node["followers"] or 1, tgt.get("followers",1)) weight += 0.05 * follower_ratio edges.append(dict( source=node["id"], target=tgt["handle"].strip("/"), weight=round(weight,4), drivers=dict( embed_sim=round(float(sims[i,j]),4), industry_match=0.10 if node["industry"] == tgt.get("descriptor","").split("β€’")[0].strip() else 0, geo_overlap=0.05 if node["geoUrn"] == tgt.get("geoUrn") else 0, ) )) # return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}} return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}} # ─────────────────────────────────────────────────────────────────────────────── # Org-chart via LLM # 
─────────────────────────────────────────────────────────────────────────────── async def infer_org_chart_llm(company, people, llm_provider:str, api_key:str, max_tokens:int, temperature:float, stub:bool=False, base_url:str=None): if stub: # Tiny fake org-chart when debugging offline chief = random.choice(people) nodes = [{ "id": chief["profile_url"], "name": chief["name"], "title": chief["headline"], "dept": chief["headline"].split()[:1][0], "yoe_total": 8, "yoe_current": 2, "seniority_score": 0.8, "decision_score": 0.9, "avatar_url": chief.get("avatar_url") }] return {"nodes":nodes,"edges":[],"meta":{"debug_stub":True,"generated_at":datetime.now(UTC).isoformat()}} prompt = [ {"role":"system","content":"You are an expert B2B org-chart reasoner."}, {"role":"user","content":f"""Here is the company description: <company> {json.dumps(company, ensure_ascii=False)} </company> Here is a JSON list of employees: <employees> {json.dumps(people, ensure_ascii=False)} </employees> 1) Build a reporting tree (manager -> direct reports) 2) For each person output a decision_score 0-1 for buying new software Return JSON: {{ "nodes":[{{id,name,title,dept,yoe_total,yoe_current,seniority_score,decision_score,avatar_url,profile_url}}], "edges":[{{source,target,type,confidence}}] }} """} ] resp = completion( model=llm_provider, messages=prompt, max_tokens=max_tokens, temperature=temperature, response_format={"type":"json_object"}, api_key=api_key, base_url=base_url ) chart = json.loads(resp.choices[0].message.content) chart["meta"] = dict( model=llm_provider, generated_at=datetime.now(UTC).isoformat() ) return chart # ─────────────────────────────────────────────────────────────────────────────── # CSV flatten # ─────────────────────────────────────────────────────────────────────────────── def export_decision_makers(charts_dir:Path, csv_path:Path, threshold:float=0.5): rows=[] for p in charts_dir.glob("org_chart_*.json"): data=json.loads(p.read_text()) comp = 
p.stem.split("org_chart_")[1] for n in data.get("nodes",[]): if n.get("decision_score",0)>=threshold: rows.append(dict( company=comp, person=n["name"], title=n["title"], decision_score=n["decision_score"], profile_url=n["id"] )) pd.DataFrame(rows).to_csv(csv_path,index=False) # ─────────────────────────────────────────────────────────────────────────────── # HTML rendering # ─────────────────────────────────────────────────────────────────────────────── def render_html(out:Path, template_dir:Path): # From template folder cp graph_view.html and ai.js in out folder import shutil shutil.copy(template_dir/"graph_view_template.html", out / "graph_view.html") shutil.copy(template_dir/"ai.js", out) # ─────────────────────────────────────────────────────────────────────────────── # Main async pipeline # ─────────────────────────────────────────────────────────────────────────────── async def run(opts): # ── silence SDK noise ────────────────────────────────────────────────────── # for noisy in ("openai", "httpx", "httpcore"): # lg = logging.getLogger(noisy) # lg.setLevel(logging.WARNING) # or ERROR if you want total silence # lg.propagate = False # optional: stop them reaching root # ────────────── logging bootstrap ────────────── console = Console() # logging.basicConfig( # level="INFO", # format="%(message)s", # handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)], # ) in_dir = BASE_DIR / Path(opts.in_dir) out_dir = BASE_DIR / Path(opts.out_dir) out_dir.mkdir(parents=True, exist_ok=True) companies = load_jsonl(in_dir/"companies.jsonl") people = load_jsonl(in_dir/"people.jsonl") console.print(f"[bold cyan]Loaded[/] {len(companies)} companies, {len(people)} people") console.print("[bold]β‡’[/] Embedding company descriptions…") embeds = embed_descriptions(companies, opts.embed_model, opts) console.print("[bold]β‡’[/] Building similarity graph") company_graph = build_company_graph(companies, embeds, opts.top_k) dump_json(company_graph, 
out_dir/"company_graph.json") # Filter companies that need processing to_process = [] for comp in companies: handle = comp["handle"].strip("/").replace("/","_") out_file = out_dir/f"org_chart_{handle}.json" if out_file.exists(): console.print(f"[green]βœ“[/] Skipping existing {comp['name']}") continue to_process.append(comp) if not to_process: console.print("[yellow]All companies already processed[/]") else: workers = getattr(opts, 'workers', 1) parallel = workers > 1 console.print(f"[bold]β‡’[/] Inferring org-charts via LLM {f'(parallel={workers} workers)' if parallel else ''}") with Progress( SpinnerColumn(), BarColumn(), TextColumn("[progress.description]{task.description}"), TimeElapsedColumn(), console=console, ) as progress: task = progress.add_task("Org charts", total=len(to_process)) async def process_one(comp): handle = comp["handle"].strip("/").replace("/","_") persons = [p for p in people if p["company_handle"].strip("/") == comp["handle"].strip("/")] chart = await infer_org_chart_llm( comp, persons, llm_provider=opts.llm_provider, api_key=opts.llm_api_key or None, max_tokens=opts.max_llm_tokens, temperature=opts.llm_temperature, stub=opts.stub or False, base_url=opts.llm_base_url or None ) chart["meta"]["company"] = comp["name"] # Save the result immediately dump_json(chart, out_dir/f"org_chart_{handle}.json") progress.update(task, advance=1, description=f"{comp['name']} ({len(persons)} ppl)") # Create tasks for all companies tasks = [process_one(comp) for comp in to_process] # Process in batches based on worker count semaphore = asyncio.Semaphore(workers) async def bounded_process(coro): async with semaphore: return await coro # Run with concurrency control await asyncio.gather(*(bounded_process(task) for task in tasks)) console.print("[bold]β‡’[/] Flattening decision-makers CSV") export_decision_makers(out_dir, out_dir/"decision_makers.csv") render_html(out_dir, template_dir=BASE_DIR/"templates") console.print(f"[bold green]βœ“[/] Stage-2 artefacts 
written to {out_dir}") # ─────────────────────────────────────────────────────────────────────────────── # CLI # ─────────────────────────────────────────────────────────────────────────────── def build_arg_parser(): p = argparse.ArgumentParser(description="Build graphs & visualisation from Stage-1 output") p.add_argument("--in", dest="in_dir", required=False, help="Stage-1 output dir", default=".") p.add_argument("--out", dest="out_dir", required=False, help="Destination dir", default=".") p.add_argument("--embed-model", default="all-MiniLM-L6-v2") p.add_argument("--top-k", type=int, default=10, help="Top-k neighbours per company") p.add_argument("--llm-provider", default="openai/gpt-4.1", help="LLM model to use in format 'provider/model_name' (e.g., 'anthropic/claude-3')") p.add_argument("--llm-api-key", help="API key for LLM provider (defaults to env vars)") p.add_argument("--llm-base-url", help="Base URL for LLM API endpoint") p.add_argument("--max-llm-tokens", type=int, default=8024) p.add_argument("--llm-temperature", type=float, default=1.0) p.add_argument("--stub", action="store_true", help="Skip OpenAI call and generate tiny fake org charts") p.add_argument("--workers", type=int, default=4, help="Number of parallel workers for LLM inference") return p def main(): dbg = dev_defaults() opts = dbg if True else build_arg_parser().parse_args() # opts = build_arg_parser().parse_args() asyncio.run(run(opts)) if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/snippets/deep_crawl/1.intro.py
docs/snippets/deep_crawl/1.intro.py
import asyncio
from typing import List

from crawl4ai import (
    AsyncWebCrawler,      # main crawling entry point
    CrawlerRunConfig,     # per-run crawler configuration
    BFSDeepCrawlStrategy, # breadth-first deep-crawl strategy
    CrawlResult,          # result model for a single crawled page
    FilterChain,          # chains multiple URL filters
    DomainFilter,         # filter URLs by domain
    URLPatternFilter,     # filter URLs by glob-style patterns
)
# These names are re-exported from crawl4ai's __init__.py, so this flat import
# is equivalent to importing from crawl4ai.deep_crawling.filters.


async def basic_deep_crawl():
    """
    Performs a basic deep crawl starting from a seed URL, demonstrating:
    - Breadth-First Search (BFS) deep crawling strategy.
    - Filtering URLs based on URL patterns and domains.
    - Accessing crawl results and metadata.
    """
    # 1. URL filters: keep only URLs that look like textual content...
    url_filter = URLPatternFilter(
        patterns=[
            "*text*",  # Include URLs that contain "text" in their path or URL
        ]
    )
    # ...and stay on groq.com (example.com explicitly blocked).
    domain_filter = DomainFilter(
        allowed_domains=["groq.com"],
        blocked_domains=["example.com"],
    )

    # 2. Deep-crawl configuration: BFS, up to 2 levels below the seed page,
    #    at most 10 pages total, same-domain only, with both filters applied.
    config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            include_external=False,
            filter_chain=FilterChain(filters=[url_filter, domain_filter]),
        ),
        verbose=True,  # Show progress during crawling
    )

    # 3. Run the crawler; the context manager handles start/close automatically.
    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            # url="https://docs.crawl4ai.com",  # Uncomment to use crawl4ai documentation as start URL
            url="https://console.groq.com/docs",
            config=config,
        )

        # 4. Report each crawled URL with its depth from the crawl metadata.
        for result in results:
            print(f"URL: {result.url}, Depth: {result.metadata.get('depth', 0)}")


if __name__ == "__main__":
    # Fix: a redundant second `import asyncio` lived in this guard; the module
    # already imports asyncio at the top.
    asyncio.run(basic_deep_crawl())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/snippets/deep_crawl/2.filters.py
docs/snippets/deep_crawl/2.filters.py
import asyncio
from typing import List

from crawl4ai import (
    AsyncWebCrawler,
    CrawlerRunConfig,
    BFSDeepCrawlStrategy,
    CrawlResult,
    URLFilter,  # Base class for filters, not directly used in examples but good to import for context
    ContentTypeFilter,
    DomainFilter,
    FilterChain,
    URLPatternFilter,
    SEOFilter,  # Advanced filter, can be introduced later or as bonus
)

# Separator rule used throughout the tutorial output.
_RULE = "=" * 40


def _heading(title: str) -> None:
    """Print a framed section heading: blank line, rule, title, rule, blank line."""
    print("\n" + _RULE)
    print(title)
    print(_RULE + "\n")


def _label(passed: bool) -> str:
    """Map a filter outcome to the tutorial's PASSED/REJECTED label."""
    return "PASSED" if passed else "REJECTED"


async def deep_crawl_filter_tutorial_part_2():
    """
    Tutorial demonstrating URL filters in Crawl4AI, focusing on isolated filter
    behavior before integrating them into a deep crawl.

    Covers: testing individual filters on synthetic URLs, combining them with
    FilterChain, and finally running a deep crawl through the combined chain.
    """
    # ── Introduction ──────────────────────────────────────────────────────────
    _heading("=== Introduction: URL Filters in Isolation ===")
    print("In this section, we will explore each filter individually using synthetic URLs.")
    print("This allows us to understand exactly how each filter works before using them in a crawl.\n")

    # ── 2. ContentTypeFilter (URL-extension based, not HTTP headers) ──────────
    _heading("=== 2. ContentTypeFilter - Testing in Isolation ===")
    type_filter = ContentTypeFilter(allowed_types=["text/html", "application/json"])
    print("ContentTypeFilter created, allowing types (by extension): ['text/html', 'application/json']")
    print("Note: ContentTypeFilter in Crawl4ai works by checking URL file extensions, not HTTP headers.")

    content_type_samples = [
        "https://example.com/page.html",     # .html  -> allowed (text/html)
        "https://example.com/data.json",     # .json  -> allowed (application/json)
        "https://example.com/image.png",     # .png   -> rejected
        "https://example.com/document.pdf",  # .pdf   -> rejected
        "https://example.com/page",          # no extension -> passes (default behaviour)
        "https://example.com/page.xhtml",    # .xhtml -> allowed (text/html)
    ]

    print("\n=== Testing ContentTypeFilter (URL Extension based) ===")
    for candidate in content_type_samples:
        verdict = _label(type_filter.apply(candidate))
        # Show the extracted extension for clarity (private helper, demo only).
        ext = ContentTypeFilter._extract_extension(candidate)
        print(f"- URL: {candidate} - {verdict} (Extension: '{ext or 'No Extension'}')")
    print(_RULE)
    input("Press Enter to continue to DomainFilter example...")

    # ── 3. DomainFilter ───────────────────────────────────────────────────────
    _heading("=== 3. DomainFilter - Testing in Isolation ===")
    domain_filter = DomainFilter(allowed_domains=["crawl4ai.com", "example.com"])
    print("DomainFilter created, allowing domains: ['crawl4ai.com', 'example.com']")

    domain_samples = [
        "https://docs.crawl4ai.com/api",
        "https://example.com/products",
        "https://another-website.org/blog",
        "https://sub.example.com/about",
        "https://crawl4ai.com.attacker.net",  # lookalike domain -> should be rejected
    ]

    print("\n=== Testing DomainFilter ===")
    for candidate in domain_samples:
        print(f"- URL: {candidate} - {_label(domain_filter.apply(candidate))}")
    print(_RULE)
    input("Press Enter to continue to FilterChain example...")

    # ── 4. FilterChain — combining filters ────────────────────────────────────
    _heading("=== 4. FilterChain - Combining Filters ===")
    combined_filter = FilterChain(
        filters=[
            URLPatternFilter(patterns=["*api*"]),
            ContentTypeFilter(allowed_types=["text/html"]),  # still URL-extension based
            DomainFilter(allowed_domains=["docs.crawl4ai.com"]),
        ]
    )
    print("FilterChain created, combining URLPatternFilter, ContentTypeFilter, and DomainFilter.")

    chained_samples = [
        "https://docs.crawl4ai.com/api/async-webcrawler",
        "https://example.com/api/products",
        "https://docs.crawl4ai.com/core/crawling",
        "https://another-website.org/api/data",
    ]

    print("\n=== Testing FilterChain (URLPatternFilter + ContentTypeFilter + DomainFilter) ===")
    for candidate in chained_samples:
        passed = await combined_filter.apply(candidate)  # FilterChain.apply is async
        print(f"- URL: {candidate} - {_label(passed)}")
    print(_RULE)
    input("Press Enter to continue to Deep Crawl with FilterChain example...")

    # ── 5. Deep crawl through the combined chain ──────────────────────────────
    _heading("=== 5. Deep Crawl with FilterChain ===")
    print("Finally, let's integrate the FilterChain into a deep crawl example.")

    crawl_config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(
            max_depth=2,
            max_pages=10,
            include_external=False,
            filter_chain=combined_filter,
        ),
        verbose=False,
    )

    async with AsyncWebCrawler() as crawler:
        crawl_results: List[CrawlResult] = await crawler.arun(
            url="https://docs.crawl4ai.com",
            config=crawl_config,
        )

    print("=== Crawled URLs (Deep Crawl with FilterChain) ===")
    for result in crawl_results:
        print(f"- {result.url}, Depth: {result.metadata.get('depth', 0)}")
    print(_RULE)

    print("\nTutorial Completed! Review the output of each section to understand URL filters.")


if __name__ == "__main__":
    asyncio.run(deep_crawl_filter_tutorial_part_2())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/llm_table_extraction_example.py
docs/examples/llm_table_extraction_example.py
#!/usr/bin/env python3
"""
Example demonstrating LLM-based table extraction in Crawl4AI.

This example shows how to use the LLMTableExtraction strategy to extract
complex tables from web pages, including handling rowspan, colspan, and
nested tables.
"""

import os
import sys

# Make the repo root importable when running this file directly from docs/examples.
grandparent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(grandparent_dir)

__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

import asyncio

from crawl4ai import (
    AsyncWebCrawler,
    CrawlerRunConfig,
    LLMConfig,
    LLMTableExtraction,
    CacheMode
)
import pandas as pd


# Example 1: Basic LLM Table Extraction
async def basic_llm_extraction():
    """Extract tables using LLM with default settings."""
    print("\n=== Example 1: Basic LLM Table Extraction ===")

    # Configure LLM (low temperature for consistent, deterministic-ish output)
    llm_config = LLMConfig(
        provider="openai/gpt-4.1-mini",
        api_token="env:OPENAI_API_KEY",  # Uses environment variable
        temperature=0.1,
        max_tokens=32000
    )

    # Create LLM table extraction strategy with chunking for large tables
    table_strategy = LLMTableExtraction(
        llm_config=llm_config,
        verbose=True,
        # css_selector="div.mw-content-ltr",
        max_tries=2,
        enable_chunking=True,
        chunk_token_threshold=5000,  # Lower threshold to force chunking
        min_rows_per_chunk=10,
        max_parallel_chunks=3
    )

    # Configure crawler with the strategy
    config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        table_extraction=table_strategy
    )

    async with AsyncWebCrawler() as crawler:
        # Extract tables from a Wikipedia page
        result = await crawler.arun(
            url="https://en.wikipedia.org/wiki/List_of_chemical_elements",
            config=config
        )

        if result.success:
            print(f"βœ“ Found {len(result.tables)} tables")

            # Display first table (success with zero tables is possible)
            if result.tables:
                first_table = result.tables[0]
                print(f"\nFirst table:")
                print(f" Headers: {first_table['headers'][:5]}...")
                print(f" Rows: {len(first_table['rows'])}")

                # Convert to pandas DataFrame
                df = pd.DataFrame(
                    first_table['rows'],
                    columns=first_table['headers']
                )
                print(f"\nDataFrame shape: {df.shape}")
                print(df.head())
        else:
            # NOTE(review): CrawlResult usually exposes the failure text as
            # `error_message`; confirm `result.error` exists before relying on it.
            print(f"βœ— Extraction failed: {result.error}")


# Example 2: Focused Extraction with CSS Selector
async def focused_extraction():
    """Extract tables from specific page sections using CSS selectors."""
    print("\n=== Example 2: Focused Extraction with CSS Selector ===")

    # HTML with multiple tables; only the one under .main-content should be kept.
    test_html = """
    <html>
    <body>
        <div class="sidebar">
            <table role="presentation">
                <tr><td>Navigation</td></tr>
            </table>
        </div>
        <div class="main-content">
            <table id="data-table">
                <caption>Quarterly Sales Report</caption>
                <thead>
                    <tr>
                        <th rowspan="2">Product</th>
                        <th colspan="3">Q1 2024</th>
                    </tr>
                    <tr>
                        <th>Jan</th>
                        <th>Feb</th>
                        <th>Mar</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td>Widget A</td>
                        <td>100</td>
                        <td>120</td>
                        <td>140</td>
                    </tr>
                    <tr>
                        <td>Widget B</td>
                        <td>200</td>
                        <td>180</td>
                        <td>220</td>
                    </tr>
                </tbody>
            </table>
        </div>
    </body>
    </html>
    """

    llm_config = LLMConfig(
        provider="openai/gpt-4.1-mini",
        api_token="env:OPENAI_API_KEY"
    )

    # Focus only on main content area
    table_strategy = LLMTableExtraction(
        llm_config=llm_config,
        css_selector=".main-content",  # Only extract from main content
        verbose=True
    )

    config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        table_extraction=table_strategy
    )

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=f"raw:{test_html}",
            config=config
        )

        if result.success and result.tables:
            table = result.tables[0]
            print(f"βœ“ Extracted table: {table.get('caption', 'No caption')}")
            print(f" Headers: {table['headers']}")
            print(f" Metadata: {table['metadata']}")

            # The LLM should have handled the rowspan/colspan correctly
            print("\nProcessed data (rowspan/colspan handled):")
            for i, row in enumerate(table['rows']):
                print(f" Row {i+1}: {row}")


# Example 3: Comparing with Default Extraction
async def compare_strategies():
    """Compare LLM extraction with default extraction on complex tables."""
    print("\n=== Example 3: Comparing LLM vs Default Extraction ===")

    # Complex table with nested header structure and a spanning note row
    complex_html = """
    <html>
    <body>
        <table>
            <tr>
                <th rowspan="3">Category</th>
                <th colspan="2">2023</th>
                <th colspan="2">2024</th>
            </tr>
            <tr>
                <th>H1</th>
                <th>H2</th>
                <th>H1</th>
                <th>H2</th>
            </tr>
            <tr>
                <td colspan="4">All values in millions</td>
            </tr>
            <tr>
                <td>Revenue</td>
                <td>100</td>
                <td>120</td>
                <td>130</td>
                <td>145</td>
            </tr>
            <tr>
                <td>Profit</td>
                <td>20</td>
                <td>25</td>
                <td>28</td>
                <td>32</td>
            </tr>
        </table>
    </body>
    </html>
    """

    async with AsyncWebCrawler() as crawler:
        # Test with default extraction
        from crawl4ai import DefaultTableExtraction

        default_strategy = DefaultTableExtraction(
            table_score_threshold=3,
            verbose=True
        )
        config_default = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            table_extraction=default_strategy
        )
        result_default = await crawler.arun(
            url=f"raw:{complex_html}",
            config=config_default
        )

        # Test with LLM extraction
        llm_strategy = LLMTableExtraction(
            llm_config=LLMConfig(
                provider="openai/gpt-4.1-mini",
                api_token="env:OPENAI_API_KEY"
            ),
            verbose=True
        )
        config_llm = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            table_extraction=llm_strategy
        )
        result_llm = await crawler.arun(
            url=f"raw:{complex_html}",
            config=config_llm
        )

        # Compare results side by side
        print("\nDefault Extraction:")
        if result_default.tables:
            table = result_default.tables[0]
            print(f" Headers: {table.get('headers', [])}")
            print(f" Rows: {len(table.get('rows', []))}")
            for i, row in enumerate(table.get('rows', [])[:3]):
                print(f" Row {i+1}: {row}")

        print("\nLLM Extraction (handles complex structure better):")
        if result_llm.tables:
            table = result_llm.tables[0]
            print(f" Headers: {table.get('headers', [])}")
            print(f" Rows: {len(table.get('rows', []))}")
            for i, row in enumerate(table.get('rows', [])):
                print(f" Row {i+1}: {row}")
            print(f" Metadata: {table.get('metadata', {})}")


# Example 4: Batch Processing Multiple Pages
async def batch_extraction():
    """Extract tables from multiple pages efficiently."""
    print("\n=== Example 4: Batch Table Extraction ===")

    urls = [
        "https://www.worldometers.info/geography/alphabetical-list-of-countries/",
        # "https://en.wikipedia.org/wiki/List_of_chemical_elements",
    ]

    llm_config = LLMConfig(
        provider="openai/gpt-4.1-mini",
        api_token="env:OPENAI_API_KEY",
        temperature=0.1,
        max_tokens=1500
    )

    table_strategy = LLMTableExtraction(
        llm_config=llm_config,
        css_selector="div.datatable-container",  # Wikipedia data tables
        verbose=False,
        enable_chunking=True,
        chunk_token_threshold=5000,  # Lower threshold to force chunking
        min_rows_per_chunk=10,
        max_parallel_chunks=3
    )

    config = CrawlerRunConfig(
        table_extraction=table_strategy,
        cache_mode=CacheMode.BYPASS
    )

    all_tables = []

    async with AsyncWebCrawler() as crawler:
        for url in urls:
            print(f"\nProcessing: {url.split('/')[-1][:50]}...")
            result = await crawler.arun(url=url, config=config)

            if result.success and result.tables:
                print(f" βœ“ Found {len(result.tables)} tables")
                # Store first table from each page.
                # (Fix: a second nested `if result.tables:` here was dead code —
                # the outer condition already guarantees tables exist.)
                all_tables.append({
                    'url': url,
                    'table': result.tables[0]
                })

    # Summary
    print(f"\n=== Summary ===")
    print(f"Extracted {len(all_tables)} tables from {len(urls)} pages")
    for item in all_tables:
        table = item['table']
        print(f"\nFrom {item['url'].split('/')[-1][:30]}:")
        print(f" Columns: {len(table['headers'])}")
        print(f" Rows: {len(table['rows'])}")


async def main():
    """Run all examples."""
    print("=" * 60)
    print("LLM TABLE EXTRACTION EXAMPLES")
    print("=" * 60)

    # Run examples (comment out ones you don't want to run)

    # Basic extraction
    await basic_llm_extraction()

    # # Focused extraction with CSS
    # await focused_extraction()

    # # Compare strategies
    # await compare_strategies()

    # # Batch processing
    # await batch_extraction()

    print("\n" + "=" * 60)
    print("ALL EXAMPLES COMPLETED")
    print("=" * 60)


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/deepcrawl_example.py
docs/examples/deepcrawl_example.py
import asyncio import time from crawl4ai import CrawlerRunConfig, AsyncWebCrawler, CacheMode from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy from crawl4ai.deep_crawling import BFSDeepCrawlStrategy, BestFirstCrawlingStrategy from crawl4ai.deep_crawling.filters import ( FilterChain, URLPatternFilter, DomainFilter, ContentTypeFilter, ContentRelevanceFilter, SEOFilter, ) from crawl4ai.deep_crawling.scorers import ( KeywordRelevanceScorer, ) # 1️⃣ Basic Deep Crawl Setup async def basic_deep_crawl(): """ PART 1: Basic Deep Crawl setup - Demonstrates a simple two-level deep crawl. This function shows: - How to set up BFSDeepCrawlStrategy (Breadth-First Search) - Setting depth and domain parameters - Processing the results to show the hierarchy """ print("\n===== BASIC DEEP CRAWL SETUP =====") # Configure a 2-level deep crawl using Breadth-First Search strategy # max_depth=2 means: initial page (depth 0) + 2 more levels # include_external=False means: only follow links within the same domain config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=2, include_external=False), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, # Show progress during crawling ) async with AsyncWebCrawler() as crawler: start_time = time.perf_counter() results = await crawler.arun(url="https://docs.crawl4ai.com", config=config) # Group results by depth to visualize the crawl tree pages_by_depth = {} for result in results: depth = result.metadata.get("depth", 0) if depth not in pages_by_depth: pages_by_depth[depth] = [] pages_by_depth[depth].append(result.url) print(f"βœ… Crawled {len(results)} pages total") # Display crawl structure by depth for depth, urls in sorted(pages_by_depth.items()): print(f"\nDepth {depth}: {len(urls)} pages") # Show first 3 URLs for each depth as examples for url in urls[:3]: print(f" β†’ {url}") if len(urls) > 3: print(f" ... 
and {len(urls) - 3} more") print( f"\nβœ… Performance: {len(results)} pages in {time.perf_counter() - start_time:.2f} seconds" ) # 2️⃣ Stream vs. Non-Stream Execution async def stream_vs_nonstream(): """ PART 2: Demonstrates the difference between stream and non-stream execution. Non-stream: Waits for all results before processing Stream: Processes results as they become available """ print("\n===== STREAM VS. NON-STREAM EXECUTION =====") # Common configuration for both examples base_config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1, include_external=False), scraping_strategy=LXMLWebScrapingStrategy(), verbose=False, ) async with AsyncWebCrawler() as crawler: # NON-STREAMING MODE print("\nπŸ“Š NON-STREAMING MODE:") print(" In this mode, all results are collected before being returned.") non_stream_config = base_config.clone() non_stream_config.stream = False start_time = time.perf_counter() results = await crawler.arun( url="https://docs.crawl4ai.com", config=non_stream_config ) print(f" βœ… Received all {len(results)} results at once") print(f" βœ… Total duration: {time.perf_counter() - start_time:.2f} seconds") # STREAMING MODE print("\nπŸ“Š STREAMING MODE:") print(" In this mode, results are processed as they become available.") stream_config = base_config.clone() stream_config.stream = True start_time = time.perf_counter() result_count = 0 first_result_time = None async for result in await crawler.arun( url="https://docs.crawl4ai.com", config=stream_config ): result_count += 1 if result_count == 1: first_result_time = time.perf_counter() - start_time print( f" βœ… First result received after {first_result_time:.2f} seconds: {result.url}" ) elif result_count % 5 == 0: # Show every 5th result for brevity print(f" β†’ Result #{result_count}: {result.url}") print(f" βœ… Total: {result_count} results") print(f" βœ… First result: {first_result_time:.2f} seconds") print(f" βœ… All results: {time.perf_counter() - start_time:.2f} seconds") 
print("\nπŸ” Key Takeaway: Streaming allows processing results immediately") # 3️⃣ Introduce Filters & Scorers async def filters_and_scorers(): """ PART 3: Demonstrates the use of filters and scorers for more targeted crawling. This function progressively adds: 1. A single URL pattern filter 2. Multiple filters in a chain 3. Scorers for prioritizing pages """ print("\n===== FILTERS AND SCORERS =====") async with AsyncWebCrawler() as crawler: # SINGLE FILTER EXAMPLE print("\nπŸ“Š EXAMPLE 1: SINGLE URL PATTERN FILTER") print(" Only crawl pages containing 'core' in the URL") # Create a filter that only allows URLs with 'guide' in them url_filter = URLPatternFilter(patterns=["*core*"]) config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=1, include_external=False, filter_chain=FilterChain([url_filter]), # Single filter ), scraping_strategy=LXMLWebScrapingStrategy(), cache_mode=CacheMode.BYPASS, verbose=True, ) results = await crawler.arun(url="https://docs.crawl4ai.com", config=config) print(f" βœ… Crawled {len(results)} pages matching '*core*'") for result in results[:3]: # Show first 3 results print(f" β†’ {result.url}") if len(results) > 3: print(f" ... and {len(results) - 3} more") # MULTIPLE FILTERS EXAMPLE print("\nπŸ“Š EXAMPLE 2: MULTIPLE FILTERS IN A CHAIN") print(" Only crawl pages that:") print(" 1. Contain '2024' in the URL") print(" 2. Are from 'techcrunch.com'") print(" 3. 
Are of text/html or application/javascript content type") # Create a chain of filters filter_chain = FilterChain( [ URLPatternFilter(patterns=["*2024*"]), DomainFilter( allowed_domains=["techcrunch.com"], blocked_domains=["guce.techcrunch.com", "oidc.techcrunch.com"], ), ContentTypeFilter( allowed_types=["text/html", "application/javascript"] ), ] ) config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=1, include_external=False, filter_chain=filter_chain ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, ) results = await crawler.arun(url="https://techcrunch.com", config=config) print(f" βœ… Crawled {len(results)} pages after applying all filters") for result in results[:3]: print(f" β†’ {result.url}") if len(results) > 3: print(f" ... and {len(results) - 3} more") # SCORERS EXAMPLE print("\nπŸ“Š EXAMPLE 3: USING A KEYWORD RELEVANCE SCORER") print( "Score pages based on relevance to keywords: 'crawl', 'example', 'async', 'configuration','javascript','css'" ) # Create a keyword relevance scorer keyword_scorer = KeywordRelevanceScorer( keywords=["crawl", "example", "async", "configuration","javascript","css"], weight=1 ) config = CrawlerRunConfig( deep_crawl_strategy=BestFirstCrawlingStrategy( max_depth=1, include_external=False, url_scorer=keyword_scorer ), scraping_strategy=LXMLWebScrapingStrategy(), cache_mode=CacheMode.BYPASS, verbose=True, stream=True, ) results = [] async for result in await crawler.arun( url="https://docs.crawl4ai.com", config=config ): results.append(result) score = result.metadata.get("score") print(f" β†’ Score: {score:.2f} | {result.url}") print(f" βœ… Crawler prioritized {len(results)} pages by relevance score") print(" πŸ” Note: BestFirstCrawlingStrategy visits highest-scoring pages first") # 4️⃣ Advanced Filters async def advanced_filters(): """ PART 4: Demonstrates advanced filtering techniques for specialized crawling. 
This function covers: - SEO filters - Text relevancy filtering - Combining advanced filters """ print("\n===== ADVANCED FILTERS =====") async with AsyncWebCrawler() as crawler: # SEO FILTER EXAMPLE print("\nπŸ“Š EXAMPLE 1: SEO FILTERS") print( "Quantitative SEO quality assessment filter based searching keywords in the head section" ) seo_filter = SEOFilter( threshold=0.5, keywords=["dynamic", "interaction", "javascript"] ) config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=1, filter_chain=FilterChain([seo_filter]) ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, cache_mode=CacheMode.BYPASS, ) results = await crawler.arun(url="https://docs.crawl4ai.com", config=config) print(f" βœ… Found {len(results)} pages with relevant keywords") for result in results: print(f" β†’ {result.url}") # ADVANCED TEXT RELEVANCY FILTER print("\nπŸ“Š EXAMPLE 2: ADVANCED TEXT RELEVANCY FILTER") # More sophisticated content relevance filter relevance_filter = ContentRelevanceFilter( query="Interact with the web using your authentic digital identity", threshold=0.7, ) config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=1, filter_chain=FilterChain([relevance_filter]) ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, cache_mode=CacheMode.BYPASS, ) results = await crawler.arun(url="https://docs.crawl4ai.com", config=config) print(f" βœ… Found {len(results)} pages") for result in results: relevance_score = result.metadata.get("relevance_score", 0) print(f" β†’ Score: {relevance_score:.2f} | {result.url}") # 5️⃣ Max Pages and Score Thresholds async def max_pages_and_thresholds(): """ PART 5: Demonstrates using max_pages and score_threshold parameters with different strategies. 
This function shows: - How to limit the number of pages crawled - How to set score thresholds for more targeted crawling - Comparing BFS, DFS, and Best-First strategies with these parameters """ print("\n===== MAX PAGES AND SCORE THRESHOLDS =====") from crawl4ai.deep_crawling import DFSDeepCrawlStrategy async with AsyncWebCrawler() as crawler: # Define a common keyword scorer for all examples keyword_scorer = KeywordRelevanceScorer( keywords=["browser", "crawler", "web", "automation"], weight=1.0 ) # EXAMPLE 1: BFS WITH MAX PAGES print("\nπŸ“Š EXAMPLE 1: BFS STRATEGY WITH MAX PAGES LIMIT") print(" Limit the crawler to a maximum of 5 pages") bfs_config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=2, include_external=False, url_scorer=keyword_scorer, max_pages=5 # Only crawl 5 pages ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, cache_mode=CacheMode.BYPASS, ) results = await crawler.arun(url="https://docs.crawl4ai.com", config=bfs_config) print(f" βœ… Crawled exactly {len(results)} pages as specified by max_pages") for result in results: depth = result.metadata.get("depth", 0) print(f" β†’ Depth: {depth} | {result.url}") # EXAMPLE 2: DFS WITH SCORE THRESHOLD print("\nπŸ“Š EXAMPLE 2: DFS STRATEGY WITH SCORE THRESHOLD") print(" Only crawl pages with a relevance score above 0.5") dfs_config = CrawlerRunConfig( deep_crawl_strategy=DFSDeepCrawlStrategy( max_depth=2, include_external=False, url_scorer=keyword_scorer, score_threshold=0.7, # Only process URLs with scores above 0.5 max_pages=10 ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, cache_mode=CacheMode.BYPASS, ) results = await crawler.arun(url="https://docs.crawl4ai.com", config=dfs_config) print(f" βœ… Crawled {len(results)} pages with scores above threshold") for result in results: score = result.metadata.get("score", 0) depth = result.metadata.get("depth", 0) print(f" β†’ Depth: {depth} | Score: {score:.2f} | {result.url}") # EXAMPLE 3: BEST-FIRST WITH BOTH 
CONSTRAINTS print("\nπŸ“Š EXAMPLE 3: BEST-FIRST STRATEGY WITH BOTH CONSTRAINTS") print(" Limit to 7 pages with scores above 0.3, prioritizing highest scores") bf_config = CrawlerRunConfig( deep_crawl_strategy=BestFirstCrawlingStrategy( max_depth=2, include_external=False, url_scorer=keyword_scorer, max_pages=7, # Limit to 7 pages total ), scraping_strategy=LXMLWebScrapingStrategy(), verbose=True, cache_mode=CacheMode.BYPASS, stream=True, ) results = [] async for result in await crawler.arun(url="https://docs.crawl4ai.com", config=bf_config): results.append(result) score = result.metadata.get("score", 0) depth = result.metadata.get("depth", 0) print(f" β†’ Depth: {depth} | Score: {score:.2f} | {result.url}") print(f" βœ… Crawled {len(results)} high-value pages with scores above 0.3") if results: avg_score = sum(r.metadata.get('score', 0) for r in results) / len(results) print(f" βœ… Average score: {avg_score:.2f}") print(" πŸ” Note: BestFirstCrawlingStrategy visited highest-scoring pages first") # 6️⃣ Wrap-Up and Key Takeaways async def wrap_up(): """ PART 6: Wrap-Up and Key Takeaways Summarize the key concepts learned in this tutorial. 
""" print("\n===== COMPLETE CRAWLER EXAMPLE =====") print("Combining filters, scorers, and streaming for an optimized crawl") # Create a sophisticated filter chain filter_chain = FilterChain( [ DomainFilter( allowed_domains=["docs.crawl4ai.com"], blocked_domains=["old.docs.crawl4ai.com"], ), URLPatternFilter(patterns=["*core*", "*advanced*", "*blog*"]), ContentTypeFilter(allowed_types=["text/html"]), ] ) # Create a composite scorer that combines multiple scoring strategies keyword_scorer = KeywordRelevanceScorer( keywords=["crawl", "example", "async", "configuration"], weight=0.7 ) # Set up the configuration config = CrawlerRunConfig( deep_crawl_strategy=BestFirstCrawlingStrategy( max_depth=1, include_external=False, filter_chain=filter_chain, url_scorer=keyword_scorer, ), scraping_strategy=LXMLWebScrapingStrategy(), stream=True, verbose=True, ) # Execute the crawl results = [] start_time = time.perf_counter() async with AsyncWebCrawler() as crawler: async for result in await crawler.arun( url="https://docs.crawl4ai.com", config=config ): results.append(result) score = result.metadata.get("score", 0) depth = result.metadata.get("depth", 0) print(f"β†’ Depth: {depth} | Score: {score:.2f} | {result.url}") duration = time.perf_counter() - start_time # Summarize the results print(f"\nβœ… Crawled {len(results)} high-value pages in {duration:.2f} seconds") print( f"βœ… Average score: {sum(r.metadata.get('score', 0) for r in results) / len(results):.2f}" ) # Group by depth depth_counts = {} for result in results: depth = result.metadata.get("depth", 0) depth_counts[depth] = depth_counts.get(depth, 0) + 1 print("\nπŸ“Š Pages crawled by depth:") for depth, count in sorted(depth_counts.items()): print(f" Depth {depth}: {count} pages") async def run_tutorial(): """ Executes all tutorial sections in sequence. 
""" print("\nπŸš€ CRAWL4AI DEEP CRAWLING TUTORIAL πŸš€") print("======================================") print("This tutorial will walk you through deep crawling techniques,") print("from basic to advanced, using the Crawl4AI library.") # Define sections - uncomment to run specific parts during development tutorial_sections = [ basic_deep_crawl, stream_vs_nonstream, filters_and_scorers, max_pages_and_thresholds, advanced_filters, wrap_up, ] for section in tutorial_sections: await section() print("\nπŸŽ‰ TUTORIAL COMPLETE! πŸŽ‰") print("You now have a comprehensive understanding of deep crawling with Crawl4AI.") print("For more information, check out https://docs.crawl4ai.com") # Execute the tutorial when run directly if __name__ == "__main__": asyncio.run(run_tutorial())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/llm_markdown_generator.py
docs/examples/llm_markdown_generator.py
import os import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode from crawl4ai import LLMConfig from crawl4ai.content_filter_strategy import LLMContentFilter async def test_llm_filter(): # Create an HTML source that needs intelligent filtering url = "https://docs.python.org/3/tutorial/classes.html" browser_config = BrowserConfig( headless=True, verbose=True ) # run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS) run_config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED) async with AsyncWebCrawler(config=browser_config) as crawler: # First get the raw HTML result = await crawler.arun(url, config=run_config) html = result.cleaned_html # Initialize LLM filter with focused instruction filter = LLMContentFilter( llm_config=LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')), instruction=""" Focus on extracting the core educational content about Python classes. Include: - Key concepts and their explanations - Important code examples - Essential technical details Exclude: - Navigation elements - Sidebars - Footer content - Version information - Any non-essential UI elements Format the output as clean markdown with proper code blocks and headers. """, verbose=True ) filter = LLMContentFilter( llm_config=LLMConfig(provider="openai/gpt-4o",api_token=os.getenv('OPENAI_API_KEY')), chunk_token_threshold=2 ** 12 * 2, # 2048 * 2 ignore_cache = True, instruction=""" Extract the main educational content while preserving its original wording and substance completely. Your task is to: 1. Maintain the exact language and terminology used in the main content 2. Keep all technical explanations, examples, and educational content intact 3. Preserve the original flow and structure of the core content 4. 
Remove only clearly irrelevant elements like: - Navigation menus - Advertisement sections - Cookie notices - Footers with site information - Sidebars with external links - Any UI elements that don't contribute to learning The goal is to create a clean markdown version that reads exactly like the original article, keeping all valuable content but free from distracting elements. Imagine you're creating a perfect reading experience where nothing valuable is lost, but all noise is removed. """, verbose=True ) # Apply filtering filtered_content = filter.filter_content(html) # Show results print("\nFiltered Content Length:", len(filtered_content)) print("\nFirst 500 chars of filtered content:") if filtered_content: print(filtered_content[0][:500]) # Save on disc the markdown version with open("filtered_content.md", "w", encoding="utf-8") as f: f.write("\n".join(filtered_content)) # Show token usage filter.show_usage() if __name__ == "__main__": asyncio.run(test_llm_filter())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/async_webcrawler_multiple_urls_example.py
docs/examples/async_webcrawler_multiple_urls_example.py
# File: async_webcrawler_multiple_urls_example.py import os, sys # append 2 parent directories to sys.path to import crawl4ai parent_dir = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ) sys.path.append(parent_dir) import asyncio from crawl4ai import AsyncWebCrawler async def main(): # Initialize the AsyncWebCrawler async with AsyncWebCrawler(verbose=True) as crawler: # List of URLs to crawl urls = [ "https://example.com", "https://python.org", "https://github.com", "https://stackoverflow.com", "https://news.ycombinator.com", ] # Set up crawling parameters word_count_threshold = 100 # Run the crawling process for multiple URLs results = await crawler.arun_many( urls=urls, word_count_threshold=word_count_threshold, bypass_cache=True, verbose=True, ) # Process the results for result in results: if result.success: print(f"Successfully crawled: {result.url}") print(f"Title: {result.metadata.get('title', 'N/A')}") print(f"Word count: {len(result.markdown.split())}") print( f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}" ) print(f"Number of images: {len(result.media.get('images', []))}") print("---") else: print(f"Failed to crawl: {result.url}") print(f"Error: {result.error_message}") print("---") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/summarize_page.py
docs/examples/summarize_page.py
import os import json from crawl4ai.web_crawler import WebCrawler from crawl4ai.chunking_strategy import * from crawl4ai import * from crawl4ai.crawler_strategy import * url = r"https://marketplace.visualstudio.com/items?itemName=Unclecode.groqopilot" crawler = WebCrawler() crawler.warmup() from pydantic import BaseModel, Field class PageSummary(BaseModel): title: str = Field(..., description="Title of the page.") summary: str = Field(..., description="Summary of the page.") brief_summary: str = Field(..., description="Brief summary of the page.") keywords: list = Field(..., description="Keywords assigned to the page.") result = crawler.run( url=url, word_count_threshold=1, extraction_strategy=LLMExtractionStrategy( provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY"), schema=PageSummary.model_json_schema(), extraction_type="schema", apply_chunking=False, instruction="From the crawled content, extract the following details: " "1. Title of the page " "2. Summary of the page, which is a detailed summary " "3. Brief summary of the page, which is a paragraph text " "4. Keywords assigned to the page, which is a list of keywords. " "The extracted JSON format should look like this: " '{ "title": "Page Title", "summary": "Detailed summary of the page.", "brief_summary": "Brief summary in a paragraph.", "keywords": ["keyword1", "keyword2", "keyword3"] }', ), bypass_cache=True, ) page_summary = json.loads(result.extracted_content) print(page_summary) with open(".data/page_summary.json", "w", encoding="utf-8") as f: f.write(result.extracted_content)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_example.py
docs/examples/docker_example.py
import requests import json import time import sys import base64 import os from typing import Dict, Any class Crawl4AiTester: def __init__(self, base_url: str = "http://localhost:11235"): self.base_url = base_url def submit_and_wait( self, request_data: Dict[str, Any], timeout: int = 300 ) -> Dict[str, Any]: # Submit crawl job using async endpoint response = requests.post( f"{self.base_url}/crawl/job", json=request_data ) response.raise_for_status() job_response = response.json() task_id = job_response["task_id"] print(f"Submitted job with task_id: {task_id}") # Poll for result start_time = time.time() while True: if time.time() - start_time > timeout: raise TimeoutError( f"Task {task_id} did not complete within {timeout} seconds" ) result = requests.get( f"{self.base_url}/crawl/job/{task_id}" ) result.raise_for_status() status = result.json() if status["status"] == "failed": print("Task failed:", status.get("error")) raise Exception(f"Task failed: {status.get('error')}") if status["status"] == "completed": return status time.sleep(2) def submit_sync(self, request_data: Dict[str, Any]) -> Dict[str, Any]: # Use synchronous crawl endpoint response = requests.post( f"{self.base_url}/crawl", json=request_data, timeout=60, ) if response.status_code == 408: raise TimeoutError("Task did not complete within server timeout") response.raise_for_status() return response.json() def test_docker_deployment(version="basic"): tester = Crawl4AiTester( base_url="http://localhost:11235", ) print(f"Testing Crawl4AI Docker {version} version") # Health check with timeout and retry max_retries = 5 for i in range(max_retries): try: health = requests.get(f"{tester.base_url}/health", timeout=10) print("Health check:", health.json()) break except requests.exceptions.RequestException: if i == max_retries - 1: print(f"Failed to connect after {max_retries} attempts") sys.exit(1) print(f"Waiting for service to start (attempt {i+1}/{max_retries})...") time.sleep(5) # Test cases based on version 
test_basic_crawl(tester) test_basic_crawl_sync(tester) if version in ["full", "transformer"]: test_cosine_extraction(tester) test_js_execution(tester) test_css_selector(tester) test_structured_extraction(tester) test_llm_extraction(tester) test_llm_with_ollama(tester) test_screenshot(tester) def test_basic_crawl(tester: Crawl4AiTester): print("\n=== Testing Basic Crawl (Async) ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {}, "crawler_config": {} } result = tester.submit_and_wait(request) print(f"Basic crawl result count: {len(result['result']['results'])}") assert result["result"]["success"] assert len(result["result"]["results"]) > 0 assert len(result["result"]["results"][0]["markdown"]) > 0 def test_basic_crawl_sync(tester: Crawl4AiTester): print("\n=== Testing Basic Crawl (Sync) ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {}, "crawler_config": {} } result = tester.submit_sync(request) print(f"Basic crawl result count: {len(result['results'])}") assert result["success"] assert len(result["results"]) > 0 assert len(result["results"][0]["markdown"]) > 0 def test_js_execution(tester: Crawl4AiTester): print("\n=== Testing JS Execution ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, "crawler_config": { "js_code": [ "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); if(loadMoreButton) loadMoreButton.click();" ], "wait_for": "wide-tease-item__wrapper df flex-column flex-row-m flex-nowrap-m enable-new-sports-feed-mobile-design(10)" } } result = tester.submit_and_wait(request) print(f"JS execution result count: {len(result['result']['results'])}") assert result["result"]["success"] def test_css_selector(tester: Crawl4AiTester): print("\n=== Testing CSS Selector ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, 
"crawler_config": { "css_selector": ".wide-tease-item__description", "word_count_threshold": 10 } } result = tester.submit_and_wait(request) print(f"CSS selector result count: {len(result['result']['results'])}") assert result["result"]["success"] def test_structured_extraction(tester: Crawl4AiTester): print("\n=== Testing Structured Extraction ===") schema = { "name": "Cryptocurrency Prices", "baseSelector": "table[data-testid=\"prices-table\"] tbody tr", "fields": [ { "name": "asset_name", "selector": "td:nth-child(2) p.cds-headline-h4steop", "type": "text" }, { "name": "asset_symbol", "selector": "td:nth-child(2) p.cds-label2-l1sm09ec", "type": "text" }, { "name": "asset_image_url", "selector": "td:nth-child(2) img[alt=\"Asset Symbol\"]", "type": "attribute", "attribute": "src" }, { "name": "asset_url", "selector": "td:nth-child(2) a[aria-label^=\"Asset page for\"]", "type": "attribute", "attribute": "href" }, { "name": "price", "selector": "td:nth-child(3) div.cds-typographyResets-t6muwls.cds-body-bwup3gq", "type": "text" }, { "name": "change", "selector": "td:nth-child(7) p.cds-body-bwup3gq", "type": "text" } ] } request = { "urls": ["https://www.coinbase.com/explore"], "browser_config": {}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "JsonCssExtractionStrategy", "params": {"schema": schema} } } } } result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} items") if extracted: print("Sample item:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] assert len(extracted) > 0 def test_llm_extraction(tester: Crawl4AiTester): print("\n=== Testing LLM Extraction ===") schema = { "type": "object", "properties": { "asset_name": { "type": "string", "description": "Name of the asset.", }, "price": { "type": "string", "description": "Price of the asset.", }, "change": { "type": "string", "description": 
"Change in price of the asset.", }, }, "required": ["asset_name", "price", "change"], } request = { "urls": ["https://www.coinbase.com/en-in/explore"], "browser_config": {}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "LLMExtractionStrategy", "params": { "llm_config": { "type": "LLMConfig", "params": { "provider": "gemini/gemini-2.0-flash-exp", "api_token": os.getenv("GEMINI_API_KEY") } }, "schema": schema, "extraction_type": "schema", "instruction": "From the crawled content, extract asset names along with their prices and change in price.", } }, "word_count_threshold": 1 } } } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} asset pricing entries") if extracted: print("Sample entry:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] except Exception as e: print(f"LLM extraction test failed (might be due to missing API key): {str(e)}") def test_llm_with_ollama(tester: Crawl4AiTester): print("\n=== Testing LLM with Ollama ===") # Check if Ollama is accessible first try: ollama_response = requests.get("http://localhost:11434/api/tags", timeout=5) ollama_response.raise_for_status() print("Ollama is accessible") except: print("Ollama is not accessible, skipping test") return schema = { "type": "object", "properties": { "article_title": { "type": "string", "description": "The main title of the news article", }, "summary": { "type": "string", "description": "A brief summary of the article content", }, "main_topics": { "type": "array", "items": {"type": "string"}, "description": "Main topics or themes discussed in the article", }, }, } request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"verbose": True}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "LLMExtractionStrategy", "params": { "llm_config": { "type": "LLMConfig", 
"params": { "provider": "ollama/llama3.2:latest", } }, "schema": schema, "extraction_type": "schema", "instruction": "Extract the main article information including title, summary, and main topics.", } }, "word_count_threshold": 1 } } } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print("Extracted content:", json.dumps(extracted, indent=2)) assert result["result"]["success"] except Exception as e: print(f"Ollama extraction test failed: {str(e)}") def test_cosine_extraction(tester: Crawl4AiTester): print("\n=== Testing Cosine Extraction ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "CosineStrategy", "params": { "semantic_filter": "business finance economy", "word_count_threshold": 10, "max_dist": 0.2, "top_k": 3, } } } } } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} text clusters") if extracted: print("First cluster tags:", extracted[0]["tags"]) assert result["result"]["success"] except Exception as e: print(f"Cosine extraction test failed: {str(e)}") def test_screenshot(tester: Crawl4AiTester): print("\n=== Testing Screenshot ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "screenshot": True } } } result = tester.submit_and_wait(request) screenshot_data = result["result"]["results"][0]["screenshot"] print("Screenshot captured:", bool(screenshot_data)) if screenshot_data: # Save screenshot screenshot_bytes = base64.b64decode(screenshot_data) with open("test_screenshot.jpg", "wb") as f: f.write(screenshot_bytes) print("Screenshot saved as test_screenshot.jpg") assert result["result"]["success"] if __name__ == "__main__": version = 
sys.argv[1] if len(sys.argv) > 1 else "basic" test_docker_deployment(version)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_python_rest_api.py
docs/examples/docker_python_rest_api.py
import asyncio
import json
from typing import Optional
from urllib.parse import quote


async def get_token(session, email: str = "test@example.com") -> str:
    """Fetch a JWT token from the /token endpoint.

    Args:
        session: An open aiohttp ClientSession.
        email: Email to register the token under.

    Returns:
        The access token string.

    Raises:
        Exception: If the server does not return HTTP 200.
    """
    url = "http://localhost:8000/token"
    payload = {"email": email}
    print(f"\nFetching token from {url} with email: {email}")
    try:
        async with session.post(url, json=payload) as response:
            status = response.status
            data = await response.json()
            print(f"Token Response Status: {status}")
            print(f"Token Response: {json.dumps(data, indent=2)}")
            if status == 200:
                return data["access_token"]
            else:
                raise Exception(f"Failed to get token: {data.get('detail', 'Unknown error')}")
    except Exception as e:
        print(f"Error fetching token: {str(e)}")
        raise


async def test_endpoint(
    session,
    endpoint: str,
    url: str,
    token: str,
    params: Optional[dict] = None,
    expected_status: int = 200
) -> Optional[dict]:
    """Test an endpoint with token and print results.

    Returns the decoded response (dict or text) on success, or None when
    the request fails or the status does not match ``expected_status``.
    """
    params = params or {}
    param_str = "&".join(f"{k}={v}" for k, v in params.items())
    full_url = f"http://localhost:8000/{endpoint}/{quote(url)}"
    if param_str:
        full_url += f"?{param_str}"

    headers = {"Authorization": f"Bearer {token}"}
    print(f"\nTesting: {full_url}")
    try:
        async with session.get(full_url, headers=headers) as response:
            status = response.status
            try:
                data = await response.json()
            # FIX: was a bare ``except:`` which also swallows
            # asyncio.CancelledError / KeyboardInterrupt; only a failed JSON
            # decode should fall back to the raw text body.
            except Exception:
                data = await response.text()

            print(f"Status: {status} (Expected: {expected_status})")
            if isinstance(data, dict):
                print(f"Response: {json.dumps(data, indent=2)}")
            else:
                print(f"Response: {data[:500]}...")  # First 500 chars

            assert status == expected_status, f"Expected {expected_status}, got {status}"
            return data
    except Exception as e:
        print(f"Error: {str(e)}")
        return None


async def test_stream_crawl(session, token: str):
    """Test the /crawl/stream endpoint with multiple URLs.

    Reads the NDJSON response line-by-line and prints each streamed result.
    """
    url = "http://localhost:8000/crawl/stream"
    payload = {
        "urls": [
            "https://example.com",
            "https://example.com/page1",  # Replicated example.com with variation
            "https://example.com/page2",  # Replicated example.com with variation
            "https://example.com/page3",  # Replicated example.com with variation
            # "https://www.python.org",
            # "https://news.ycombinator.com/news"
        ],
        "browser_config": {"headless": True, "viewport": {"width": 1200}},
        "crawler_config": {"stream": True, "cache_mode": "bypass"}
    }
    headers = {"Authorization": f"Bearer {token}"}
    print(f"\nTesting Streaming Crawl: {url}")
    print(f"Payload: {json.dumps(payload, indent=2)}")
    try:
        async with session.post(url, json=payload, headers=headers) as response:
            status = response.status
            print(f"Status: {status} (Expected: 200)")
            assert status == 200, f"Expected 200, got {status}"

            # Read streaming response line-by-line (NDJSON)
            async for line in response.content:
                if line:
                    data = json.loads(line.decode('utf-8').strip())
                    print(f"Streamed Result: {json.dumps(data, indent=2)}")
    except Exception as e:
        print(f"Error in streaming crawl test: {str(e)}")


async def run_tests():
    """Exercise the crawl, stream, markdown, LLM and error-case endpoints."""
    # aiohttp is imported lazily so the module can be inspected without it.
    import aiohttp

    print("Starting API Tests...")

    # Test URLs
    urls = [
        "example.com",
        "https://www.python.org",
        "https://news.ycombinator.com/news",
        "https://github.com/trending"
    ]

    async with aiohttp.ClientSession() as session:
        token = "test_token"  # If jwt is enabled, authenticate first
        # Fetch token once and reuse it
        # token = await get_token(session)
        # if not token:
        #     print("Aborting tests due to token failure!")
        #     return

        print("\n=== Testing Crawl Endpoint ===")
        crawl_payload = {
            "urls": ["https://example.com"],
            "browser_config": {"headless": True},
            "crawler_config": {"stream": False}
        }
        async with session.post(
            "http://localhost:8000/crawl",
            json=crawl_payload,
            headers={"Authorization": f"Bearer {token}"}
        ) as response:
            status = response.status
            data = await response.json()
            print(f"\nCrawl Endpoint Status: {status}")
            print(f"Crawl Response: {json.dumps(data, indent=2)}")

        print("\n=== Testing Crawl Stream Endpoint ===")
        await test_stream_crawl(session, token)

        print("\n=== Testing Markdown Endpoint ===")
        # NOTE(review): markdown tests are deliberately disabled (empty list);
        # restore by replacing ``[]`` with ``urls``.
        for url in []:  # urls:
            for filter_type in ["raw", "fit", "bm25", "llm"]:
                params = {"f": filter_type}
                if filter_type in ["bm25", "llm"]:
                    params["q"] = "extract main content"
                for cache in ["0", "1"]:
                    params["c"] = cache
                    await test_endpoint(session, "md", url, token, params)
                    await asyncio.sleep(1)  # Be nice to the server

        print("\n=== Testing LLM Endpoint ===")
        for url in urls:
            # Test basic extraction (direct response now)
            result = await test_endpoint(
                session, "llm", url, token,
                {"q": "Extract title and main content"}
            )

            # Test with schema (direct response)
            schema = {
                "type": "object",
                "properties": {
                    "title": {"type": "string"},
                    "content": {"type": "string"},
                    "links": {"type": "array", "items": {"type": "string"}}
                }
            }
            result = await test_endpoint(
                session, "llm", url, token,
                {
                    "q": "Extract content with links",
                    "s": json.dumps(schema),
                    "c": "1"  # Test with cache
                }
            )
            await asyncio.sleep(2)  # Be nice to the server

        print("\n=== Testing Error Cases ===")
        # Test invalid URL
        await test_endpoint(
            session, "md", "not_a_real_url", token,
            expected_status=500
        )

        # Test invalid filter type
        await test_endpoint(
            session, "md", "example.com", token,
            {"f": "invalid"},
            expected_status=422
        )

        # Test LLM without query (should fail per your server logic)
        await test_endpoint(
            session, "llm", "example.com", token,
            expected_status=400
        )

    print("\nAll tests completed!")


if __name__ == "__main__":
    asyncio.run(run_tests())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/builtin_browser_example.py
docs/examples/builtin_browser_example.py
#!/usr/bin/env python3 """ Builtin Browser Example This example demonstrates how to use Crawl4AI's builtin browser feature, which simplifies the browser management process. With builtin mode: - No need to manually start or connect to a browser - No need to manage CDP URLs or browser processes - Automatically connects to an existing browser or launches one if needed - Browser persists between script runs, reducing startup time - No explicit cleanup or close() calls needed The example also demonstrates "auto-starting" where you don't need to explicitly call start() method on the crawler. """ import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode import time async def crawl_with_builtin_browser(): """ Simple example of crawling with the builtin browser. Key features: 1. browser_mode="builtin" in BrowserConfig 2. No explicit start() call needed 3. No explicit close() needed """ print("\n=== Crawl4AI Builtin Browser Example ===\n") # Create a browser configuration with builtin mode browser_config = BrowserConfig( browser_mode="builtin", # This is the key setting! headless=True # Can run headless for background operation ) # Create crawler run configuration crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, # Skip cache for this demo screenshot=True, # Take a screenshot verbose=True # Show verbose logging ) # Create the crawler instance # Note: We don't need to use "async with" context manager crawler = AsyncWebCrawler(config=browser_config) # Start crawling several URLs - no explicit start() needed! 
# The crawler will automatically connect to the builtin browser print("\n➑️ Crawling first URL...") t0 = time.time() result1 = await crawler.arun( url="https://crawl4ai.com", config=crawler_config ) t1 = time.time() print(f"βœ… First URL crawled in {t1-t0:.2f} seconds") print(f" Got {len(result1.markdown.raw_markdown)} characters of content") print(f" Title: {result1.metadata.get('title', 'No title')}") # Try another URL - the browser is already running, so this should be faster print("\n➑️ Crawling second URL...") t0 = time.time() result2 = await crawler.arun( url="https://example.com", config=crawler_config ) t1 = time.time() print(f"βœ… Second URL crawled in {t1-t0:.2f} seconds") print(f" Got {len(result2.markdown.raw_markdown)} characters of content") print(f" Title: {result2.metadata.get('title', 'No title')}") # The builtin browser continues running in the background # No need to explicitly close it print("\nπŸ”„ The builtin browser remains running for future use") print(" You can use 'crwl browser status' to check its status") print(" or 'crwl browser stop' to stop it when completely done") async def main(): """Run the example""" await crawl_with_builtin_browser() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/dfs_crawl_demo.py
docs/examples/dfs_crawl_demo.py
""" Simple demonstration of the DFS deep crawler visiting multiple pages. Run with: python docs/examples/dfs_crawl_demo.py """ import asyncio from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig from crawl4ai.async_webcrawler import AsyncWebCrawler from crawl4ai.cache_context import CacheMode from crawl4ai.deep_crawling.dfs_strategy import DFSDeepCrawlStrategy from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator async def main() -> None: dfs_strategy = DFSDeepCrawlStrategy( max_depth=3, max_pages=50, include_external=False, ) config = CrawlerRunConfig( deep_crawl_strategy=dfs_strategy, cache_mode=CacheMode.BYPASS, markdown_generator=DefaultMarkdownGenerator(), stream=True, ) seed_url = "https://docs.python.org/3/" # Plenty of internal links async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler: async for result in await crawler.arun(url=seed_url, config=config): depth = result.metadata.get("depth") status = "SUCCESS" if result.success else "FAILED" print(f"[{status}] depth={depth} url={result.url}") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/identity_based_browsing.py
docs/examples/identity_based_browsing.py
""" Identity-Based Browsing Example with Crawl4AI This example demonstrates how to: 1. Create a persistent browser profile interactively 2. List available profiles 3. Use a saved profile for crawling authenticated sites 4. Delete profiles when no longer needed Uses the new BrowserProfiler class for profile management. """ import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig from crawl4ai.browser_profiler import BrowserProfiler from crawl4ai.async_logger import AsyncLogger from colorama import Fore, Style, init # Initialize colorama init() # Create a shared logger instance logger = AsyncLogger(verbose=True) # Create a shared BrowserProfiler instance profiler = BrowserProfiler(logger=logger) async def crawl_with_profile(profile_path, url): """Use a profile to crawl an authenticated page""" logger.info(f"\nCrawling {Fore.CYAN}{url}{Style.RESET_ALL} using profile at {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="CRAWL") # Create browser config with the profile path browser_config = BrowserConfig( headless=False, # Set to False if you want to see the browser window use_managed_browser=True, # Required for persistent profiles user_data_dir=profile_path ) start_time = asyncio.get_event_loop().time() # Initialize crawler with the browser config async with AsyncWebCrawler(config=browser_config) as crawler: # Crawl the URL - You should have access to authenticated content now result = await crawler.arun(url) elapsed_time = asyncio.get_event_loop().time() - start_time if result.success: # Use url_status method for consistent logging logger.url_status(url, True, elapsed_time, tag="CRAWL") # Print page title or some indication of success title = result.metadata.get("title", "") logger.success(f"Page title: {Fore.GREEN}{title}{Style.RESET_ALL}", tag="CRAWL") return result else: # Log error status logger.error_status(url, result.error_message, tag="CRAWL") return None async def main(): logger.info(f"{Fore.CYAN}Identity-Based Browsing Example with 
Crawl4AI{Style.RESET_ALL}", tag="DEMO") logger.info("This example demonstrates using profiles for authenticated browsing", tag="DEMO") # Choose between interactive mode and automatic mode mode = input(f"{Fore.CYAN}Run in [i]nteractive mode or [a]utomatic mode? (i/a): {Style.RESET_ALL}").lower() if mode == 'i': # Interactive profile management - use the interactive_manager method # Pass the crawl_with_profile function as the callback for the "crawl a website" option await profiler.interactive_manager(crawl_callback=crawl_with_profile) else: # Automatic mode - simplified example profiles = profiler.list_profiles() if not profiles: # Create a new profile if none exists logger.info("No profiles found. Creating a new one...", tag="DEMO") profile_path = await profiler.create_profile() if not profile_path: logger.error("Cannot proceed without a valid profile", tag="DEMO") return else: # Use the first (most recent) profile profile_path = profiles[0]["path"] logger.info(f"Using existing profile: {Fore.CYAN}{profiles[0]['name']}{Style.RESET_ALL}", tag="DEMO") # Example: Crawl an authenticated page urls_to_crawl = [ "https://github.com/settings/profile", # GitHub requires login # "https://twitter.com/home", # Twitter requires login # "https://www.linkedin.com/feed/", # LinkedIn requires login ] for url in urls_to_crawl: await crawl_with_profile(profile_path, url) if __name__ == "__main__": try: # Run the async main function asyncio.run(main()) except KeyboardInterrupt: logger.warning("Example interrupted by user", tag="DEMO") except Exception as e: logger.error(f"Error in example: {str(e)}", tag="DEMO")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/session_id_example.py
docs/examples/session_id_example.py
import asyncio
from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CrawlerRunConfig,
    DefaultMarkdownGenerator,
    PruningContentFilter,
    CrawlResult
)


async def main():
    """Demonstrate session reuse: crawl once, then run JS in the same open page."""
    # Headful + verbose so the browser window and its logs are visible.
    browser_config = BrowserConfig(
        headless=False,
        verbose=True,
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        crawler_config = CrawlerRunConfig(
            session_id= "hello_world",  # This helps us reuse the same page
        )
        result : CrawlResult = await crawler.arun(
            url="https://www.helloworld.org",
            config=crawler_config
        )
        # Add a breakpoint here; you will see the page is open and the browser is not closed
        print(result.markdown.raw_markdown[:500])

        # Same session, js_only=True: no new navigation, just run JS in the live page.
        new_config = crawler_config.clone(js_code=["(() => ({'data':'hello'}))()"], js_only=True)
        result : CrawlResult = await crawler.arun(  # This time there is no fetch and this only executes JS in the same opened page
            url="https://www.helloworld.org",
            config= new_config
        )
        print(result.js_execution_result)  # You should see {'data':'hello'} in the console

        # Get direct access to the Playwright page object. This works only if you use the same session_id and pass the same config
        page, context = crawler.crawler_strategy.get_page(new_config)


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_python_sdk.py
docs/examples/docker_python_sdk.py
import asyncio
from crawl4ai.docker_client import Crawl4aiDockerClient
from crawl4ai import (
    BrowserConfig,
    CrawlerRunConfig
)


async def main():
    """Demonstrate the Docker SDK: batch crawl, streaming crawl, and schema fetch."""
    async with Crawl4aiDockerClient(base_url="http://localhost:8000", verbose=True) as client:
        # If jwt is enabled, authenticate first
        # await client.authenticate("test@example.com")

        # Non-streaming crawl: all results are returned together when done.
        results = await client.crawl(
            ["https://example.com", "https://python.org"],
            browser_config=BrowserConfig(headless=True),
            crawler_config=CrawlerRunConfig()
        )
        print(f"Non-streaming results: {results}")

        # Streaming crawl: with stream=True, crawl() returns an async iterator
        # that yields each result as its page finishes.
        crawler_config = CrawlerRunConfig(stream=True)
        async for result in await client.crawl(
            ["https://example.com", "https://python.org"],
            browser_config=BrowserConfig(headless=True),
            crawler_config=crawler_config
        ):
            print(f"Streamed result: {result}")

        # Get schema describing the server's accepted config structure.
        schema = await client.get_schema()
        print(f"Schema: {schema}")


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_client_hooks_example.py
docs/examples/docker_client_hooks_example.py
#!/usr/bin/env python3
"""
Comprehensive hooks examples using Docker Client with function objects.

This approach is recommended because:
- Write hooks as regular Python functions
- Full IDE support (autocomplete, type checking)
- Automatic conversion to API format
- Reusable and testable code
- Clean, readable syntax
"""

import asyncio
from crawl4ai import Crawl4aiDockerClient

# API_BASE_URL = "http://localhost:11235"
API_BASE_URL = "http://localhost:11234"

# ============================================================================
# Hook Function Definitions
# ============================================================================

# --- All Hooks Demo ---

async def browser_created_hook(browser, **kwargs):
    """Called after browser is created"""
    print("[HOOK] Browser created and ready")
    return browser

async def page_context_hook(page, context, **kwargs):
    """Setup page environment: viewport, cookies, and resource blocking"""
    print("[HOOK] Setting up page environment")

    # Set viewport
    await page.set_viewport_size({"width": 1920, "height": 1080})

    # Add cookies
    await context.add_cookies([{
        "name": "test_session",
        "value": "abc123xyz",
        "domain": ".httpbin.org",
        "path": "/"
    }])

    # Block resources
    await context.route("**/*.{png,jpg,jpeg,gif}", lambda route: route.abort())
    await context.route("**/analytics/*", lambda route: route.abort())

    print("[HOOK] Environment configured")
    return page

async def user_agent_hook(page, context, user_agent, **kwargs):
    """Called when user agent is updated"""
    print(f"[HOOK] User agent: {user_agent[:50]}...")
    return page

async def before_goto_hook(page, context, url, **kwargs):
    """Called before navigating to URL; injects custom headers"""
    print(f"[HOOK] Navigating to: {url}")
    await page.set_extra_http_headers({
        "X-Custom-Header": "crawl4ai-test",
        "Accept-Language": "en-US"
    })
    return page

async def after_goto_hook(page, context, url, response, **kwargs):
    """Called after page loads; waits briefly for the body element"""
    print(f"[HOOK] Page loaded: {url}")
    await page.wait_for_timeout(1000)
    try:
        await page.wait_for_selector("body", timeout=2000)
        print("[HOOK] Body element ready")
    # FIX: was a bare ``except:`` — in an async hook that also swallows
    # asyncio.CancelledError; only real wait failures should be tolerated.
    except Exception:
        print("[HOOK] Timeout, continuing")
    return page

async def execution_started_hook(page, context, **kwargs):
    """Called when custom JS execution starts"""
    print("[HOOK] JS execution started")
    await page.evaluate("console.log('[HOOK] Custom JS');")
    return page

async def before_retrieve_hook(page, context, **kwargs):
    """Called before retrieving HTML; scrolls to trigger lazy content"""
    print("[HOOK] Preparing HTML retrieval")
    # Scroll for lazy content
    await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
    await page.wait_for_timeout(500)
    await page.evaluate("window.scrollTo(0, 0);")
    print("[HOOK] Scrolling complete")
    return page

async def before_return_hook(page, context, html, **kwargs):
    """Called before returning HTML; reports simple page metrics"""
    print(f"[HOOK] HTML ready: {len(html)} chars")
    metrics = await page.evaluate('''() => ({
        images: document.images.length,
        links: document.links.length,
        scripts: document.scripts.length
    })''')
    print(f"[HOOK] Metrics - Images: {metrics['images']}, Links: {metrics['links']}")
    return page

# --- Authentication Hooks ---

async def auth_context_hook(page, context, **kwargs):
    """Setup authentication context: cookies plus localStorage"""
    print("[HOOK] Setting up authentication")

    # Add auth cookies
    await context.add_cookies([{
        "name": "auth_token",
        "value": "fake_jwt_token",
        "domain": ".httpbin.org",
        "path": "/",
        "httpOnly": True
    }])

    # Set localStorage
    await page.evaluate('''
        localStorage.setItem('user_id', '12345');
        localStorage.setItem('auth_time', new Date().toISOString());
    ''')

    print("[HOOK] Auth context ready")
    return page

async def auth_headers_hook(page, context, url, **kwargs):
    """Add authentication headers (HTTP Basic + API key)"""
    print(f"[HOOK] Adding auth headers for {url}")
    import base64
    credentials = base64.b64encode(b"user:passwd").decode('ascii')
    await page.set_extra_http_headers({
        'Authorization': f'Basic {credentials}',
        'X-API-Key': 'test-key-123'
    })
    return page

# --- Performance Optimization Hooks ---

async def performance_hook(page, context, **kwargs):
    """Optimize page for performance: block heavy assets and trackers"""
    print("[HOOK] Optimizing for performance")

    # Block resource-heavy content
    await context.route("**/*.{png,jpg,jpeg,gif,webp,svg}", lambda r: r.abort())
    await context.route("**/*.{woff,woff2,ttf}", lambda r: r.abort())
    await context.route("**/*.{mp4,webm,ogg}", lambda r: r.abort())
    await context.route("**/googletagmanager.com/*", lambda r: r.abort())
    await context.route("**/google-analytics.com/*", lambda r: r.abort())
    await context.route("**/facebook.com/*", lambda r: r.abort())

    # Disable animations
    await page.add_style_tag(content='''
        *, *::before, *::after {
            animation-duration: 0s !important;
            transition-duration: 0s !important;
        }
    ''')

    print("[HOOK] Optimizations applied")
    return page

async def cleanup_hook(page, context, **kwargs):
    """Clean page before extraction: strip ads, overlays, scripts, styles"""
    print("[HOOK] Cleaning page")
    await page.evaluate('''() => {
        const selectors = [
            '.ad', '.ads', '.advertisement',
            '.popup', '.modal', '.overlay',
            '.cookie-banner', '.newsletter'
        ];
        selectors.forEach(sel => {
            document.querySelectorAll(sel).forEach(el => el.remove());
        });
        document.querySelectorAll('script, style').forEach(el => el.remove());
    }''')
    print("[HOOK] Page cleaned")
    return page

# --- Content Extraction Hooks ---

async def wait_dynamic_content_hook(page, context, url, response, **kwargs):
    """Wait for dynamic content to load; clicks 'Load More' if present"""
    print(f"[HOOK] Waiting for dynamic content on {url}")
    await page.wait_for_timeout(2000)

    # Click "Load More" if exists
    try:
        load_more = await page.query_selector('[class*="load-more"], button:has-text("Load More")')
        if load_more:
            await load_more.click()
            await page.wait_for_timeout(1000)
            print("[HOOK] Clicked 'Load More'")
    # FIX: was a bare ``except: pass`` — swallowed cancellation as well.
    # Best-effort behavior is intentional, but only for ordinary errors.
    except Exception:
        pass
    return page

async def extract_metadata_hook(page, context, **kwargs):
    """Extract page metadata, then scroll to trigger infinite-scroll content"""
    print("[HOOK] Extracting metadata")
    metadata = await page.evaluate('''() => {
        const getMeta = (name) => {
            const el = document.querySelector(`meta[name="${name}"], meta[property="${name}"]`);
            return el ? el.getAttribute('content') : null;
        };
        return {
            title: document.title,
            description: getMeta('description'),
            author: getMeta('author'),
            keywords: getMeta('keywords'),
        };
    }''')
    print(f"[HOOK] Metadata: {metadata}")

    # Infinite scroll
    for i in range(3):
        await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
        await page.wait_for_timeout(1000)
        print(f"[HOOK] Scroll {i+1}/3")
    return page

# --- Multi-URL Hooks ---

async def url_specific_hook(page, context, url, **kwargs):
    """Apply URL-specific logic (different headers per content type)"""
    print(f"[HOOK] Processing URL: {url}")

    # URL-specific headers
    if 'html' in url:
        await page.set_extra_http_headers({"X-Type": "HTML"})
    elif 'json' in url:
        await page.set_extra_http_headers({"X-Type": "JSON"})
    return page

async def track_progress_hook(page, context, url, response, **kwargs):
    """Track crawl progress by logging each loaded URL's HTTP status"""
    status = response.status if response else 'unknown'
    print(f"[HOOK] Loaded {url} - Status: {status}")
    return page

# ============================================================================
# Test Functions
# ============================================================================

async def test_all_hooks_comprehensive():
    """Test all 8 hook types"""
    print("=" * 70)
    print("Test 1: All Hooks Comprehensive Demo (Docker Client)")
    print("=" * 70)

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nCrawling with all 8 hooks...")

        # Define hooks with function objects
        hooks = {
            "on_browser_created": browser_created_hook,
            "on_page_context_created": page_context_hook,
            "on_user_agent_updated": user_agent_hook,
            "before_goto": before_goto_hook,
            "after_goto": after_goto_hook,
            "on_execution_started": execution_started_hook,
            "before_retrieve_html": before_retrieve_hook,
            "before_return_html": before_return_hook
        }

        result = await client.crawl(
            ["https://httpbin.org/html"],
            hooks=hooks,
            hooks_timeout=30
        )

        print("\nβœ… Success!")
        print(f"   URL: {result.url}")
        print(f"   Success: {result.success}")
        print(f"   HTML: {len(result.html)} chars")

async def test_authentication_workflow():
    """Test authentication with hooks"""
    print("\n" + "=" * 70)
    print("Test 2: Authentication Workflow (Docker Client)")
    print("=" * 70)

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nTesting authentication...")

        hooks = {
            "on_page_context_created": auth_context_hook,
            "before_goto": auth_headers_hook
        }

        result = await client.crawl(
            ["https://httpbin.org/basic-auth/user/passwd"],
            hooks=hooks,
            hooks_timeout=15
        )

        print("\nβœ… Authentication completed")
        if result.success:
            if '"authenticated"' in result.html and 'true' in result.html:
                print("   βœ… Basic auth successful!")
            else:
                print("   ⚠️ Auth status unclear")
        else:
            print(f"   ❌ Failed: {result.error_message}")

async def test_performance_optimization():
    """Test performance optimization"""
    print("\n" + "=" * 70)
    print("Test 3: Performance Optimization (Docker Client)")
    print("=" * 70)

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nTesting performance hooks...")

        hooks = {
            "on_page_context_created": performance_hook,
            "before_retrieve_html": cleanup_hook
        }

        result = await client.crawl(
            ["https://httpbin.org/html"],
            hooks=hooks,
            hooks_timeout=10
        )

        print("\nβœ… Optimization completed")
        print(f"   HTML size: {len(result.html):,} chars")
        print("   Resources blocked, ads removed")

async def test_content_extraction():
    """Test content extraction"""
    print("\n" + "=" * 70)
    print("Test 4: Content Extraction (Docker Client)")
    print("=" * 70)

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nTesting extraction hooks...")

        hooks = {
            "after_goto": wait_dynamic_content_hook,
            "before_retrieve_html": extract_metadata_hook
        }

        result = await client.crawl(
            ["https://www.kidocode.com/"],
            hooks=hooks,
            hooks_timeout=20
        )

        print("\nβœ… Extraction completed")
        print(f"   URL: {result.url}")
        print(f"   Success: {result.success}")
        print(f"   Metadata: {result.metadata}")

async def test_multi_url_crawl():
    """Test hooks with multiple URLs"""
    print("\n" + "=" * 70)
    print("Test 5: Multi-URL Crawl (Docker Client)")
    print("=" * 70)

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nCrawling multiple URLs...")

        hooks = {
            "before_goto": url_specific_hook,
            "after_goto": track_progress_hook
        }

        results = await client.crawl(
            [
                "https://httpbin.org/html",
                "https://httpbin.org/json",
                "https://httpbin.org/xml"
            ],
            hooks=hooks,
            hooks_timeout=15
        )

        print("\nβœ… Multi-URL crawl completed")
        print(f"\n   Crawled {len(results)} URLs:")
        for i, result in enumerate(results, 1):
            status = "βœ…" if result.success else "❌"
            print(f"   {status} {i}. {result.url}")

async def test_reusable_hook_library():
    """Test using reusable hook library"""
    print("\n" + "=" * 70)
    print("Test 6: Reusable Hook Library (Docker Client)")
    print("=" * 70)

    # Create a library of reusable hooks
    class HookLibrary:
        @staticmethod
        async def block_images(page, context, **kwargs):
            """Block all images"""
            await context.route("**/*.{png,jpg,jpeg,gif}", lambda r: r.abort())
            print("[LIBRARY] Images blocked")
            return page

        @staticmethod
        async def block_analytics(page, context, **kwargs):
            """Block analytics"""
            await context.route("**/analytics/*", lambda r: r.abort())
            await context.route("**/google-analytics.com/*", lambda r: r.abort())
            print("[LIBRARY] Analytics blocked")
            return page

        @staticmethod
        async def scroll_infinite(page, context, **kwargs):
            """Handle infinite scroll; stops when the page height stops growing"""
            for i in range(5):
                prev = await page.evaluate("document.body.scrollHeight")
                await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
                await page.wait_for_timeout(1000)
                curr = await page.evaluate("document.body.scrollHeight")
                if curr == prev:
                    break
            print("[LIBRARY] Infinite scroll complete")
            return page

    async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
        print("\nUsing hook library...")

        hooks = {
            "on_page_context_created": HookLibrary.block_images,
            "before_retrieve_html": HookLibrary.scroll_infinite
        }

        result = await client.crawl(
            ["https://www.kidocode.com/"],
            hooks=hooks,
            hooks_timeout=20
        )

        print("\nβœ… Library hooks completed")
        print(f"   Success: {result.success}")

# ============================================================================
# Main
# ============================================================================

async def main():
    """Run all Docker client hook examples"""
    print("πŸ”§ Crawl4AI Docker Client - Hooks Examples (Function-Based)")
    print("Using Python function objects with automatic conversion")
    print("=" * 70)

    tests = [
        ("All Hooks Demo", test_all_hooks_comprehensive),
        ("Authentication", test_authentication_workflow),
        ("Performance", test_performance_optimization),
        ("Extraction", test_content_extraction),
        ("Multi-URL", test_multi_url_crawl),
        ("Hook Library", test_reusable_hook_library)
    ]

    for i, (name, test_func) in enumerate(tests, 1):
        try:
            await test_func()
            print(f"\nβœ… Test {i}/{len(tests)}: {name} completed\n")
        except Exception as e:
            print(f"\n❌ Test {i}/{len(tests)}: {name} failed: {e}\n")
            import traceback
            traceback.print_exc()

    print("=" * 70)
    print("πŸŽ‰ All Docker client hook examples completed!")
    print("\nπŸ’‘ Key Benefits of Function-Based Hooks:")
    print("   β€’ Write as regular Python functions")
    print("   β€’ Full IDE support (autocomplete, types)")
    print("   β€’ Automatic conversion to API format")
    print("   β€’ Reusable across projects")
    print("   β€’ Clean, readable code")
    print("   β€’ Easy to test and debug")
    print("=" * 70)

if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/quickstart.py
docs/examples/quickstart.py
import os, sys from crawl4ai import LLMConfig sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) import asyncio import time import json import re from typing import Dict from bs4 import BeautifulSoup from pydantic import BaseModel, Field from crawl4ai import AsyncWebCrawler, CacheMode, BrowserConfig, CrawlerRunConfig from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator from crawl4ai.content_filter_strategy import PruningContentFilter from crawl4ai import ( JsonCssExtractionStrategy, LLMExtractionStrategy, ) __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) print("Crawl4AI: Advanced Web Crawling and Data Extraction") print("GitHub Repository: https://github.com/unclecode/crawl4ai") print("Twitter: @unclecode") print("Website: https://crawl4ai.com") # Basic Example - Simple Crawl async def simple_crawl(): print("\n--- Basic Usage ---") browser_config = BrowserConfig(headless=True) crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config ) print(result.markdown[:500]) async def clean_content(): crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, excluded_tags=["nav", "footer", "aside"], remove_overlay_elements=True, markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter( threshold=0.48, threshold_type="fixed", min_word_threshold=0 ), options={"ignore_links": True}, ), ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url="https://en.wikipedia.org/wiki/Apple", config=crawler_config, ) full_markdown_length = len(result.markdown.raw_markdown) fit_markdown_length = len(result.markdown.fit_markdown) print(f"Full Markdown Length: {full_markdown_length}") print(f"Fit Markdown Length: {fit_markdown_length}") async def link_analysis(): crawler_config = 
CrawlerRunConfig( cache_mode=CacheMode.ENABLED, exclude_external_links=True, exclude_social_media_links=True, ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config, ) print(f"Found {len(result.links['internal'])} internal links") print(f"Found {len(result.links['external'])} external links") for link in result.links["internal"][:5]: print(f"Href: {link['href']}\nText: {link['text']}\n") # JavaScript Execution Example async def simple_example_with_running_js_code(): print("\n--- Executing JavaScript and Using CSS Selectors ---") browser_config = BrowserConfig(headless=True, java_script_enabled=True) crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, js_code="const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();", # wait_for="() => { return Array.from(document.querySelectorAll('article.tease-card')).length > 10; }" ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config ) print(result.markdown[:500]) # CSS Selector Example async def simple_example_with_css_selector(): print("\n--- Using CSS Selectors ---") browser_config = BrowserConfig(headless=True) crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, css_selector=".wide-tease-item__description" ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config ) print(result.markdown[:500]) async def media_handling(): crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, exclude_external_images=True, screenshot=True ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config ) for img in result.media["images"][:5]: print(f"Image 
URL: {img['src']}, Alt: {img['alt']}, Score: {img['score']}") async def custom_hook_workflow(verbose=True): async with AsyncWebCrawler() as crawler: # Set a 'before_goto' hook to run custom code just before navigation crawler.crawler_strategy.set_hook( "before_goto", lambda page, context: print("[Hook] Preparing to navigate..."), ) # Perform the crawl operation result = await crawler.arun(url="https://crawl4ai.com") print(result.markdown.raw_markdown[:500].replace("\n", " -- ")) # Proxy Example async def use_proxy(): print("\n--- Using a Proxy ---") browser_config = BrowserConfig( headless=True, proxy_config={ "server": "http://proxy.example.com:8080", "username": "username", "password": "password", }, ) crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business", config=crawler_config ) if result.success: print(result.markdown[:500]) # Screenshot Example async def capture_and_save_screenshot(url: str, output_path: str): browser_config = BrowserConfig(headless=True) crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, screenshot=True) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun(url=url, config=crawler_config) if result.success and result.screenshot: import base64 screenshot_data = base64.b64decode(result.screenshot) with open(output_path, "wb") as f: f.write(screenshot_data) print(f"Screenshot saved successfully to {output_path}") else: print("Failed to capture screenshot") # LLM Extraction Example class OpenAIModelFee(BaseModel): model_name: str = Field(..., description="Name of the OpenAI model.") input_fee: str = Field(..., description="Fee for input token for the OpenAI model.") output_fee: str = Field( ..., description="Fee for output token for the OpenAI model." 
) async def extract_structured_data_using_llm( provider: str, api_token: str = None, extra_headers: Dict[str, str] = None ): print(f"\n--- Extracting Structured Data with {provider} ---") if api_token is None and provider != "ollama": print(f"API token is required for {provider}. Skipping this example.") return browser_config = BrowserConfig(headless=True) extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000} if extra_headers: extra_args["extra_headers"] = extra_headers crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, word_count_threshold=1, page_timeout=80000, extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider=provider,api_token=api_token), schema=OpenAIModelFee.model_json_schema(), extraction_type="schema", instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens. Do not miss any models in the entire content.""", extra_args=extra_args, ), ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://openai.com/api/pricing/", config=crawler_config ) print(result.extracted_content) # CSS Extraction Example async def extract_structured_data_using_css_extractor(): print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---") schema = { "name": "KidoCode Courses", "baseSelector": "section.charge-methodology .framework-collection-item.w-dyn-item", "fields": [ { "name": "section_title", "selector": "h3.heading-50", "type": "text", }, { "name": "section_description", "selector": ".charge-content", "type": "text", }, { "name": "course_name", "selector": ".text-block-93", "type": "text", }, { "name": "course_description", "selector": ".course-content-text", "type": "text", }, { "name": "course_icon", "selector": ".image-92", "type": "attribute", "attribute": "src", }, ], } browser_config = BrowserConfig(headless=True, java_script_enabled=True) js_click_tabs = """ (async () => { const tabs = 
document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div"); for(let tab of tabs) { tab.scrollIntoView(); tab.click(); await new Promise(r => setTimeout(r, 500)); } })(); """ crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=JsonCssExtractionStrategy(schema), js_code=[js_click_tabs], delay_before_return_html=1 ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://www.kidocode.com/degrees/technology", config=crawler_config ) companies = json.loads(result.extracted_content) print(f"Successfully extracted {len(companies)} companies") print(json.dumps(companies[0], indent=2)) # Dynamic Content Examples - Method 1 async def crawl_dynamic_content_pages_method_1(): print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---") first_commit = "" async def on_execution_started(page, **kwargs): nonlocal first_commit try: while True: await page.wait_for_selector("li.Box-sc-g0xbh4-0 h4") commit = await page.query_selector("li.Box-sc-g0xbh4-0 h4") commit = await commit.evaluate("(element) => element.textContent") commit = re.sub(r"\s+", "", commit) if commit and commit != first_commit: first_commit = commit break await asyncio.sleep(0.5) except Exception as e: print(f"Warning: New content didn't appear after JavaScript execution: {e}") browser_config = BrowserConfig(headless=False, java_script_enabled=True) async with AsyncWebCrawler(config=browser_config) as crawler: crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started) url = "https://github.com/microsoft/TypeScript/commits/main" session_id = "typescript_commits_session" all_commits = [] js_next_page = """ const button = document.querySelector('a[data-testid="pagination-next-button"]'); if (button) button.click(); """ for page in range(3): crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, css_selector="li.Box-sc-g0xbh4-0", js_code=js_next_page if page > 0 else None, 
js_only=page > 0, session_id=session_id, ) result = await crawler.arun(url=url, config=crawler_config) assert result.success, f"Failed to crawl page {page + 1}" soup = BeautifulSoup(result.cleaned_html, "html.parser") commits = soup.select("li") all_commits.extend(commits) print(f"Page {page + 1}: Found {len(commits)} commits") print(f"Successfully crawled {len(all_commits)} commits across 3 pages") # Dynamic Content Examples - Method 2 async def crawl_dynamic_content_pages_method_2(): print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---") browser_config = BrowserConfig(headless=False, java_script_enabled=True) js_next_page_and_wait = """ (async () => { const getCurrentCommit = () => { const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4'); return commits.length > 0 ? commits[0].textContent.trim() : null; }; const initialCommit = getCurrentCommit(); const button = document.querySelector('a[data-testid="pagination-next-button"]'); if (button) button.click(); while (true) { await new Promise(resolve => setTimeout(resolve, 100)); const newCommit = getCurrentCommit(); if (newCommit && newCommit !== initialCommit) { break; } } })(); """ schema = { "name": "Commit Extractor", "baseSelector": "li.Box-sc-g0xbh4-0", "fields": [ { "name": "title", "selector": "h4.markdown-title", "type": "text", "transform": "strip", }, ], } async with AsyncWebCrawler(config=browser_config) as crawler: url = "https://github.com/microsoft/TypeScript/commits/main" session_id = "typescript_commits_session" all_commits = [] extraction_strategy = JsonCssExtractionStrategy(schema) for page in range(3): crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, css_selector="li.Box-sc-g0xbh4-0", extraction_strategy=extraction_strategy, js_code=js_next_page_and_wait if page > 0 else None, js_only=page > 0, session_id=session_id, ) result = await crawler.arun(url=url, config=crawler_config) assert result.success, f"Failed to crawl page {page + 1}" commits = 
json.loads(result.extracted_content) all_commits.extend(commits) print(f"Page {page + 1}: Found {len(commits)} commits") print(f"Successfully crawled {len(all_commits)} commits across 3 pages") async def cosine_similarity_extraction(): from crawl4ai import CosineStrategy crawl_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=CosineStrategy( word_count_threshold=10, max_dist=0.2, # Maximum distance between two words linkage_method="ward", # Linkage method for hierarchical clustering (ward, complete, average, single) top_k=3, # Number of top keywords to extract sim_threshold=0.3, # Similarity threshold for clustering semantic_filter="McDonald's economic impact, American consumer trends", # Keywords to filter the content semantically using embeddings verbose=True, ), ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url="https://www.nbcnews.com/business/consumer/how-mcdonalds-e-coli-crisis-inflation-politics-reflect-american-story-rcna177156", config=crawl_config, ) print(json.loads(result.extracted_content)[:5]) # Browser Comparison async def crawl_custom_browser_type(): print("\n--- Browser Comparison ---") # Firefox browser_config_firefox = BrowserConfig(browser_type="firefox", headless=True) start = time.time() async with AsyncWebCrawler(config=browser_config_firefox) as crawler: result = await crawler.arun( url="https://www.example.com", config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS), ) print("Firefox:", time.time() - start) print(result.markdown[:500]) # WebKit browser_config_webkit = BrowserConfig(browser_type="webkit", headless=True) start = time.time() async with AsyncWebCrawler(config=browser_config_webkit) as crawler: result = await crawler.arun( url="https://www.example.com", config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS), ) print("WebKit:", time.time() - start) print(result.markdown[:500]) # Chromium (default) browser_config_chromium = BrowserConfig(browser_type="chromium", headless=True) start 
= time.time() async with AsyncWebCrawler(config=browser_config_chromium) as crawler: result = await crawler.arun( url="https://www.example.com", config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS), ) print("Chromium:", time.time() - start) print(result.markdown[:500]) # Anti-Bot and User Simulation async def crawl_with_user_simulation(): browser_config = BrowserConfig( headless=True, user_agent_mode="random", user_agent_generator_config={"device_type": "mobile", "os_type": "android"}, ) crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, magic=True, simulate_user=True, override_navigator=True, ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun(url="YOUR-URL-HERE", config=crawler_config) print(result.markdown) async def ssl_certification(): # Configure crawler to fetch SSL certificate config = CrawlerRunConfig( fetch_ssl_certificate=True, cache_mode=CacheMode.BYPASS, # Bypass cache to always get fresh certificates ) async with AsyncWebCrawler() as crawler: result = await crawler.arun(url="https://example.com", config=config) if result.success and result.ssl_certificate: cert = result.ssl_certificate tmp_dir = os.path.join(__location__, "tmp") os.makedirs(tmp_dir, exist_ok=True) # 1. Access certificate properties directly print("\nCertificate Information:") print(f"Issuer: {cert.issuer.get('CN', '')}") print(f"Valid until: {cert.valid_until}") print(f"Fingerprint: {cert.fingerprint}") # 2. 
Export certificate in different formats cert.to_json(os.path.join(tmp_dir, "certificate.json")) # For analysis print("\nCertificate exported to:") print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}") pem_data = cert.to_pem( os.path.join(tmp_dir, "certificate.pem") ) # For web servers print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}") der_data = cert.to_der( os.path.join(tmp_dir, "certificate.der") ) # For Java apps print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}") # Main execution async def main(): # Basic examples await simple_crawl() await simple_example_with_running_js_code() await simple_example_with_css_selector() # Advanced examples await extract_structured_data_using_css_extractor() await extract_structured_data_using_llm( "openai/gpt-4o", os.getenv("OPENAI_API_KEY") ) await crawl_dynamic_content_pages_method_1() await crawl_dynamic_content_pages_method_2() # Browser comparisons await crawl_custom_browser_type() # Screenshot example await capture_and_save_screenshot( "https://www.example.com", os.path.join(__location__, "tmp/example_screenshot.jpg") ) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_webhook_example.py
docs/examples/docker_webhook_example.py
""" Docker Webhook Example for Crawl4AI This example demonstrates how to use webhooks with the Crawl4AI job queue API. Instead of polling for results, webhooks notify your application when jobs complete. Supports both: - /crawl/job - Raw crawling with markdown extraction - /llm/job - LLM-powered content extraction Prerequisites: 1. Crawl4AI Docker container running on localhost:11235 2. Flask installed: pip install flask requests 3. LLM API key configured in .llm.env (for LLM extraction examples) Usage: 1. Run this script: python docker_webhook_example.py 2. The webhook server will start on http://localhost:8080 3. Jobs will be submitted and webhooks will be received automatically """ import requests import json import time from flask import Flask, request, jsonify from threading import Thread # Configuration CRAWL4AI_BASE_URL = "http://localhost:11235" WEBHOOK_BASE_URL = "http://localhost:8080" # Your webhook receiver URL # Initialize Flask app for webhook receiver app = Flask(__name__) # Store received webhook data for demonstration received_webhooks = [] @app.route('/webhooks/crawl-complete', methods=['POST']) def handle_crawl_webhook(): """ Webhook handler that receives notifications when crawl jobs complete. 
Payload structure: { "task_id": "crawl_abc123", "task_type": "crawl", "status": "completed" or "failed", "timestamp": "2025-10-21T10:30:00.000000+00:00", "urls": ["https://example.com"], "error": "error message" (only if failed), "data": {...} (only if webhook_data_in_payload=True) } """ payload = request.json print(f"\n{'='*60}") print(f"πŸ“¬ Webhook received for task: {payload['task_id']}") print(f" Status: {payload['status']}") print(f" Timestamp: {payload['timestamp']}") print(f" URLs: {payload['urls']}") if payload['status'] == 'completed': # If data is in payload, process it directly if 'data' in payload: print(f" βœ… Data included in webhook") data = payload['data'] # Process the crawl results here for result in data.get('results', []): print(f" - Crawled: {result.get('url')}") print(f" - Markdown length: {len(result.get('markdown', ''))}") else: # Fetch results from API if not included print(f" πŸ“₯ Fetching results from API...") task_id = payload['task_id'] result_response = requests.get(f"{CRAWL4AI_BASE_URL}/crawl/job/{task_id}") if result_response.ok: data = result_response.json() print(f" βœ… Results fetched successfully") # Process the crawl results here for result in data['result'].get('results', []): print(f" - Crawled: {result.get('url')}") print(f" - Markdown length: {len(result.get('markdown', ''))}") elif payload['status'] == 'failed': print(f" ❌ Job failed: {payload.get('error', 'Unknown error')}") print(f"{'='*60}\n") # Store webhook for demonstration received_webhooks.append(payload) # Return 200 OK to acknowledge receipt return jsonify({"status": "received"}), 200 @app.route('/webhooks/llm-complete', methods=['POST']) def handle_llm_webhook(): """ Webhook handler that receives notifications when LLM extraction jobs complete. 
Payload structure: { "task_id": "llm_1698765432_12345", "task_type": "llm_extraction", "status": "completed" or "failed", "timestamp": "2025-10-21T10:30:00.000000+00:00", "urls": ["https://example.com/article"], "error": "error message" (only if failed), "data": {"extracted_content": {...}} (only if webhook_data_in_payload=True) } """ payload = request.json print(f"\n{'='*60}") print(f"πŸ€– LLM Webhook received for task: {payload['task_id']}") print(f" Task Type: {payload['task_type']}") print(f" Status: {payload['status']}") print(f" Timestamp: {payload['timestamp']}") print(f" URL: {payload['urls'][0]}") if payload['status'] == 'completed': # If data is in payload, process it directly if 'data' in payload: print(f" βœ… Data included in webhook") data = payload['data'] # Webhook wraps extracted content in 'extracted_content' field extracted = data.get('extracted_content', {}) print(f" - Extracted content:") print(f" {json.dumps(extracted, indent=8)}") else: # Fetch results from API if not included print(f" πŸ“₯ Fetching results from API...") task_id = payload['task_id'] result_response = requests.get(f"{CRAWL4AI_BASE_URL}/llm/job/{task_id}") if result_response.ok: data = result_response.json() print(f" βœ… Results fetched successfully") # API returns unwrapped content in 'result' field extracted = data['result'] print(f" - Extracted content:") print(f" {json.dumps(extracted, indent=8)}") elif payload['status'] == 'failed': print(f" ❌ Job failed: {payload.get('error', 'Unknown error')}") print(f"{'='*60}\n") # Store webhook for demonstration received_webhooks.append(payload) # Return 200 OK to acknowledge receipt return jsonify({"status": "received"}), 200 def start_webhook_server(): """Start the Flask webhook server in a separate thread""" app.run(host='0.0.0.0', port=8080, debug=False, use_reloader=False) def submit_crawl_job_with_webhook(urls, webhook_url, include_data=False): """ Submit a crawl job with webhook notification. 
Args: urls: List of URLs to crawl webhook_url: URL to receive webhook notifications include_data: Whether to include full results in webhook payload Returns: task_id: The job's task identifier """ payload = { "urls": urls, "browser_config": {"headless": True}, "crawler_config": {"cache_mode": "bypass"}, "webhook_config": { "webhook_url": webhook_url, "webhook_data_in_payload": include_data, # Optional: Add custom headers for authentication # "webhook_headers": { # "X-Webhook-Secret": "your-secret-token" # } } } print(f"\nπŸš€ Submitting crawl job...") print(f" URLs: {urls}") print(f" Webhook: {webhook_url}") print(f" Include data: {include_data}") response = requests.post( f"{CRAWL4AI_BASE_URL}/crawl/job", json=payload, headers={"Content-Type": "application/json"} ) if response.ok: data = response.json() task_id = data['task_id'] print(f" βœ… Job submitted successfully") print(f" Task ID: {task_id}") return task_id else: print(f" ❌ Failed to submit job: {response.text}") return None def submit_llm_job_with_webhook(url, query, webhook_url, include_data=False, schema=None, provider=None): """ Submit an LLM extraction job with webhook notification. 
Args: url: URL to extract content from query: Instruction for the LLM (e.g., "Extract article title and author") webhook_url: URL to receive webhook notifications include_data: Whether to include full results in webhook payload schema: Optional JSON schema for structured extraction provider: Optional LLM provider (e.g., "openai/gpt-4o-mini") Returns: task_id: The job's task identifier """ payload = { "url": url, "q": query, "cache": False, "webhook_config": { "webhook_url": webhook_url, "webhook_data_in_payload": include_data, # Optional: Add custom headers for authentication # "webhook_headers": { # "X-Webhook-Secret": "your-secret-token" # } } } if schema: payload["schema"] = schema if provider: payload["provider"] = provider print(f"\nπŸ€– Submitting LLM extraction job...") print(f" URL: {url}") print(f" Query: {query}") print(f" Webhook: {webhook_url}") print(f" Include data: {include_data}") if provider: print(f" Provider: {provider}") response = requests.post( f"{CRAWL4AI_BASE_URL}/llm/job", json=payload, headers={"Content-Type": "application/json"} ) if response.ok: data = response.json() task_id = data['task_id'] print(f" βœ… Job submitted successfully") print(f" Task ID: {task_id}") return task_id else: print(f" ❌ Failed to submit job: {response.text}") return None def submit_job_without_webhook(urls): """ Submit a job without webhook (traditional polling approach). 
Args: urls: List of URLs to crawl Returns: task_id: The job's task identifier """ payload = { "urls": urls, "browser_config": {"headless": True}, "crawler_config": {"cache_mode": "bypass"} } print(f"\nπŸš€ Submitting crawl job (without webhook)...") print(f" URLs: {urls}") response = requests.post( f"{CRAWL4AI_BASE_URL}/crawl/job", json=payload ) if response.ok: data = response.json() task_id = data['task_id'] print(f" βœ… Job submitted successfully") print(f" Task ID: {task_id}") return task_id else: print(f" ❌ Failed to submit job: {response.text}") return None def poll_job_status(task_id, timeout=60): """ Poll for job status (used when webhook is not configured). Args: task_id: The job's task identifier timeout: Maximum time to wait in seconds """ print(f"\n⏳ Polling for job status...") start_time = time.time() while time.time() - start_time < timeout: response = requests.get(f"{CRAWL4AI_BASE_URL}/crawl/job/{task_id}") if response.ok: data = response.json() status = data.get('status', 'unknown') if status == 'completed': print(f" βœ… Job completed!") return data elif status == 'failed': print(f" ❌ Job failed: {data.get('error', 'Unknown error')}") return data else: print(f" ⏳ Status: {status}, waiting...") time.sleep(2) else: print(f" ❌ Failed to get status: {response.text}") return None print(f" ⏰ Timeout reached") return None def main(): """Run the webhook demonstration""" # Check if Crawl4AI is running try: health = requests.get(f"{CRAWL4AI_BASE_URL}/health", timeout=5) print(f"βœ… Crawl4AI is running: {health.json()}") except: print(f"❌ Cannot connect to Crawl4AI at {CRAWL4AI_BASE_URL}") print(" Please make sure Docker container is running:") print(" docker run -d -p 11235:11235 --name crawl4ai unclecode/crawl4ai:latest") return # Start webhook server in background thread print(f"\n🌐 Starting webhook server at {WEBHOOK_BASE_URL}...") webhook_thread = Thread(target=start_webhook_server, daemon=True) webhook_thread.start() time.sleep(2) # Give server time to 
start # Example 1: Job with webhook (notification only, fetch data separately) print(f"\n{'='*60}") print("Example 1: Webhook Notification Only") print(f"{'='*60}") task_id_1 = submit_crawl_job_with_webhook( urls=["https://example.com"], webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/crawl-complete", include_data=False ) # Example 2: Job with webhook (data included in payload) time.sleep(5) # Wait a bit between requests print(f"\n{'='*60}") print("Example 2: Webhook with Full Data") print(f"{'='*60}") task_id_2 = submit_crawl_job_with_webhook( urls=["https://www.python.org"], webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/crawl-complete", include_data=True ) # Example 3: LLM extraction with webhook (notification only) time.sleep(5) # Wait a bit between requests print(f"\n{'='*60}") print("Example 3: LLM Extraction with Webhook (Notification Only)") print(f"{'='*60}") task_id_3 = submit_llm_job_with_webhook( url="https://www.example.com", query="Extract the main heading and description from this page.", webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/llm-complete", include_data=False, provider="openai/gpt-4o-mini" ) # Example 4: LLM extraction with webhook (data included + schema) time.sleep(5) # Wait a bit between requests print(f"\n{'='*60}") print("Example 4: LLM Extraction with Schema and Full Data") print(f"{'='*60}") # Define a schema for structured extraction schema = json.dumps({ "type": "object", "properties": { "title": {"type": "string", "description": "Page title"}, "description": {"type": "string", "description": "Page description"} }, "required": ["title"] }) task_id_4 = submit_llm_job_with_webhook( url="https://www.python.org", query="Extract the title and description of this website", webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/llm-complete", include_data=True, schema=schema, provider="openai/gpt-4o-mini" ) # Example 5: Traditional polling (no webhook) time.sleep(5) # Wait a bit between requests print(f"\n{'='*60}") print("Example 5: Traditional Polling (No Webhook)") 
print(f"{'='*60}") task_id_5 = submit_job_without_webhook( urls=["https://github.com"] ) if task_id_5: result = poll_job_status(task_id_5) if result and result.get('status') == 'completed': print(f" βœ… Results retrieved via polling") # Wait for webhooks to arrive print(f"\n⏳ Waiting for webhooks to be received...") time.sleep(30) # Give jobs time to complete and webhooks to arrive (longer for LLM) # Summary print(f"\n{'='*60}") print("Summary") print(f"{'='*60}") print(f"Total webhooks received: {len(received_webhooks)}") crawl_webhooks = [w for w in received_webhooks if w['task_type'] == 'crawl'] llm_webhooks = [w for w in received_webhooks if w['task_type'] == 'llm_extraction'] print(f"\nπŸ“Š Breakdown:") print(f" - Crawl webhooks: {len(crawl_webhooks)}") print(f" - LLM extraction webhooks: {len(llm_webhooks)}") print(f"\nπŸ“‹ Details:") for i, webhook in enumerate(received_webhooks, 1): task_type = webhook['task_type'] icon = "πŸ•·οΈ" if task_type == "crawl" else "πŸ€–" print(f"{i}. {icon} Task {webhook['task_id']}: {webhook['status']} ({task_type})") print(f"\nβœ… Demo completed!") print(f"\nπŸ’‘ Pro tips:") print(f" - In production, your webhook URL should be publicly accessible") print(f" (e.g., https://myapp.com/webhooks) or use ngrok for testing") print(f" - Both /crawl/job and /llm/job support the same webhook configuration") print(f" - Use webhook_data_in_payload=true to get results directly in the webhook") print(f" - LLM jobs may take longer, adjust timeouts accordingly") if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/demo_multi_config_clean.py
docs/examples/demo_multi_config_clean.py
""" 🎯 Multi-Config URL Matching Demo ================================= Learn how to use different crawler configurations for different URL patterns in a single crawl batch with Crawl4AI's multi-config feature. Part 1: Understanding URL Matching (Pattern Testing) Part 2: Practical Example with Real Crawling """ import asyncio from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, MatchMode ) from crawl4ai.processors.pdf import PDFContentScrapingStrategy from crawl4ai.extraction_strategy import JsonCssExtractionStrategy from crawl4ai.content_filter_strategy import PruningContentFilter from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator def print_section(title): """Print a formatted section header""" print(f"\n{'=' * 60}") print(f"{title}") print(f"{'=' * 60}\n") def test_url_matching(config, test_urls, config_name): """Test URL matching for a config and show results""" print(f"Config: {config_name}") print(f"Matcher: {config.url_matcher}") if hasattr(config, 'match_mode'): print(f"Mode: {config.match_mode.value}") print("-" * 40) for url in test_urls: matches = config.is_match(url) symbol = "βœ“" if matches else "βœ—" print(f"{symbol} {url}") print() # ============================================================================== # PART 1: Understanding URL Matching # ============================================================================== def demo_part1_pattern_matching(): """Part 1: Learn how URL matching works without crawling""" print_section("PART 1: Understanding URL Matching") print("Let's explore different ways to match URLs with configs.\n") # Test URLs we'll use throughout test_urls = [ "https://example.com/report.pdf", "https://example.com/data.json", "https://example.com/blog/post-1", "https://example.com/article/news", "https://api.example.com/v1/users", "https://example.com/about" ] # 1.1 Simple String Pattern print("1.1 Simple String Pattern Matching") print("-" * 40) pdf_config = CrawlerRunConfig( url_matcher="*.pdf" ) 
test_url_matching(pdf_config, test_urls, "PDF Config") # 1.2 Multiple String Patterns print("1.2 Multiple String Patterns (OR logic)") print("-" * 40) blog_config = CrawlerRunConfig( url_matcher=["*/blog/*", "*/article/*", "*/news/*"], match_mode=MatchMode.OR # This is default, shown for clarity ) test_url_matching(blog_config, test_urls, "Blog/Article Config") # 1.3 Single Function Matcher print("1.3 Function-based Matching") print("-" * 40) api_config = CrawlerRunConfig( url_matcher=lambda url: 'api' in url or url.endswith('.json') ) test_url_matching(api_config, test_urls, "API Config") # 1.4 List of Functions print("1.4 Multiple Functions with AND Logic") print("-" * 40) # Must be HTTPS AND contain 'api' AND have version number secure_api_config = CrawlerRunConfig( url_matcher=[ lambda url: url.startswith('https://'), lambda url: 'api' in url, lambda url: '/v' in url # Version indicator ], match_mode=MatchMode.AND ) test_url_matching(secure_api_config, test_urls, "Secure API Config") # 1.5 Mixed: String and Function Together print("1.5 Mixed Patterns: String + Function") print("-" * 40) # Match JSON files OR any API endpoint json_or_api_config = CrawlerRunConfig( url_matcher=[ "*.json", # String pattern lambda url: 'api' in url # Function ], match_mode=MatchMode.OR ) test_url_matching(json_or_api_config, test_urls, "JSON or API Config") # 1.6 Complex: Multiple Strings + Multiple Functions print("1.6 Complex Matcher: Mixed Types with AND Logic") print("-" * 40) # Must be: HTTPS AND (.com domain) AND (blog OR article) AND NOT a PDF complex_config = CrawlerRunConfig( url_matcher=[ lambda url: url.startswith('https://'), # Function: HTTPS check "*.com/*", # String: .com domain lambda url: any(pattern in url for pattern in ['/blog/', '/article/']), # Function: Blog OR article lambda url: not url.endswith('.pdf') # Function: Not PDF ], match_mode=MatchMode.AND ) test_url_matching(complex_config, test_urls, "Complex Mixed Config") print("\nβœ… Key Takeaway: First 
matching config wins when passed to arun_many()!") # ============================================================================== # PART 2: Practical Multi-URL Crawling # ============================================================================== async def demo_part2_practical_crawling(): """Part 2: Real-world example with different content types""" print_section("PART 2: Practical Multi-URL Crawling") print("Now let's see multi-config in action with real URLs.\n") # Create specialized configs for different content types configs = [ # Config 1: PDF documents - only match files ending with .pdf CrawlerRunConfig( url_matcher="*.pdf", scraping_strategy=PDFContentScrapingStrategy() ), # Config 2: Blog/article pages with content filtering CrawlerRunConfig( url_matcher=["*/blog/*", "*/article/*", "*python.org*"], markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter(threshold=0.48) ) ), # Config 3: Dynamic pages requiring JavaScript CrawlerRunConfig( url_matcher=lambda url: 'github.com' in url, js_code="window.scrollTo(0, 500);" # Scroll to load content ), # Config 4: Mixed matcher - API endpoints (string OR function) CrawlerRunConfig( url_matcher=[ "*.json", # String pattern for JSON files lambda url: 'api' in url or 'httpbin.org' in url # Function for API endpoints ], match_mode=MatchMode.OR, ), # Config 5: Complex matcher - Secure documentation sites CrawlerRunConfig( url_matcher=[ lambda url: url.startswith('https://'), # Must be HTTPS "*.org/*", # String: .org domain lambda url: any(doc in url for doc in ['docs', 'documentation', 'reference']), # Has docs lambda url: not url.endswith(('.pdf', '.json')) # Not PDF or JSON ], match_mode=MatchMode.AND, # wait_for="css:.content, css:article" # Wait for content to load ), # Default config for everything else # CrawlerRunConfig() # No url_matcher means it matches everything (use it as fallback) ] # URLs to crawl - each will use a different config urls = [ 
"https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf", # β†’ PDF config "https://blog.python.org/", # β†’ Blog config with content filter "https://github.com/microsoft/playwright", # β†’ JS config "https://httpbin.org/json", # β†’ Mixed matcher config (API) "https://docs.python.org/3/reference/", # β†’ Complex matcher config "https://www.w3schools.com/", # β†’ Default config, if you uncomment the default config line above, if not you will see `Error: No matching configuration` ] print("URLs to crawl:") for i, url in enumerate(urls, 1): print(f"{i}. {url}") print("\nCrawling with appropriate config for each URL...\n") async with AsyncWebCrawler() as crawler: results = await crawler.arun_many( urls=urls, config=configs ) # Display results print("Results:") print("-" * 60) for result in results: if result.success: # Determine which config was used config_type = "Default" if result.url.endswith('.pdf'): config_type = "PDF Strategy" elif any(pattern in result.url for pattern in ['blog', 'python.org']) and 'docs' not in result.url: config_type = "Blog + Content Filter" elif 'github.com' in result.url: config_type = "JavaScript Enabled" elif 'httpbin.org' in result.url or result.url.endswith('.json'): config_type = "Mixed Matcher (API)" elif 'docs.python.org' in result.url: config_type = "Complex Matcher (Secure Docs)" print(f"\nβœ“ {result.url}") print(f" Config used: {config_type}") print(f" Content size: {len(result.markdown)} chars") # Show if we have fit_markdown (from content filter) if hasattr(result.markdown, 'fit_markdown') and result.markdown.fit_markdown: print(f" Fit markdown size: {len(result.markdown.fit_markdown)} chars") reduction = (1 - len(result.markdown.fit_markdown) / len(result.markdown)) * 100 print(f" Content reduced by: {reduction:.1f}%") # Show extracted data if using extraction strategy if hasattr(result, 'extracted_content') and result.extracted_content: print(f" Extracted data: {str(result.extracted_content)[:100]}...") 
else: print(f"\nβœ— {result.url}") print(f" Error: {result.error_message}") print("\n" + "=" * 60) print("βœ… Multi-config crawling complete!") print("\nBenefits demonstrated:") print("- PDFs handled with specialized scraper") print("- Blog content filtered for relevance") print("- JavaScript executed only where needed") print("- Mixed matchers (string + function) for flexible matching") print("- Complex matchers for precise URL targeting") print("- Each URL got optimal configuration automatically!") async def main(): """Run both parts of the demo""" print(""" 🎯 Multi-Config URL Matching Demo ================================= Learn how Crawl4AI can use different configurations for different URLs in a single batch. """) # Part 1: Pattern matching demo_part1_pattern_matching() print("\nPress Enter to continue to Part 2...") try: input() except EOFError: # Running in non-interactive mode, skip input pass # Part 2: Practical crawling await demo_part2_practical_crawling() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/browser_optimization_example.py
docs/examples/browser_optimization_example.py
""" This example demonstrates optimal browser usage patterns in Crawl4AI: 1. Sequential crawling with session reuse 2. Parallel crawling with browser instance reuse 3. Performance optimization settings """ import asyncio from typing import List from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator async def crawl_sequential(urls: List[str]): """ Sequential crawling using session reuse - most efficient for moderate workloads """ print("\n=== Sequential Crawling with Session Reuse ===") # Configure browser with optimized settings browser_config = BrowserConfig( headless=True, browser_args=[ "--disable-gpu", # Disable GPU acceleration "--disable-dev-shm-usage", # Disable /dev/shm usage "--no-sandbox", # Required for Docker ], viewport={ "width": 800, "height": 600, }, # Smaller viewport for better performance ) # Configure crawl settings crawl_config = CrawlerRunConfig( markdown_generator=DefaultMarkdownGenerator( # content_filter=PruningContentFilter(), In case you need fit_markdown ), ) # Create single crawler instance crawler = AsyncWebCrawler(config=browser_config) await crawler.start() try: session_id = "session1" # Use same session for all URLs for url in urls: result = await crawler.arun( url=url, config=crawl_config, session_id=session_id, # Reuse same browser tab ) if result.success: print(f"Successfully crawled {url}") print(f"Content length: {len(result.markdown.raw_markdown)}") finally: await crawler.close() async def crawl_parallel(urls: List[str], max_concurrent: int = 3): """ Parallel crawling while reusing browser instance - best for large workloads """ print("\n=== Parallel Crawling with Browser Reuse ===") browser_config = BrowserConfig( headless=True, browser_args=["--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"], viewport={"width": 800, "height": 600}, ) crawl_config = CrawlerRunConfig( markdown_generator=DefaultMarkdownGenerator( # 
content_filter=PruningContentFilter(), In case you need fit_markdown ), ) # Create single crawler instance for all parallel tasks crawler = AsyncWebCrawler(config=browser_config) await crawler.start() try: # Create tasks in batches to control concurrency for i in range(0, len(urls), max_concurrent): batch = urls[i : i + max_concurrent] tasks = [] for j, url in enumerate(batch): session_id = ( f"parallel_session_{j}" # Different session per concurrent task ) task = crawler.arun(url=url, config=crawl_config, session_id=session_id) tasks.append(task) # Wait for batch to complete results = await asyncio.gather(*tasks, return_exceptions=True) # Process results for url, result in zip(batch, results): if isinstance(result, Exception): print(f"Error crawling {url}: {str(result)}") elif result.success: print(f"Successfully crawled {url}") print(f"Content length: {len(result.markdown.raw_markdown)}") finally: await crawler.close() async def main(): # Example URLs urls = [ "https://example.com/page1", "https://example.com/page2", "https://example.com/page3", "https://example.com/page4", ] # Demo sequential crawling await crawl_sequential(urls) # Demo parallel crawling await crawl_parallel(urls, max_concurrent=2) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/serp_api_project_11_feb.py
docs/examples/serp_api_project_11_feb.py
import asyncio
import json
from typing import Any, Dict, List, Optional

# NOTE(review): `P` is never used below — this looks like an accidental
# auto-import and pulls in the third-party `regex` package; confirm and remove.
from regex import P

from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CrawlerRunConfig,
    CacheMode,
    LLMExtractionStrategy,
    JsonCssExtractionStrategy,
    CrawlerHub,
    CrawlResult,
    DefaultMarkdownGenerator,
    PruningContentFilter,
)
from pathlib import Path
from pydantic import BaseModel

__current_dir = Path(__file__).parent


# Crawl4ai Hello Web
async def little_hello_web():
    """Minimal crawl: fetch a page and print the first 500 chars of raw markdown."""
    async with AsyncWebCrawler() as crawler:
        result: CrawlResult = await crawler.arun(
            url="https://www.helloworld.org"
        )
        print(result.markdown.raw_markdown[:500])


async def hello_web():
    """Crawl with a pruning content filter and print the fit markdown."""
    browser_config = BrowserConfig(headless=True, verbose=True)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        crawler_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            markdown_generator=DefaultMarkdownGenerator(
                content_filter=PruningContentFilter(
                    threshold=0.48, threshold_type="fixed", min_word_threshold=0
                )
            ),
        )
        result: CrawlResult = await crawler.arun(
            url="https://www.helloworld.org", config=crawler_config
        )
        print(result.markdown.fit_markdown[:500])


# Naive Approach Using Large Language Models
async def extract_using_llm():
    """Extract structured Google search results with an LLM extraction strategy.

    Requires OPENAI credentials via the provider configuration; writes the
    parsed result to search_result_using_llm.json next to this script.
    """
    print("Extracting using Large Language Models")
    browser_config = BrowserConfig(headless=True, verbose=True)
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()

    try:
        class Sitelink(BaseModel):
            title: str
            link: str

        class GoogleSearchResult(BaseModel):
            title: str
            link: str
            snippet: str
            sitelinks: Optional[List[Sitelink]] = None

        llm_extraction_strategy = LLMExtractionStrategy(
            provider="openai/gpt-4o",
            schema=GoogleSearchResult.model_json_schema(),
            instruction="""I want to extract the title, link, snippet, and sitelinks from a Google search result. I shared here the content of div#search from the search result page. We are just interested in organic search results.
            Example:
            {
                "title": "Google",
                "link": "https://www.google.com",
                "snippet": "Google is a search engine.",
                "sitelinks": [
                    {
                        "title": "Gmail",
                        "link": "https://mail.google.com"
                    },
                    {
                        "title": "Google Drive",
                        "link": "https://drive.google.com"
                    }
                ]
            }""",
            # apply_chunking=False,
            chunk_token_threshold=2 ** 12,  # 2^12 = 4096
            verbose=True,
            # input_format="html",  # html, markdown, cleaned_html
            input_format="cleaned_html"
        )

        crawl_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            keep_attrs=["id", "class"],
            keep_data_attributes=True,
            delay_before_return_html=2,
            extraction_strategy=llm_extraction_strategy,
            css_selector="div#search",
        )

        result: CrawlResult = await crawler.arun(
            url="https://www.google.com/search?q=apple%20inc&start=0&num=10",
            config=crawl_config,
        )

        search_result = {}
        if result.success:
            search_result = json.loads(result.extracted_content)

            # save search result to file
            with open(__current_dir / "search_result_using_llm.json", "w") as f:
                f.write(json.dumps(search_result, indent=4))

            print(json.dumps(search_result, indent=4))
    finally:
        await crawler.close()


# Example of using CrawlerHub
async def schema_generator():
    """Generate and print an extraction schema from a saved search-item HTML sample."""
    print("Generating schema")

    # Load html from file
    with open(__current_dir / "google_search_item.html", "r") as f:
        html = f.read()

    organic_schema = JsonCssExtractionStrategy.generate_schema(
        html=html,
        target_json_example="""{
            "title": "...",
            "link": "...",
            "snippet": "...",
            "date": "1 hour ago",
            "sitelinks": [
                {
                    "title": "...",
                    "link": "..."
                }
            ]
        }""",
        query="""The given HTML is the crawled HTML from the Google search result, which refers to one HTML element representing one organic Google search result. Please find the schema for the organic search item based on the given HTML. I am interested in the title, link, snippet text, sitelinks, and date.""",
    )

    print(json.dumps(organic_schema, indent=4))


def _load_or_generate_schema(
    path: Path, html: str, target_json_example: str, query: str, force: bool
) -> Dict[str, Any]:
    """Return a cached schema from *path*, or (re)generate and cache it.

    A generation is triggered when the cache file is missing or *force* is True.
    """
    if path.exists() and not force:
        with open(path, "r") as f:
            return json.loads(f.read())

    schema = JsonCssExtractionStrategy.generate_schema(
        html=html,
        target_json_example=target_json_example,
        query=query,
    )
    with open(path, "w") as f:
        f.write(json.dumps(schema, indent=4))
    return schema


# Golden Standard
async def build_schema(html: str, force: bool = False) -> Dict[str, Any]:
    """Build (or load cached) extraction schemas for the three SERP sections.

    Fix vs. the original: *force* now applies to ALL three schemas, not just
    the organic one — previously force=True only regenerated organic_schema.
    """
    print("Building schema")
    schemas: Dict[str, Any] = {}

    schemas["organic"] = _load_or_generate_schema(
        __current_dir / "organic_schema.json",
        html,
        target_json_example="""{
            "title": "...",
            "link": "...",
            "snippet": "...",
            "date": "1 hour ago",
            "sitelinks": [
                {
                    "title": "...",
                    "link": "..."
                }
            ]
        }""",
        query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html, I am interested in title, link, snippet text, sitelinks and date. Usually they are all inside a div#search.""",
        force=force,
    )

    schemas["top_stories"] = _load_or_generate_schema(
        __current_dir / "top_stories_schema.json",
        html,
        target_json_example="""{
            "title": "...",
            "link": "...",
            "source": "Insider Monkey",
            "date": "1 hour ago",
        }""",
        query="""The given HTML is the crawled HTML from the Google search result. Please find the schema for the Top Stories item in the given HTML. I am interested in the title, link, source, and date.""",
        force=force,
    )

    schemas["suggested_queries"] = _load_or_generate_schema(
        __current_dir / "suggested_queries_schema.json",
        html,
        target_json_example="""{
            "query": "A for Apple",
        }""",
        query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "relatedSearches" at the bottom of the page. I am interested in the queries only.""",
        force=force,
    )

    return schemas


async def search(q: str = "apple inc") -> Dict[str, Any]:
    """Crawl a Google SERP for *q* and extract all sections via cached schemas.

    Writes the combined result to search_result.json and returns it.
    """
    print("Searching for:", q)
    browser_config = BrowserConfig(headless=True, verbose=True)
    crawler = AsyncWebCrawler(config=browser_config)
    search_result: Dict[str, List[Dict[str, Any]]] = {}

    await crawler.start()
    try:
        crawl_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            keep_attrs=["id", "class"],
            keep_data_attributes=True,
            delay_before_return_html=2,
        )

        from urllib.parse import quote
        result: CrawlResult = await crawler.arun(
            f"https://www.google.com/search?q={quote(q)}&start=0&num=10",
            config=crawl_config
        )

        if result.success:
            schemas: Dict[str, Any] = await build_schema(result.html)
            for schema in schemas.values():
                schema_key = schema["name"].lower().replace(' ', '_')
                search_result[schema_key] = JsonCssExtractionStrategy(
                    schema=schema
                ).run(
                    url="",
                    sections=[result.html],
                )

            # save search result to file
            with open(__current_dir / "search_result.json", "w") as f:
                f.write(json.dumps(search_result, indent=4))

            print(json.dumps(search_result, indent=4))
    finally:
        await crawler.close()

    return search_result


# Example of using CrawlerHub
async def hub_example(query: str = "apple inc"):
    """Run a Google text search through CrawlerHub and save the JSON result."""
    print("Using CrawlerHub")
    crawler_cls = CrawlerHub.get("google_search")
    crawler = crawler_cls()

    # Text search
    # NOTE(review): schema_cache_path is a machine-specific absolute path;
    # consider making it configurable.
    text_results = await crawler.run(
        query=query,
        search_type="text",
        schema_cache_path="/Users/unclecode/.crawl4ai"
    )

    # Save search result to file
    with open(__current_dir / "search_result_using_hub.json", "w") as f:
        f.write(json.dumps(json.loads(text_results), indent=4))

    print(json.dumps(json.loads(text_results), indent=4))


async def demo():
    # Step 1: Introduction & Overview
    # await little_hello_web()
    # await hello_web()

    # Step 2: Demo end result, using hub
    # await hub_example()

    # Step 3: Using LLm for extraction
    # await extract_using_llm()

    # Step 4: GEt familiar with schema generation
    # await schema_generator()

    # Step 5: Golden Standard
    # await search()

    # Step 6: Introduction to CrawlerHub
    await hub_example()


if __name__ == "__main__":
    asyncio.run(demo())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/extraction_strategies_examples.py
docs/examples/extraction_strategies_examples.py
""" Example demonstrating different extraction strategies with various input formats. This example shows how to: 1. Use different input formats (markdown, HTML, fit_markdown) 2. Work with JSON-based extractors (CSS and XPath) 3. Use LLM-based extraction with different input formats 4. Configure browser and crawler settings properly """ import asyncio import os from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode from crawl4ai import LLMConfig from crawl4ai import ( LLMExtractionStrategy, JsonCssExtractionStrategy, JsonXPathExtractionStrategy, ) from crawl4ai.content_filter_strategy import PruningContentFilter from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator async def run_extraction(crawler: AsyncWebCrawler, url: str, strategy, name: str): """Helper function to run extraction with proper configuration""" try: # Configure the crawler run settings config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=strategy, markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter() # For fit_markdown support ), ) # Run the crawler result = await crawler.arun(url=url, config=config) if result.success: print(f"\n=== {name} Results ===") print(f"Extracted Content: {result.extracted_content}") print(f"Raw Markdown Length: {len(result.markdown.raw_markdown)}") print( f"Citations Markdown Length: {len(result.markdown.markdown_with_citations)}" ) else: print(f"Error in {name}: Crawl failed") except Exception as e: print(f"Error in {name}: {str(e)}") async def main(): # Example URL (replace with actual URL) url = "https://example.com/product-page" # Configure browser settings browser_config = BrowserConfig(headless=True, verbose=True) # Initialize extraction strategies # 1. 
LLM Extraction with different input formats markdown_strategy = LLMExtractionStrategy( llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")), instruction="Extract product information including name, price, and description", ) html_strategy = LLMExtractionStrategy( input_format="html", llm_config=LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")), instruction="Extract product information from HTML including structured data", ) fit_markdown_strategy = LLMExtractionStrategy( input_format="fit_markdown", llm_config=LLMConfig(provider="openai/gpt-4o-mini",api_token=os.getenv("OPENAI_API_KEY")), instruction="Extract product information from cleaned markdown", ) # 2. JSON CSS Extraction (automatically uses HTML input) css_schema = { "baseSelector": ".product", "fields": [ {"name": "title", "selector": "h1.product-title", "type": "text"}, {"name": "price", "selector": ".price", "type": "text"}, {"name": "description", "selector": ".description", "type": "text"}, ], } css_strategy = JsonCssExtractionStrategy(schema=css_schema) # 3. 
JSON XPath Extraction (automatically uses HTML input) xpath_schema = { "baseSelector": "//div[@class='product']", "fields": [ { "name": "title", "selector": ".//h1[@class='product-title']/text()", "type": "text", }, { "name": "price", "selector": ".//span[@class='price']/text()", "type": "text", }, { "name": "description", "selector": ".//div[@class='description']/text()", "type": "text", }, ], } xpath_strategy = JsonXPathExtractionStrategy(schema=xpath_schema) # Use context manager for proper resource handling async with AsyncWebCrawler(config=browser_config) as crawler: # Run all strategies await run_extraction(crawler, url, markdown_strategy, "Markdown LLM") await run_extraction(crawler, url, html_strategy, "HTML LLM") await run_extraction(crawler, url, fit_markdown_strategy, "Fit Markdown LLM") await run_extraction(crawler, url, css_strategy, "CSS Extraction") await run_extraction(crawler, url, xpath_strategy, "XPath Extraction") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/link_head_extraction_example.py
docs/examples/link_head_extraction_example.py
#!/usr/bin/env python3 """ Link Head Extraction & Scoring Example This example demonstrates Crawl4AI's advanced link analysis capabilities: 1. Basic link head extraction 2. Three-layer scoring system (intrinsic, contextual, total) 3. Pattern-based filtering 4. Multiple practical use cases Requirements: - crawl4ai installed - Internet connection Usage: python link_head_extraction_example.py """ import asyncio from crawl4ai import AsyncWebCrawler, CrawlerRunConfig from crawl4ai import LinkPreviewConfig async def basic_link_head_extraction(): """ Basic example: Extract head content from internal links with scoring """ print("πŸ”— Basic Link Head Extraction Example") print("=" * 50) config = CrawlerRunConfig( # Enable link head extraction link_preview_config=LinkPreviewConfig( include_internal=True, # Process internal links include_external=False, # Skip external links for this demo max_links=5, # Limit to 5 links concurrency=3, # Process 3 links simultaneously timeout=10, # 10 second timeout per link query="API documentation guide", # Query for relevance scoring verbose=True # Show detailed progress ), # Enable intrinsic link scoring score_links=True, only_text=True ) async with AsyncWebCrawler() as crawler: result = await crawler.arun("https://docs.python.org/3/", config=config) if result.success: print(f"\nβœ… Successfully crawled: {result.url}") internal_links = result.links.get("internal", []) links_with_head = [link for link in internal_links if link.get("head_data") is not None] print(f"🧠 Links with head data: {len(links_with_head)}") # Show detailed results for i, link in enumerate(links_with_head[:3]): print(f"\nπŸ“„ Link {i+1}: {link['href']}") print(f" Text: '{link.get('text', 'No text')[:50]}...'") # Show all three score types intrinsic = link.get('intrinsic_score') contextual = link.get('contextual_score') total = link.get('total_score') print(f" πŸ“Š Scores:") if intrinsic is not None: print(f" β€’ Intrinsic: {intrinsic:.2f}/10.0") if contextual is not 
None: print(f" β€’ Contextual: {contextual:.3f}") if total is not None: print(f" β€’ Total: {total:.3f}") # Show head data head_data = link.get("head_data", {}) if head_data: title = head_data.get("title", "No title") description = head_data.get("meta", {}).get("description", "") print(f" πŸ“° Title: {title[:60]}...") if description: print(f" πŸ“ Description: {description[:80]}...") else: print(f"❌ Crawl failed: {result.error_message}") async def research_assistant_example(): """ Research Assistant: Find highly relevant documentation pages """ print("\n\nπŸ” Research Assistant Example") print("=" * 50) config = CrawlerRunConfig( link_preview_config=LinkPreviewConfig( include_internal=True, include_external=True, include_patterns=["*/docs/*", "*/tutorial/*", "*/guide/*"], exclude_patterns=["*/login*", "*/admin*"], query="machine learning neural networks deep learning", max_links=15, score_threshold=0.4, # Only include high-relevance links concurrency=8, verbose=False # Clean output for this example ), score_links=True ) # Test with scikit-learn documentation async with AsyncWebCrawler() as crawler: result = await crawler.arun("https://scikit-learn.org/stable/", config=config) if result.success: print(f"βœ… Analyzed: {result.url}") all_links = result.links.get("internal", []) + result.links.get("external", []) # Filter for high-scoring links high_scoring_links = [link for link in all_links if link.get("total_score", 0) > 0.6] # Sort by total score (highest first) high_scoring_links.sort(key=lambda x: x.get("total_score", 0), reverse=True) print(f"\n🎯 Found {len(high_scoring_links)} highly relevant links:") print(" (Showing top 5 by relevance score)") for i, link in enumerate(high_scoring_links[:5]): score = link.get("total_score", 0) title = link.get("head_data", {}).get("title", "No title") print(f"\n{i+1}. 
⭐ {score:.3f} - {title[:70]}...") print(f" πŸ”— {link['href']}") # Show score breakdown intrinsic = link.get('intrinsic_score', 0) contextual = link.get('contextual_score', 0) print(f" πŸ“Š Quality: {intrinsic:.1f}/10 | Relevance: {contextual:.3f}") else: print(f"❌ Research failed: {result.error_message}") async def api_discovery_example(): """ API Discovery: Find API endpoints and references """ print("\n\nπŸ”§ API Discovery Example") print("=" * 50) config = CrawlerRunConfig( link_preview_config=LinkPreviewConfig( include_internal=True, include_patterns=["*/api/*", "*/reference/*", "*/endpoint/*"], exclude_patterns=["*/deprecated/*", "*/v1/*"], # Skip old versions max_links=25, concurrency=10, timeout=8, verbose=False ), score_links=True ) # Example with a documentation site that has API references async with AsyncWebCrawler() as crawler: result = await crawler.arun("https://httpbin.org/", config=config) if result.success: print(f"βœ… Discovered APIs at: {result.url}") api_links = result.links.get("internal", []) # Categorize by detected content endpoints = {"GET": [], "POST": [], "PUT": [], "DELETE": [], "OTHER": []} for link in api_links: if link.get("head_data"): title = link.get("head_data", {}).get("title", "").upper() text = link.get("text", "").upper() # Simple categorization based on content if "GET" in title or "GET" in text: endpoints["GET"].append(link) elif "POST" in title or "POST" in text: endpoints["POST"].append(link) elif "PUT" in title or "PUT" in text: endpoints["PUT"].append(link) elif "DELETE" in title or "DELETE" in text: endpoints["DELETE"].append(link) else: endpoints["OTHER"].append(link) # Display results total_found = sum(len(links) for links in endpoints.values()) print(f"\nπŸ“‘ Found {total_found} API-related links:") for method, links in endpoints.items(): if links: print(f"\n{method} Endpoints ({len(links)}):") for link in links[:3]: # Show first 3 of each type title = link.get("head_data", {}).get("title", "No title") score = 
link.get("intrinsic_score", 0) print(f" β€’ [{score:.1f}] {title[:50]}...") print(f" {link['href']}") else: print(f"❌ API discovery failed: {result.error_message}") async def link_quality_analysis(): """ Link Quality Analysis: Analyze website structure and link quality """ print("\n\nπŸ“Š Link Quality Analysis Example") print("=" * 50) config = CrawlerRunConfig( link_preview_config=LinkPreviewConfig( include_internal=True, max_links=30, # Analyze more links for better statistics concurrency=15, timeout=6, verbose=False ), score_links=True ) async with AsyncWebCrawler() as crawler: # Test with a content-rich site result = await crawler.arun("https://docs.python.org/3/", config=config) if result.success: print(f"βœ… Analyzed: {result.url}") links = result.links.get("internal", []) # Extract intrinsic scores for analysis scores = [link.get('intrinsic_score', 0) for link in links if link.get('intrinsic_score') is not None] if scores: avg_score = sum(scores) / len(scores) high_quality = len([s for s in scores if s >= 7.0]) medium_quality = len([s for s in scores if 4.0 <= s < 7.0]) low_quality = len([s for s in scores if s < 4.0]) print(f"\nπŸ“ˆ Quality Analysis Results:") print(f" πŸ“Š Average Score: {avg_score:.2f}/10.0") print(f" 🟒 High Quality (β‰₯7.0): {high_quality} links") print(f" 🟑 Medium Quality (4.0-6.9): {medium_quality} links") print(f" πŸ”΄ Low Quality (<4.0): {low_quality} links") # Show best and worst links scored_links = [(link, link.get('intrinsic_score', 0)) for link in links if link.get('intrinsic_score') is not None] scored_links.sort(key=lambda x: x[1], reverse=True) print(f"\nπŸ† Top 3 Quality Links:") for i, (link, score) in enumerate(scored_links[:3]): text = link.get('text', 'No text')[:40] print(f" {i+1}. [{score:.1f}] {text}...") print(f" {link['href']}") print(f"\n⚠️ Bottom 3 Quality Links:") for i, (link, score) in enumerate(scored_links[-3:]): text = link.get('text', 'No text')[:40] print(f" {i+1}. 
[{score:.1f}] {text}...") print(f" {link['href']}") else: print("❌ No scoring data available") else: print(f"❌ Analysis failed: {result.error_message}") async def pattern_filtering_example(): """ Pattern Filtering: Demonstrate advanced filtering capabilities """ print("\n\n🎯 Pattern Filtering Example") print("=" * 50) # Example with multiple filtering strategies filters = [ { "name": "Documentation Only", "config": LinkPreviewConfig( include_internal=True, max_links=10, concurrency=5, verbose=False, include_patterns=["*/docs/*", "*/documentation/*"], exclude_patterns=["*/api/*"] ) }, { "name": "API References Only", "config": LinkPreviewConfig( include_internal=True, max_links=10, concurrency=5, verbose=False, include_patterns=["*/api/*", "*/reference/*"], exclude_patterns=["*/tutorial/*"] ) }, { "name": "Exclude Admin Areas", "config": LinkPreviewConfig( include_internal=True, max_links=10, concurrency=5, verbose=False, exclude_patterns=["*/admin/*", "*/login/*", "*/dashboard/*"] ) } ] async with AsyncWebCrawler() as crawler: for filter_example in filters: print(f"\nπŸ” Testing: {filter_example['name']}") config = CrawlerRunConfig( link_preview_config=filter_example['config'], score_links=True ) result = await crawler.arun("https://docs.python.org/3/", config=config) if result.success: links = result.links.get("internal", []) links_with_head = [link for link in links if link.get("head_data")] print(f" πŸ“Š Found {len(links_with_head)} matching links") if links_with_head: # Show sample matches for link in links_with_head[:2]: title = link.get("head_data", {}).get("title", "No title") print(f" β€’ {title[:50]}...") print(f" {link['href']}") else: print(f" ❌ Failed: {result.error_message}") async def main(): """ Run all examples """ print("πŸš€ Crawl4AI Link Head Extraction Examples") print("=" * 60) print("This will demonstrate various link analysis capabilities.\n") try: # Run all examples await basic_link_head_extraction() await research_assistant_example() await 
api_discovery_example() await link_quality_analysis() await pattern_filtering_example() print("\n" + "=" * 60) print("✨ All examples completed successfully!") print("\nNext steps:") print("1. Try modifying the queries and patterns above") print("2. Test with your own websites") print("3. Experiment with different score thresholds") print("4. Check out the full documentation for more options") except KeyboardInterrupt: print("\n⏹️ Examples interrupted by user") except Exception as e: print(f"\nπŸ’₯ Error running examples: {str(e)}") import traceback traceback.print_exc() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/dispatcher_example.py
docs/examples/dispatcher_example.py
import asyncio import time from rich import print from rich.table import Table from crawl4ai import ( AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, MemoryAdaptiveDispatcher, SemaphoreDispatcher, RateLimiter, CrawlerMonitor, DisplayMode, CacheMode, LXMLWebScrapingStrategy, ) async def memory_adaptive(urls, browser_config, run_config): """Memory adaptive crawler with monitoring""" start = time.perf_counter() async with AsyncWebCrawler(config=browser_config) as crawler: dispatcher = MemoryAdaptiveDispatcher( memory_threshold_percent=70.0, max_session_permit=10, monitor=CrawlerMonitor( max_visible_rows=15, display_mode=DisplayMode.DETAILED ), ) results = await crawler.arun_many( urls, config=run_config, dispatcher=dispatcher ) duration = time.perf_counter() - start return len(results), duration async def memory_adaptive_with_rate_limit(urls, browser_config, run_config): """Memory adaptive crawler with rate limiting""" start = time.perf_counter() async with AsyncWebCrawler(config=browser_config) as crawler: dispatcher = MemoryAdaptiveDispatcher( memory_threshold_percent=95.0, max_session_permit=10, rate_limiter=RateLimiter( base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2 ), monitor=CrawlerMonitor( max_visible_rows=15, display_mode=DisplayMode.DETAILED ), ) results = await crawler.arun_many( urls, config=run_config, dispatcher=dispatcher ) duration = time.perf_counter() - start return len(results), duration async def semaphore(urls, browser_config, run_config): """Basic semaphore crawler""" start = time.perf_counter() async with AsyncWebCrawler(config=browser_config) as crawler: dispatcher = SemaphoreDispatcher( semaphore_count=5, monitor=CrawlerMonitor( max_visible_rows=15, display_mode=DisplayMode.DETAILED ), ) results = await crawler.arun_many( urls, config=run_config, dispatcher=dispatcher ) duration = time.perf_counter() - start return len(results), duration async def semaphore_with_rate_limit(urls, browser_config, run_config): """Semaphore crawler with 
rate limiting""" start = time.perf_counter() async with AsyncWebCrawler(config=browser_config) as crawler: dispatcher = SemaphoreDispatcher( semaphore_count=5, rate_limiter=RateLimiter( base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2 ), monitor=CrawlerMonitor( max_visible_rows=15, display_mode=DisplayMode.DETAILED ), ) results = await crawler.arun_many( urls, config=run_config, dispatcher=dispatcher ) duration = time.perf_counter() - start return len(results), duration def create_performance_table(results): """Creates a rich table showing performance results""" table = Table(title="Crawler Strategy Performance Comparison") table.add_column("Strategy", style="cyan") table.add_column("URLs Crawled", justify="right", style="green") table.add_column("Time (seconds)", justify="right", style="yellow") table.add_column("URLs/second", justify="right", style="magenta") sorted_results = sorted(results.items(), key=lambda x: x[1][1]) for strategy, (urls_crawled, duration) in sorted_results: urls_per_second = urls_crawled / duration table.add_row( strategy, str(urls_crawled), f"{duration:.2f}", f"{urls_per_second:.2f}" ) return table async def main(): urls = [f"https://example.com/page{i}" for i in range(1, 40)] browser_config = BrowserConfig(headless=True, verbose=False) run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, scraping_strategy=LXMLWebScrapingStrategy()) results = { "Memory Adaptive": await memory_adaptive(urls, browser_config, run_config), # "Memory Adaptive + Rate Limit": await memory_adaptive_with_rate_limit( # urls, browser_config, run_config # ), # "Semaphore": await semaphore(urls, browser_config, run_config), # "Semaphore + Rate Limit": await semaphore_with_rate_limit( # urls, browser_config, run_config # ), } table = create_performance_table(results) print("\nPerformance Summary:") print(table) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/tutorial_v0.5.py
docs/examples/tutorial_v0.5.py
import asyncio import time import re from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, BrowserConfig, MemoryAdaptiveDispatcher, HTTPCrawlerConfig from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy from crawl4ai.deep_crawling import ( BestFirstCrawlingStrategy, FilterChain, URLPatternFilter, DomainFilter, ContentTypeFilter, ) from crawl4ai.deep_crawling.scorers import KeywordRelevanceScorer from crawl4ai.async_crawler_strategy import AsyncHTTPCrawlerStrategy from crawl4ai import ProxyConfig from crawl4ai import RoundRobinProxyStrategy from crawl4ai.content_filter_strategy import LLMContentFilter from crawl4ai import DefaultMarkdownGenerator from crawl4ai import LLMConfig from crawl4ai import JsonCssExtractionStrategy from crawl4ai.processors.pdf import PDFCrawlerStrategy, PDFContentScrapingStrategy from pprint import pprint # 1️⃣ Deep Crawling with Best-First Strategy async def deep_crawl(): """ PART 1: Deep Crawling with Best-First Strategy This function demonstrates: - Using the BestFirstCrawlingStrategy - Creating filter chains to narrow down crawl targets - Using a scorer to prioritize certain URLs - Respecting robots.txt rules """ print("\n===== DEEP CRAWLING =====") print("This example shows how to implement deep crawling with filters, scorers, and robots.txt compliance.") # Create a filter chain to filter urls based on patterns, domains and content type filter_chain = FilterChain( [ DomainFilter( allowed_domains=["docs.crawl4ai.com"], blocked_domains=["old.docs.crawl4ai.com"], ), URLPatternFilter(patterns=["*core*", "*advanced*"],), ContentTypeFilter(allowed_types=["text/html"]), ] ) # Create a keyword scorer that prioritises the pages with certain keywords first keyword_scorer = KeywordRelevanceScorer( keywords=["crawl", "example", "async", "configuration"], weight=0.7 ) # Set up the configuration with robots.txt compliance enabled deep_crawl_config = CrawlerRunConfig( deep_crawl_strategy=BestFirstCrawlingStrategy( 
max_depth=2, include_external=False, filter_chain=filter_chain, url_scorer=keyword_scorer, ), scraping_strategy=LXMLWebScrapingStrategy(), stream=True, verbose=True, check_robots_txt=True, # Enable robots.txt compliance ) # Execute the crawl async with AsyncWebCrawler() as crawler: print("\nπŸ“Š Starting deep crawl with Best-First strategy...") print(" - Filtering by domain, URL patterns, and content type") print(" - Scoring pages based on keyword relevance") print(" - Respecting robots.txt rules") start_time = time.perf_counter() results = [] async for result in await crawler.arun(url="https://docs.crawl4ai.com", config=deep_crawl_config): # Print each result as it comes in depth = result.metadata.get("depth", 0) score = result.metadata.get("score", 0) print(f"Crawled: {result.url} (Depth: {depth}), score: {score:.2f}") results.append(result) duration = time.perf_counter() - start_time # Print summary statistics print(f"\nβœ… Crawled {len(results)} high-value pages in {duration:.2f} seconds") # Group by depth if results: depth_counts = {} for result in results: depth = result.metadata.get("depth", 0) depth_counts[depth] = depth_counts.get(depth, 0) + 1 print("\nπŸ“Š Pages crawled by depth:") for depth, count in sorted(depth_counts.items()): print(f" Depth {depth}: {count} pages") # 2️⃣ Memory-Adaptive Dispatcher async def memory_adaptive_dispatcher(): """ PART 2: Memory-Adaptive Dispatcher This function demonstrates: - Using MemoryAdaptiveDispatcher to manage system memory - Batch and streaming modes with multiple URLs """ print("\n===== MEMORY-ADAPTIVE DISPATCHER =====") print("This example shows how to use the memory-adaptive dispatcher for resource management.") # Configure the dispatcher (optional, defaults are used if not provided) dispatcher = MemoryAdaptiveDispatcher( memory_threshold_percent=80.0, # Pause if memory usage exceeds 80% check_interval=0.5, # Check memory every 0.5 seconds ) # Test URLs urls = [ "https://docs.crawl4ai.com", 
"https://github.com/unclecode/crawl4ai" ] async def batch_mode(): print("\nπŸ“Š BATCH MODE:") print(" In this mode, all results are collected before being returned.") async with AsyncWebCrawler() as crawler: start_time = time.perf_counter() results = await crawler.arun_many( urls=urls, config=CrawlerRunConfig(stream=False), # Batch mode dispatcher=dispatcher, ) print(f" βœ… Received all {len(results)} results after {time.perf_counter() - start_time:.2f} seconds") for result in results: print(f" β†’ {result.url} with status code: {result.status_code}") async def stream_mode(): print("\nπŸ“Š STREAMING MODE:") print(" In this mode, results are processed as they become available.") async with AsyncWebCrawler() as crawler: start_time = time.perf_counter() count = 0 first_result_time = None async for result in await crawler.arun_many( urls=urls, config=CrawlerRunConfig(stream=True), # Stream mode dispatcher=dispatcher, ): count += 1 current_time = time.perf_counter() - start_time if count == 1: first_result_time = current_time print(f" βœ… First result after {first_result_time:.2f} seconds: {result.url}") else: print(f" β†’ Result #{count} after {current_time:.2f} seconds: {result.url}") print(f" βœ… Total: {count} results") print(f" βœ… First result: {first_result_time:.2f} seconds") print(f" βœ… All results: {time.perf_counter() - start_time:.2f} seconds") # Run both examples await batch_mode() await stream_mode() print("\nπŸ” Key Takeaway: The memory-adaptive dispatcher prevents OOM errors") print(" and manages concurrency based on system resources.") # 3️⃣ HTTP Crawler Strategy async def http_crawler_strategy(): """ PART 3: HTTP Crawler Strategy This function demonstrates: - Using the lightweight HTTP-only crawler - Setting custom headers and configurations """ print("\n===== HTTP CRAWLER STRATEGY =====") print("This example shows how to use the fast, lightweight HTTP-only crawler.") # Use the HTTP crawler strategy http_config = HTTPCrawlerConfig( method="GET", 
headers={"User-Agent": "MyCustomBot/1.0"}, follow_redirects=True, verify_ssl=True ) print("\nπŸ“Š Initializing HTTP crawler strategy...") print(" - Using custom User-Agent: MyCustomBot/1.0") print(" - Following redirects: Enabled") print(" - Verifying SSL: Enabled") # Create crawler with HTTP strategy async with AsyncWebCrawler( crawler_strategy=AsyncHTTPCrawlerStrategy(browser_config=http_config) ) as crawler: start_time = time.perf_counter() result = await crawler.arun("https://example.com") duration = time.perf_counter() - start_time print(f"\nβœ… Crawled in {duration:.2f} seconds") print(f"βœ… Status code: {result.status_code}") print(f"βœ… Content length: {len(result.html)} bytes") # Check if there was a redirect if result.redirected_url and result.redirected_url != result.url: print(f"ℹ️ Redirected from {result.url} to {result.redirected_url}") print("\nπŸ” Key Takeaway: HTTP crawler is faster and more memory-efficient") print(" than browser-based crawling for simple pages.") # 4️⃣ Proxy Rotation async def proxy_rotation(): """ PART 4: Proxy Rotation This function demonstrates: - Setting up a proxy rotation strategy - Using multiple proxies in a round-robin fashion """ print("\n===== PROXY ROTATION =====") print("This example shows how to implement proxy rotation for distributed crawling.") # Load proxies and create rotation strategy proxies = ProxyConfig.from_env() #eg: export PROXIES="ip1:port1:username1:password1,ip2:port2:username2:password2" if not proxies: print("No proxies found in environment. 
Set PROXIES env variable!") return proxy_strategy = RoundRobinProxyStrategy(proxies) # Create configs browser_config = BrowserConfig(headless=True, verbose=False) run_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, proxy_rotation_strategy=proxy_strategy ) async with AsyncWebCrawler(config=browser_config) as crawler: urls = ["https://httpbin.org/ip"] * (len(proxies) * 2) # Test each proxy twice print("\nπŸ“ˆ Initializing crawler with proxy rotation...") async with AsyncWebCrawler(config=browser_config) as crawler: print("\nπŸš€ Starting batch crawl with proxy rotation...") results = await crawler.arun_many( urls=urls, config=run_config ) for result in results: if result.success: ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html) current_proxy = run_config.proxy_config if run_config.proxy_config else None if current_proxy and ip_match: print(f"URL {result.url}") print(f"Proxy {current_proxy.server} -> Response IP: {ip_match.group(0)}") verified = ip_match.group(0) == current_proxy.ip if verified: print(f"βœ… Proxy working! IP matches: {current_proxy.ip}") else: print("❌ Proxy failed or IP mismatch!") print("---") else: print(f"❌ Crawl via proxy failed!: {result.error_message}") # 5️⃣ LLM Content Filter (requires API key) async def llm_content_filter(): """ PART 5: LLM Content Filter This function demonstrates: - Configuring LLM providers via LLMConfig - Using LLM to generate focused markdown - LLMConfig for configuration Note: Requires a valid API key for the chosen LLM provider """ print("\n===== LLM CONTENT FILTER =====") print("This example shows how to use LLM to generate focused markdown content.") print("Note: This example requires an API key. 
Set it in environment variables.") # Create LLM configuration # Replace with your actual API key or set as environment variable llm_config = LLMConfig( provider="gemini/gemini-1.5-pro", api_token="env:GEMINI_API_KEY" # Will read from GEMINI_API_KEY environment variable ) print("\nπŸ“Š Setting up LLM content filter...") print(f" - Provider: {llm_config.provider}") print(" - API token: Using environment variable") print(" - Instruction: Extract key concepts and summaries") # Create markdown generator with LLM filter markdown_generator = DefaultMarkdownGenerator( content_filter=LLMContentFilter( llm_config=llm_config, instruction="Extract key concepts and summaries" ) ) config = CrawlerRunConfig(markdown_generator=markdown_generator) async with AsyncWebCrawler() as crawler: result = await crawler.arun("https://docs.crawl4ai.com", config=config) pprint(result.markdown.fit_markdown) print("\nβœ… Generated focused markdown:") # 6️⃣ PDF Processing async def pdf_processing(): """ PART 6: PDF Processing This function demonstrates: - Using PDFCrawlerStrategy and PDFContentScrapingStrategy - Extracting text and metadata from PDFs """ print("\n===== PDF PROCESSING =====") print("This example shows how to extract text and metadata from PDF files.") # Sample PDF URL pdf_url = "https://arxiv.org/pdf/2310.06825.pdf" print("\nπŸ“Š Initializing PDF crawler...") print(f" - Target PDF: {pdf_url}") print(" - Using PDFCrawlerStrategy and PDFContentScrapingStrategy") # Create crawler with PDF strategy async with AsyncWebCrawler(crawler_strategy=PDFCrawlerStrategy()) as crawler: print("\nπŸš€ Starting PDF processing...") start_time = time.perf_counter() result = await crawler.arun( pdf_url, config=CrawlerRunConfig(scraping_strategy=PDFContentScrapingStrategy()) ) duration = time.perf_counter() - start_time print(f"\nβœ… Processed PDF in {duration:.2f} seconds") # Show metadata print("\nπŸ“„ PDF Metadata:") if result.metadata: for key, value in result.metadata.items(): if key not in 
["html", "text", "markdown"] and value: print(f" - {key}: {value}") else: print(" No metadata available") # Show sample of content if result.markdown: print("\nπŸ“ PDF Content Sample:") content_sample = result.markdown[:500] + "..." if len(result.markdown) > 500 else result.markdown print(f"---\n{content_sample}\n---") else: print("\n⚠️ No content extracted") print("\nπŸ” Key Takeaway: Crawl4AI can now process PDF files") print(" to extract both text content and metadata.") # 7️⃣ LLM Schema Generation (requires API key) async def llm_schema_generation(): """ PART 7: LLM Schema Generation This function demonstrates: - Configuring LLM providers via LLMConfig - Using LLM to generate extraction schemas - JsonCssExtractionStrategy Note: Requires a valid API key for the chosen LLM provider """ print("\n===== LLM SCHEMA GENERATION =====") print("This example shows how to use LLM to automatically generate extraction schemas.") print("Note: This example requires an API key. Set it in environment variables.") # Sample HTML sample_html = """ <div class="product"> <h2 class="title">Awesome Gaming Laptop</h2> <div class="price">$1,299.99</div> <div class="specs"> <ul> <li>16GB RAM</li> <li>512GB SSD</li> <li>RTX 3080</li> </ul> </div> <div class="rating">4.7/5</div> </div> """ print("\nπŸ“Š Setting up LLMConfig...") # Create LLM configuration llm_config = LLMConfig( provider="gemini/gemini-1.5-pro", api_token="env:GEMINI_API_KEY" ) print("\nπŸš€ Generating schema for product extraction...") print(" This would use the LLM to analyze HTML and create an extraction schema") schema = JsonCssExtractionStrategy.generate_schema( html=sample_html, llm_config = llm_config, query="Extract product name and price" ) print("\nβœ… Generated Schema:") pprint(schema) # Run all sections async def run_tutorial(): """ Main function to run all tutorial sections. 
""" print("\nπŸš€ CRAWL4AI v0.5.0 TUTORIAL πŸš€") print("===============================") print("This tutorial demonstrates the key features of Crawl4AI v0.5.0") print("Including deep crawling, memory-adaptive dispatching, advanced filtering,") print("and more powerful extraction capabilities.") # Sections to run sections = [ deep_crawl, # 1. Deep Crawling with Best-First Strategy memory_adaptive_dispatcher, # 2. Memory-Adaptive Dispatcher http_crawler_strategy, # 3. HTTP Crawler Strategy proxy_rotation, # 4. Proxy Rotation llm_content_filter, # 5. LLM Content Filter pdf_processing, # 6. PDF Processing llm_schema_generation, # 7. Schema Generation using LLM ] for section in sections: try: await section() except Exception as e: print(f"⚠️ Error in {section.__name__}: {e}") print("\nπŸŽ‰ TUTORIAL COMPLETE! πŸŽ‰") print("You've now explored the key features of Crawl4AI v0.5.0") print("For more information, visit https://docs.crawl4ai.com") # Run the tutorial if __name__ == "__main__": asyncio.run(run_tutorial())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/crawler_monitor_example.py
docs/examples/crawler_monitor_example.py
""" CrawlerMonitor Example This example demonstrates how to use the CrawlerMonitor component to visualize and track web crawler operations in real-time. """ import time import uuid import random import threading from crawl4ai.components.crawler_monitor import CrawlerMonitor from crawl4ai.models import CrawlStatus def simulate_webcrawler_operations(monitor, num_tasks=20): """ Simulates a web crawler's operations with multiple tasks and different states. Args: monitor: The CrawlerMonitor instance num_tasks: Number of tasks to simulate """ print(f"Starting simulation with {num_tasks} tasks...") # Create and register all tasks first task_ids = [] for i in range(num_tasks): task_id = str(uuid.uuid4()) url = f"https://example.com/page{i}" monitor.add_task(task_id, url) task_ids.append((task_id, url)) # Small delay between task creation time.sleep(0.2) # Process tasks with a variety of different behaviors threads = [] for i, (task_id, url) in enumerate(task_ids): # Create a thread for each task thread = threading.Thread( target=process_task, args=(monitor, task_id, url, i) ) thread.daemon = True threads.append(thread) # Start threads in batches to simulate concurrent processing batch_size = 4 # Process 4 tasks at a time for i in range(0, len(threads), batch_size): batch = threads[i:i+batch_size] for thread in batch: thread.start() time.sleep(0.5) # Stagger thread start times # Wait a bit before starting next batch time.sleep(random.uniform(1.0, 3.0)) # Update queue statistics update_queue_stats(monitor) # Simulate memory pressure changes active_threads = [t for t in threads if t.is_alive()] if len(active_threads) > 8: monitor.update_memory_status("CRITICAL") elif len(active_threads) > 4: monitor.update_memory_status("PRESSURE") else: monitor.update_memory_status("NORMAL") # Wait for all threads to complete for thread in threads: thread.join() # Final updates update_queue_stats(monitor) monitor.update_memory_status("NORMAL") print("Simulation completed!") def 
process_task(monitor, task_id, url, index): """Simulate processing of a single task.""" # Tasks start in queued state (already added) # Simulate waiting in queue wait_time = random.uniform(0.5, 3.0) time.sleep(wait_time) # Start processing - move to IN_PROGRESS monitor.update_task( task_id=task_id, status=CrawlStatus.IN_PROGRESS, start_time=time.time(), wait_time=wait_time ) # Simulate task processing with memory usage changes total_process_time = random.uniform(2.0, 10.0) step_time = total_process_time / 5 # Update in 5 steps for step in range(5): # Simulate increasing then decreasing memory usage if step < 3: # First 3 steps - increasing memory_usage = random.uniform(5.0, 20.0) * (step + 1) else: # Last 2 steps - decreasing memory_usage = random.uniform(5.0, 20.0) * (5 - step) # Update peak memory if this is higher peak = max(memory_usage, monitor.get_task_stats(task_id).get("peak_memory", 0)) monitor.update_task( task_id=task_id, memory_usage=memory_usage, peak_memory=peak ) time.sleep(step_time) # Determine final state - 80% success, 20% failure if index % 5 == 0: # Every 5th task fails monitor.update_task( task_id=task_id, status=CrawlStatus.FAILED, end_time=time.time(), memory_usage=0.0, error_message="Connection timeout" ) else: monitor.update_task( task_id=task_id, status=CrawlStatus.COMPLETED, end_time=time.time(), memory_usage=0.0 ) def update_queue_stats(monitor): """Update queue statistics based on current tasks.""" task_stats = monitor.get_all_task_stats() # Count queued tasks queued_tasks = [ stats for stats in task_stats.values() if stats["status"] == CrawlStatus.QUEUED.name ] total_queued = len(queued_tasks) if total_queued > 0: current_time = time.time() # Calculate wait times wait_times = [ current_time - stats.get("enqueue_time", current_time) for stats in queued_tasks ] highest_wait_time = max(wait_times) if wait_times else 0.0 avg_wait_time = sum(wait_times) / len(wait_times) if wait_times else 0.0 else: highest_wait_time = 0.0 avg_wait_time = 
0.0 # Update monitor monitor.update_queue_statistics( total_queued=total_queued, highest_wait_time=highest_wait_time, avg_wait_time=avg_wait_time ) def main(): # Initialize the monitor monitor = CrawlerMonitor( urls_total=20, # Total URLs to process refresh_rate=0.5, # Update UI twice per second enable_ui=True, # Enable terminal UI max_width=120 # Set maximum width to 120 characters ) # Start the monitor monitor.start() try: # Run simulation simulate_webcrawler_operations(monitor) # Keep monitor running a bit to see final state print("Waiting to view final state...") time.sleep(5) except KeyboardInterrupt: print("\nExample interrupted by user") finally: # Stop the monitor monitor.stop() print("Example completed!") # Print some statistics summary = monitor.get_summary() print("\nCrawler Statistics Summary:") print(f"Total URLs: {summary['urls_total']}") print(f"Completed: {summary['urls_completed']}") print(f"Completion percentage: {summary['completion_percentage']:.1f}%") print(f"Peak memory usage: {summary['peak_memory_percent']:.1f}%") # Print task status counts status_counts = summary['status_counts'] print("\nTask Status Counts:") for status, count in status_counts.items(): print(f" {status}: {count}") if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/regex_extraction_quickstart.py
docs/examples/regex_extraction_quickstart.py
# == File: regex_extraction_quickstart.py == """ Mini–quick-start for RegexExtractionStrategy ──────────────────────────────────────────── 3 bite-sized demos that parallel the style of *quickstart_examples_set_1.py*: 1. **Default catalog** – scrape a page and pull out e-mails / phones / URLs, etc. 2. **Custom pattern** – add your own regex at instantiation time. 3. **LLM-assisted schema** – ask the model to write a pattern, cache it, then run extraction _without_ further LLM calls. Run the whole thing with:: python regex_extraction_quickstart.py """ import os, json, asyncio from pathlib import Path from typing import List from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RegexExtractionStrategy, LLMConfig, ) # ──────────────────────────────────────────────────────────────────────────── # 1. Default-catalog extraction # ──────────────────────────────────────────────────────────────────────────── async def demo_regex_default() -> None: print("\n=== 1. Regex extraction – default patterns ===") url = "https://www.iana.org/domains/example" # has e-mail + URLs strategy = RegexExtractionStrategy( pattern = RegexExtractionStrategy.Url | RegexExtractionStrategy.Currency ) config = CrawlerRunConfig(extraction_strategy=strategy) async with AsyncWebCrawler() as crawler: result: CrawlResult = await crawler.arun(url, config=config) print(f"Fetched {url} - success={result.success}") if result.success: data = json.loads(result.extracted_content) for d in data[:10]: print(f" {d['label']:<12} {d['value']}") print(f"... total matches: {len(data)}") else: print(" !!! crawl failed") # ──────────────────────────────────────────────────────────────────────────── # 2. Custom pattern override / extension # ──────────────────────────────────────────────────────────────────────────── async def demo_regex_custom() -> None: print("\n=== 2. 
Regex extraction – custom price pattern ===") url = "https://www.apple.com/shop/buy-mac/macbook-pro" price_pattern = {"usd_price": r"\$\s?\d{1,3}(?:,\d{3})*(?:\.\d{2})?"} strategy = RegexExtractionStrategy(custom = price_pattern) config = CrawlerRunConfig(extraction_strategy=strategy) async with AsyncWebCrawler() as crawler: result: CrawlResult = await crawler.arun(url, config=config) if result.success: data = json.loads(result.extracted_content) for d in data: print(f" {d['value']}") if not data: print(" (No prices found - page layout may have changed)") else: print(" !!! crawl failed") # ──────────────────────────────────────────────────────────────────────────── # 3. One-shot LLM pattern generation, then fast extraction # ──────────────────────────────────────────────────────────────────────────── async def demo_regex_generate_pattern() -> None: print("\n=== 3. generate_pattern β†’ regex extraction ===") cache_dir = Path(__file__).parent / "tmp" cache_dir.mkdir(exist_ok=True) pattern_file = cache_dir / "price_pattern.json" url = "https://www.lazada.sg/tag/smartphone/" # ── 3-A. build or load the cached pattern if pattern_file.exists(): pattern = json.load(pattern_file.open(encoding="utf-8")) print("Loaded cached pattern:", pattern) else: print("Generating pattern via LLM…") llm_cfg = LLMConfig( provider="openai/gpt-4o-mini", api_token="env:OPENAI_API_KEY", ) # pull one sample page as HTML context async with AsyncWebCrawler() as crawler: html = (await crawler.arun(url)).fit_html pattern = RegexExtractionStrategy.generate_pattern( label="price", html=html, query="Prices in Malaysian Ringgit (e.g. RM1,299.00 or RM200)", llm_config=llm_cfg, ) json.dump(pattern, pattern_file.open("w", encoding="utf-8"), indent=2) print("Saved pattern:", pattern_file) # ── 3-B. 
extraction pass – zero LLM calls strategy = RegexExtractionStrategy(custom=pattern) config = CrawlerRunConfig(extraction_strategy=strategy, delay_before_return_html=3) async with AsyncWebCrawler() as crawler: result: CrawlResult = await crawler.arun(url, config=config) if result.success: data = json.loads(result.extracted_content) for d in data[:15]: print(f" {d['value']}") print(f"... total matches: {len(data)}") else: print(" !!! crawl failed") # ──────────────────────────────────────────────────────────────────────────── # Entrypoint # ──────────────────────────────────────────────────────────────────────────── async def main() -> None: # await demo_regex_default() # await demo_regex_custom() await demo_regex_generate_pattern() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/hello_world.py
docs/examples/hello_world.py
import asyncio from crawl4ai import ( AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, DefaultMarkdownGenerator, PruningContentFilter, CrawlResult ) async def main(): browser_config = BrowserConfig( headless=False, verbose=True, ) async with AsyncWebCrawler(config=browser_config) as crawler: crawler_config = CrawlerRunConfig( markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter() ), ) result: CrawlResult = await crawler.arun( url="https://www.helloworld.org", config=crawler_config ) print(result.markdown.raw_markdown[:500]) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/llm_extraction_openai_pricing.py
docs/examples/llm_extraction_openai_pricing.py
import asyncio from pydantic import BaseModel, Field from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, LLMConfig, BrowserConfig, CacheMode from crawl4ai.extraction_strategy import LLMExtractionStrategy from typing import Dict import os class OpenAIModelFee(BaseModel): model_name: str = Field(..., description="Name of the OpenAI model.") input_fee: str = Field(..., description="Fee for input token for the OpenAI model.") output_fee: str = Field(..., description="Fee for output token for the OpenAI model.") async def extract_structured_data_using_llm(provider: str, api_token: str = None, extra_headers: Dict[str, str] = None): print(f"\n--- Extracting Structured Data with {provider} ---") if api_token is None and provider != "ollama": print(f"API token is required for {provider}. Skipping this example.") return browser_config = BrowserConfig(headless=True) extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000} if extra_headers: extra_args["extra_headers"] = extra_headers crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, word_count_threshold=1, page_timeout=80000, extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider=provider, api_token=api_token), schema=OpenAIModelFee.model_json_schema(), extraction_type="schema", instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens. Do not miss any models in the entire content.""", extra_args=extra_args, ), ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url="https://openai.com/api/pricing/", config=crawler_config ) print(result.extracted_content) if __name__ == "__main__": asyncio.run( extract_structured_data_using_llm( provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY") ) )
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/rest_call.py
docs/examples/rest_call.py
"""Examples of calling the Crawl4AI REST endpoint directly with `requests`."""
import requests, base64, os

# --- Minimal request: crawl one URL and capture a screenshot ---
data = {
    "urls": ["https://www.nbcnews.com/business"],
    "screenshot": True,
}

response = requests.post("https://crawl4ai.com/crawl", json=data)
result = response.json()["results"][0]

print(result.keys())
# dict_keys(['url', 'html', 'success', 'cleaned_html', 'media',
#  'links', 'screenshot', 'markdown', 'extracted_content',
#  'metadata', 'error_message'])

# The screenshot comes back base64-encoded; decode and persist it.
with open("screenshot.png", "wb") as f:
    f.write(base64.b64decode(result["screenshot"]))

# --- Payload variants (shown for reference; not posted above) ---

# Example of filtering the content using CSS selectors
data = {
    "urls": ["https://www.nbcnews.com/business"],
    "css_selector": "article",
    "screenshot": True,
}

# Example of executing a JS script on the page before extracting the content
data = {
    "urls": ["https://www.nbcnews.com/business"],
    "screenshot": True,
    "js": [
        """
        const loadMoreButton = Array.from(document.querySelectorAll('button')).
        find(button => button.textContent.includes('Load More'));
        loadMoreButton && loadMoreButton.click();
        """
    ],
}

# Example of using a custom extraction strategy
data = {
    "urls": ["https://www.nbcnews.com/business"],
    "extraction_strategy": "CosineStrategy",
    "extraction_strategy_args": {"semantic_filter": "inflation rent prices"},
}

# Example of using LLM to extract content
data = {
    "urls": ["https://www.nbcnews.com/business"],
    "extraction_strategy": "LLMExtractionStrategy",
    "extraction_strategy_args": {
        "provider": "groq/llama3-8b-8192",
        "api_token": os.environ.get("GROQ_API_KEY"),
        "instruction": """I am interested in only financial news, and translate them in French.""",
    },
}
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/ssl_example.py
docs/examples/ssl_example.py
"""Example showing how to work with SSL certificates in Crawl4AI.""" import asyncio import os from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode # Create tmp directory if it doesn't exist parent_dir = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ) tmp_dir = os.path.join(parent_dir, "tmp") os.makedirs(tmp_dir, exist_ok=True) async def main(): # Configure crawler to fetch SSL certificate config = CrawlerRunConfig( fetch_ssl_certificate=True, cache_mode=CacheMode.BYPASS, # Bypass cache to always get fresh certificates ) async with AsyncWebCrawler() as crawler: result = await crawler.arun(url="https://example.com", config=config) if result.success and result.ssl_certificate: cert = result.ssl_certificate # 1. Access certificate properties directly print("\nCertificate Information:") print(f"Issuer: {cert.issuer.get('CN', '')}") print(f"Valid until: {cert.valid_until}") print(f"Fingerprint: {cert.fingerprint}") # 2. Export certificate in different formats cert.to_json(os.path.join(tmp_dir, "certificate.json")) # For analysis print("\nCertificate exported to:") print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}") pem_data = cert.to_pem( os.path.join(tmp_dir, "certificate.pem") ) # For web servers print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}") der_data = cert.to_der( os.path.join(tmp_dir, "certificate.der") ) # For Java apps print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/stealth_test_simple.py
docs/examples/stealth_test_simple.py
""" Simple test to verify stealth mode is working """ import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig async def test_stealth(): """Test stealth mode effectiveness""" # Test WITHOUT stealth print("=== WITHOUT Stealth ===") config1 = BrowserConfig( headless=False, enable_stealth=False ) async with AsyncWebCrawler(config=config1) as crawler: result = await crawler.arun( url="https://bot.sannysoft.com", config=CrawlerRunConfig( wait_until="networkidle", screenshot=True ) ) print(f"Success: {result.success}") # Take screenshot if result.screenshot: with open("without_stealth.png", "wb") as f: import base64 f.write(base64.b64decode(result.screenshot)) print("Screenshot saved: without_stealth.png") # Test WITH stealth print("\n=== WITH Stealth ===") config2 = BrowserConfig( headless=False, enable_stealth=True ) async with AsyncWebCrawler(config=config2) as crawler: result = await crawler.arun( url="https://bot.sannysoft.com", config=CrawlerRunConfig( wait_until="networkidle", screenshot=True ) ) print(f"Success: {result.success}") # Take screenshot if result.screenshot: with open("with_stealth.png", "wb") as f: import base64 f.write(base64.b64decode(result.screenshot)) print("Screenshot saved: with_stealth.png") print("\nCheck the screenshots to see the difference in bot detection results!") if __name__ == "__main__": asyncio.run(test_stealth())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/undetected_simple_demo.py
docs/examples/undetected_simple_demo.py
""" Simple Undetected Browser Demo Demonstrates the basic usage of undetected browser mode """ import asyncio from crawl4ai import ( AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, UndetectedAdapter ) from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy async def crawl_with_regular_browser(url: str): """Crawl with regular browser""" print("\n[Regular Browser Mode]") browser_config = BrowserConfig( headless=False, verbose=True, ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url=url, config=CrawlerRunConfig( delay_before_return_html=2.0 ) ) print(f"Success: {result.success}") print(f"Status: {result.status_code}") print(f"Content length: {len(result.markdown.raw_markdown)}") # Check for bot detection keywords content = result.markdown.raw_markdown.lower() if any(word in content for word in ["cloudflare", "checking your browser", "please wait"]): print("⚠️ Bot detection triggered!") else: print("βœ… Page loaded successfully") return result async def crawl_with_undetected_browser(url: str): """Crawl with undetected browser""" print("\n[Undetected Browser Mode]") browser_config = BrowserConfig( headless=False, verbose=True, ) # Create undetected adapter and strategy undetected_adapter = UndetectedAdapter() crawler_strategy = AsyncPlaywrightCrawlerStrategy( browser_config=browser_config, browser_adapter=undetected_adapter ) async with AsyncWebCrawler( crawler_strategy=crawler_strategy, config=browser_config ) as crawler: result = await crawler.arun( url=url, config=CrawlerRunConfig( delay_before_return_html=2.0 ) ) print(f"Success: {result.success}") print(f"Status: {result.status_code}") print(f"Content length: {len(result.markdown.raw_markdown)}") # Check for bot detection keywords content = result.markdown.raw_markdown.lower() if any(word in content for word in ["cloudflare", "checking your browser", "please wait"]): print("⚠️ Bot detection triggered!") else: print("βœ… Page loaded successfully") 
return result async def main(): """Demo comparing regular vs undetected modes""" print("πŸ€– Crawl4AI Undetected Browser Demo") print("="*50) # Test URLs - you can change these test_urls = [ "https://www.example.com", # Simple site "https://httpbin.org/headers", # Shows request headers ] for url in test_urls: print(f"\nπŸ“ Testing URL: {url}") # Test with regular browser regular_result = await crawl_with_regular_browser(url) # Small delay await asyncio.sleep(2) # Test with undetected browser undetected_result = await crawl_with_undetected_browser(url) # Compare results print(f"\nπŸ“Š Comparison for {url}:") print(f"Regular browser content: {len(regular_result.markdown.raw_markdown)} chars") print(f"Undetected browser content: {len(undetected_result.markdown.raw_markdown)} chars") if url == "https://httpbin.org/headers": # Show headers for comparison print("\nHeaders seen by server:") print("Regular:", regular_result.markdown.raw_markdown[:500]) print("\nUndetected:", undetected_result.markdown.raw_markdown[:500]) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/quickstart_examples_set_1.py
docs/examples/quickstart_examples_set_1.py
import asyncio
import os
import json
import base64
from pathlib import Path
from typing import List

from crawl4ai import ProxyConfig
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, CrawlResult
from crawl4ai import RoundRobinProxyStrategy
from crawl4ai import JsonCssExtractionStrategy, LLMExtractionStrategy
from crawl4ai import LLMConfig
from crawl4ai import PruningContentFilter, BM25ContentFilter
from crawl4ai import DefaultMarkdownGenerator
from crawl4ai import BFSDeepCrawlStrategy, DomainFilter, FilterChain
from crawl4ai import BrowserConfig

__cur_dir__ = Path(__file__).parent


async def demo_basic_crawl():
    """Basic web crawling with markdown generation"""
    print("\n=== 1. Basic Web Crawling ===")
    browser_config = BrowserConfig(
        viewport_height=800,
        viewport_width=1200,
        headless=True,
        verbose=True,
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        results: List[CrawlResult] = await crawler.arun(
            url="https://news.ycombinator.com/"
        )

        for i, result in enumerate(results):
            print(f"Result {i + 1}:")
            print(f"Success: {result.success}")
            if result.success:
                print(f"Markdown length: {len(result.markdown.raw_markdown)} chars")
                print(f"First 100 chars: {result.markdown.raw_markdown[:100]}...")
            else:
                print("Failed to crawl the URL")


async def demo_parallel_crawl():
    """Crawl multiple URLs in parallel"""
    print("\n=== 2. Parallel Crawling ===")

    urls = [
        "https://news.ycombinator.com/",
        "https://example.com/",
        "https://httpbin.org/html",
    ]

    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun_many(urls=urls)

        print(f"Crawled {len(results)} URLs in parallel:")
        for i, result in enumerate(results):
            print(
                f"  {i + 1}. {result.url} - {'Success' if result.success else 'Failed'}"
            )


async def demo_fit_markdown():
    """Generate focused markdown with LLM content filter"""
    print("\n=== 3. Fit Markdown with LLM Content Filter ===")

    async with AsyncWebCrawler() as crawler:
        result: CrawlResult = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Python_(programming_language)",
            config=CrawlerRunConfig(
                markdown_generator=DefaultMarkdownGenerator(
                    content_filter=PruningContentFilter()
                )
            ),
        )

        # Print stats and save the fit markdown
        print(f"Raw: {len(result.markdown.raw_markdown)} chars")
        print(f"Fit: {len(result.markdown.fit_markdown)} chars")


async def demo_llm_structured_extraction_no_schema():
    """LLM-driven structured extraction without a pre-built schema."""
    # Create a simple LLM extraction strategy (no schema required)
    extraction_strategy = LLMExtractionStrategy(
        llm_config=LLMConfig(
            provider="groq/qwen-2.5-32b",
            api_token="env:GROQ_API_KEY",
        ),
        instruction="This is news.ycombinator.com, extract all news, and for each, I want title, source url, number of comments.",
        # BUG FIX: the keyword is `extraction_type` (see LLMExtractionStrategy
        # usage elsewhere in this repo); `extract_type` was silently ignored.
        extraction_type="schema",
        schema="{title: string, url: string, comments: int}",
        extra_args={
            "temperature": 0.0,
            "max_tokens": 4096,
        },
        verbose=True,
    )

    config = CrawlerRunConfig(extraction_strategy=extraction_strategy)

    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            "https://news.ycombinator.com/", config=config
        )

        for result in results:
            print(f"URL: {result.url}")
            print(f"Success: {result.success}")
            if result.success:
                data = json.loads(result.extracted_content)
                print(json.dumps(data, indent=2))
            else:
                print("Failed to extract structured data")


async def demo_css_structured_extraction_no_schema():
    """Extract structured data using CSS selectors"""
    print("\n=== 5. CSS-Based Structured Extraction ===")

    # Sample HTML for schema generation (one-time cost)
    sample_html = """
<div class="body-post clear">
  <a class="story-link" href="https://thehackernews.com/2025/04/malicious-python-packages-on-pypi.html">
    <div class="clear home-post-box cf">
      <div class="home-img clear">
        <div class="img-ratio">
          <img alt="..." src="...">
        </div>
      </div>
      <div class="clear home-right">
        <h2 class="home-title">Malicious Python Packages on PyPI Downloaded 39,000+ Times, Steal Sensitive Data</h2>
        <div class="item-label">
          <span class="h-datetime"><i class="icon-font icon-calendar"></i>Apr 05, 2025</span>
          <span class="h-tags">Malware / Supply Chain Attack</span>
        </div>
        <div class="home-desc"> Cybersecurity researchers have...</div>
      </div>
    </div>
  </a>
</div>
"""

    # ROBUSTNESS: make sure the tmp directory exists before reading/writing.
    os.makedirs(f"{__cur_dir__}/tmp", exist_ok=True)

    # Check if schema file exists
    schema_file_path = f"{__cur_dir__}/tmp/schema.json"
    if os.path.exists(schema_file_path):
        with open(schema_file_path, "r") as f:
            schema = json.load(f)
    else:
        # Generate schema using LLM (one-time setup)
        schema = JsonCssExtractionStrategy.generate_schema(
            html=sample_html,
            llm_config=LLMConfig(
                provider="groq/qwen-2.5-32b",
                api_token="env:GROQ_API_KEY",
            ),
            query="From https://thehackernews.com/, I have shared a sample of one news div with a title, date, and description. Please generate a schema for this news div.",
        )
        print(f"Generated schema: {json.dumps(schema, indent=2)}")
        # Save the schema to a file, and use it for future extractions; with a
        # cached schema the LLM is only called once.
        with open(schema_file_path, "w") as f:
            json.dump(schema, f, indent=2)

    # Create no-LLM extraction strategy with the generated schema
    extraction_strategy = JsonCssExtractionStrategy(schema)
    config = CrawlerRunConfig(extraction_strategy=extraction_strategy)

    # Use the fast CSS extraction (no LLM calls during extraction)
    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            "https://thehackernews.com", config=config
        )

        for result in results:
            print(f"URL: {result.url}")
            print(f"Success: {result.success}")
            if result.success:
                data = json.loads(result.extracted_content)
                print(json.dumps(data, indent=2))
            else:
                print("Failed to extract structured data")


async def demo_deep_crawl():
    """Deep crawling with BFS strategy"""
    print("\n=== 6. Deep Crawling ===")

    filter_chain = FilterChain([DomainFilter(allowed_domains=["crawl4ai.com"])])

    deep_crawl_strategy = BFSDeepCrawlStrategy(
        max_depth=1, max_pages=5, filter_chain=filter_chain
    )

    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            url="https://docs.crawl4ai.com",
            config=CrawlerRunConfig(deep_crawl_strategy=deep_crawl_strategy),
        )

        print(f"Deep crawl returned {len(results)} pages:")
        for i, result in enumerate(results):
            depth = result.metadata.get("depth", "unknown")
            print(f"  {i + 1}. {result.url} (Depth: {depth})")


async def demo_js_interaction():
    """Execute JavaScript to load more content"""
    print("\n=== 7. JavaScript Interaction ===")

    # A simple page that needs JS to reveal content
    async with AsyncWebCrawler(config=BrowserConfig(headless=False)) as crawler:
        # Initial load
        news_schema = {
            "name": "news",
            "baseSelector": "tr.athing",
            "fields": [
                {
                    "name": "title",
                    "selector": "span.titleline",
                    "type": "text",
                }
            ],
        }
        results: List[CrawlResult] = await crawler.arun(
            url="https://news.ycombinator.com",
            config=CrawlerRunConfig(
                session_id="hn_session",  # Keep session
                extraction_strategy=JsonCssExtractionStrategy(schema=news_schema),
            ),
        )

        news = []
        for result in results:
            if result.success:
                data = json.loads(result.extracted_content)
                news.extend(data)
                print(json.dumps(data, indent=2))
            else:
                print("Failed to extract structured data")

        print(f"Initial items: {len(news)}")

        # Click "More" link
        more_config = CrawlerRunConfig(
            js_code="document.querySelector('a.morelink').click();",
            js_only=True,  # Continue in same page
            session_id="hn_session",  # Keep session
            extraction_strategy=JsonCssExtractionStrategy(schema=news_schema),
        )

        more_results: List[CrawlResult] = await crawler.arun(
            url="https://news.ycombinator.com", config=more_config
        )

        # BUG FIX: the original looped over the stale `results` from the first
        # load here, double-counting the initial items and ignoring the newly
        # loaded page entirely. Iterate the second call's results instead.
        for result in more_results:
            if result.success:
                data = json.loads(result.extracted_content)
                news.extend(data)
                print(json.dumps(data, indent=2))
            else:
                print("Failed to extract structured data")

        print(f"Total items: {len(news)}")


async def demo_media_and_links():
    """Extract media and links from a page"""
    print("\n=== 8. Media and Links Extraction ===")

    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            "https://en.wikipedia.org/wiki/Main_Page"
        )

        for i, result in enumerate(results):
            # Extract and save all images
            images = result.media.get("images", [])
            print(f"Found {len(images)} images")

            # Extract and save all links (internal and external)
            internal_links = result.links.get("internal", [])
            external_links = result.links.get("external", [])
            print(f"Found {len(internal_links)} internal links")
            print(f"Found {len(external_links)} external links")

            # Print some of the images and links
            for image in images[:3]:
                print(f"Image: {image['src']}")
            for link in internal_links[:3]:
                print(f"Internal link: {link['href']}")
            for link in external_links[:3]:
                print(f"External link: {link['href']}")

            # Save everything to files (ensure the tmp dir exists first)
            os.makedirs(f"{__cur_dir__}/tmp", exist_ok=True)
            with open(f"{__cur_dir__}/tmp/images.json", "w") as f:
                json.dump(images, f, indent=2)
            with open(f"{__cur_dir__}/tmp/links.json", "w") as f:
                json.dump(
                    {"internal": internal_links, "external": external_links},
                    f,
                    indent=2,
                )


async def demo_screenshot_and_pdf():
    """Capture screenshot and PDF of a page"""
    print("\n=== 9. Screenshot and PDF Capture ===")

    async with AsyncWebCrawler() as crawler:
        results: List[CrawlResult] = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Giant_anteater",
            config=CrawlerRunConfig(screenshot=True, pdf=True),
        )

        for i, result in enumerate(results):
            if result.screenshot:
                # Screenshot is base64-encoded; decode before writing.
                screenshot_path = f"{__cur_dir__}/tmp/example_screenshot.png"
                with open(screenshot_path, "wb") as f:
                    f.write(base64.b64decode(result.screenshot))
                print(f"Screenshot saved to {screenshot_path}")

            if result.pdf:
                # PDF bytes can be written directly.
                pdf_path = f"{__cur_dir__}/tmp/example.pdf"
                with open(pdf_path, "wb") as f:
                    f.write(result.pdf)
                print(f"PDF saved to {pdf_path}")


async def demo_proxy_rotation():
    """Proxy rotation for multiple requests"""
    print("\n=== 10. Proxy Rotation ===")

    # Example proxies (replace with real ones)
    proxies = [
        ProxyConfig(server="http://proxy1.example.com:8080"),
        ProxyConfig(server="http://proxy2.example.com:8080"),
    ]
    proxy_strategy = RoundRobinProxyStrategy(proxies)

    print(f"Using {len(proxies)} proxies in rotation")
    print(
        "Note: This example uses placeholder proxies - replace with real ones to test"
    )

    async with AsyncWebCrawler() as crawler:
        config = CrawlerRunConfig(proxy_rotation_strategy=proxy_strategy)
        # In a real scenario, these would be run and the proxies would rotate
        print("In a real scenario, requests would rotate through the available proxies")


async def demo_raw_html_and_file():
    """Process raw HTML and local files"""
    print("\n=== 11. Raw HTML and Local Files ===")

    raw_html = """
    <html><body>
        <h1>Sample Article</h1>
        <p>This is sample content for testing Crawl4AI's raw HTML processing.</p>
    </body></html>
    """

    # Save to file
    file_path = Path("docs/examples/tmp/sample.html").absolute()
    with open(file_path, "w") as f:
        f.write(raw_html)

    async with AsyncWebCrawler() as crawler:
        # Crawl raw HTML via the "raw:" URL scheme
        raw_result = await crawler.arun(
            url="raw:" + raw_html, config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
        )
        print("Raw HTML processing:")
        print(f"  Markdown: {raw_result.markdown.raw_markdown[:50]}...")

        # Crawl local file via the "file://" URL scheme
        file_result = await crawler.arun(
            url=f"file://{file_path}",
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
        )
        print("\nLocal file processing:")
        print(f"  Markdown: {file_result.markdown.raw_markdown[:50]}...")

    # Clean up
    os.remove(file_path)
    print(f"Processed both raw HTML and local file ({file_path})")


async def main():
    """Run all demo functions sequentially"""
    print("=== Comprehensive Crawl4AI Demo ===")
    print("Note: Some examples require API keys or other configurations")

    # Run all demos
    await demo_basic_crawl()
    await demo_parallel_crawl()
    await demo_fit_markdown()
    await demo_llm_structured_extraction_no_schema()
    await demo_css_structured_extraction_no_schema()
    await demo_deep_crawl()
    await demo_js_interaction()
    await demo_media_and_links()
    await demo_screenshot_and_pdf()
    # await demo_proxy_rotation()
    await demo_raw_html_and_file()

    print("\n=== Demo Complete ===")
    print("Check for any generated files (screenshots, PDFs) in the current directory")


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/scraping_strategies_performance.py
docs/examples/scraping_strategies_performance.py
import time, re
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy
# WebScrapingStrategy is now an alias for LXMLWebScrapingStrategy

import functools
from collections import defaultdict


class TimingStats:
    """Accumulates per-strategy, per-function call counts and wall time."""

    def __init__(self):
        # stats[strategy][function] -> {"calls": int, "total_time": float}
        self.stats = defaultdict(lambda: defaultdict(lambda: {"calls": 0, "total_time": 0}))

    def add(self, strategy_name, func_name, elapsed):
        """Record one timed call of `func_name` under `strategy_name`."""
        self.stats[strategy_name][func_name]["calls"] += 1
        self.stats[strategy_name][func_name]["total_time"] += elapsed

    def report(self):
        """Print a per-strategy table, slowest functions first."""
        for strategy_name, funcs in self.stats.items():
            print(f"\n{strategy_name} Timing Breakdown:")
            print("-" * 60)
            print(f"{'Function':<30} {'Calls':<10} {'Total(s)':<10} {'Avg(ms)':<10}")
            print("-" * 60)
            for func, data in sorted(funcs.items(), key=lambda x: x[1]["total_time"], reverse=True):
                avg_ms = (data["total_time"] / data["calls"]) * 1000
                print(f"{func:<30} {data['calls']:<10} {data['total_time']:<10.3f} {avg_ms:<10.2f}")


timing_stats = TimingStats()


def timing_decorator(strategy_name):
    """Decorator factory: records elapsed wall time of each call into timing_stats."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - start
            timing_stats.add(strategy_name, func.__name__, elapsed)
            return result
        return wrapper
    return decorator


def apply_decorators(cls, method_name, strategy_name):
    """Wrap cls.<method_name> with the timing decorator if the method exists."""
    try:
        original_method = getattr(cls, method_name)
        decorated_method = timing_decorator(strategy_name)(original_method)
        setattr(cls, method_name, decorated_method)
    except AttributeError:
        print(f"Method {method_name} not found in class {cls.__name__}.")


# Key internal methods to profile on the scraping strategy.
methods_to_profile = [
    '_scrap',
    # 'process_element',
    '_process_element',
    'process_image',
]

# Apply timing decorators to the profiled strategy.
for strategy, name in [(LXMLWebScrapingStrategy, "LXML")]:
    for method in methods_to_profile:
        apply_decorators(strategy, method, name)


def generate_large_html(n_elements=1000):
    """Build a synthetic HTML document containing n_elements article blocks."""
    html = ['<!DOCTYPE html><html><head></head><body>']
    for i in range(n_elements):
        html.append(f'''
        <div class="article">
            <h2>Heading {i}</h2>
            <div>
                <div>
                    <p>This is paragraph {i} with some content and a <a href="http://example.com/{i}">link</a></p>
                </div>
            </div>
            <img src="image{i}.jpg" alt="Image {i}">
            <ul>
                <li>List item {i}.1</li>
                <li>List item {i}.2</li>
            </ul>
        </div>
        ''')
    html.append('</body></html>')
    return ''.join(html)


def test_scraping():
    """Scrape a large synthetic document twice and report timings/stats."""
    # Initialize both scrapers
    original_scraper = LXMLWebScrapingStrategy()
    selected_scraper = LXMLWebScrapingStrategy()

    # Generate test HTML
    print("Generating HTML...")
    html = generate_large_html(5000)
    print(f"HTML Size: {len(html)/1024:.2f} KB")

    print("\nStarting scrape...")
    kwargs = {
        "url": "http://example.com",
        "html": html,
        "word_count_threshold": 5,
        "keep_data_attributes": True,
    }

    t1 = time.perf_counter()
    result_selected = selected_scraper.scrap(**kwargs)
    t2 = time.perf_counter()
    result_original = original_scraper.scrap(**kwargs)
    t3 = time.perf_counter()

    # BUG FIX: the original computed `t3 - start_time`, subtracting a
    # time.time() epoch timestamp from a perf_counter() reading, which
    # yields a meaningless number. Use matching perf_counter readings.
    elapsed = t3 - t1
    print(f"\nScraping completed in {elapsed:.2f} seconds")
    timing_stats.report()

    # Print stats of the first run
    # BUG FIX: "\Turbo" was an invalid escape sequence / missing newline.
    print("\nTurbo Output:")
    print(f"\nExtracted links: {len(result_selected.links.internal) + len(result_selected.links.external)}")
    print(f"Extracted images: {len(result_selected.media.images)}")
    print(f"Clean HTML size: {len(result_selected.cleaned_html)/1024:.2f} KB")
    print(f"Scraping time: {t2 - t1:.2f} seconds")

    # Print stats of the second run
    print("\nOriginal Output:")
    print(f"\nExtracted links: {len(result_original.links.internal) + len(result_original.links.external)}")
    print(f"Extracted images: {len(result_original.media.images)}")
    print(f"Clean HTML size: {len(result_original.cleaned_html)/1024:.2f} KB")
    # BUG FIX: the second run spans t2..t3, not t1..t3.
    print(f"Scraping time: {t3 - t2:.2f} seconds")


if __name__ == "__main__":
    test_scraping()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/quickstart_examples_set_2.py
docs/examples/quickstart_examples_set_2.py
"""Quickstart examples, set 2: a tour of Crawl4AI features.

Each ``async def`` below is a self-contained, runnable demo. ``main()`` at the
bottom runs a curated subset. All examples hit live websites, so results depend
on the current state of those pages.
"""
import os, sys

from crawl4ai.types import LLMConfig

# Make the repo root importable when running this file directly from docs/examples.
sys.path.append(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

import asyncio
import time
import json
import re
from typing import Dict
from bs4 import BeautifulSoup
from pydantic import BaseModel, Field
from crawl4ai import AsyncWebCrawler, CacheMode, BrowserConfig, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai import (
    JsonCssExtractionStrategy,
    LLMExtractionStrategy,
)

# Directory this script lives in; used for screenshot/certificate output paths.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

print("Crawl4AI: Advanced Web Crawling and Data Extraction")
print("GitHub Repository: https://github.com/unclecode/crawl4ai")
print("Twitter: @unclecode")
print("Website: https://crawl4ai.com")


# Basic Example - Simple Crawl
async def simple_crawl():
    """Minimal crawl: fetch one page and print the first 500 markdown chars."""
    print("\n--- Basic Usage ---")
    browser_config = BrowserConfig(headless=True)
    crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        print(result.markdown[:500])


async def clean_content():
    """Compare raw markdown length vs. pruned 'fit' markdown length."""
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        excluded_tags=["nav", "footer", "aside"],
        remove_overlay_elements=True,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(
                threshold=0.48, threshold_type="fixed", min_word_threshold=0
            ),
            options={"ignore_links": True},
        ),
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Apple",
            config=crawler_config,
        )
        full_markdown_length = len(result.markdown.raw_markdown)
        fit_markdown_length = len(result.markdown.fit_markdown)
        print(f"Full Markdown Length: {full_markdown_length}")
        print(f"Fit Markdown Length: {fit_markdown_length}")


async def link_analysis():
    """List internal links found on a page, excluding external/social links."""
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.ENABLED,
        exclude_external_links=True,
        exclude_social_media_links=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            config=crawler_config,
        )
        print(f"Found {len(result.links['internal'])} internal links")
        print(f"Found {len(result.links['external'])} external links")
        for link in result.links["internal"][:5]:
            print(f"Href: {link['href']}\nText: {link['text']}\n")


# JavaScript Execution Example
async def simple_example_with_running_js_code():
    """Click a 'Load More' button via injected JS before extracting content."""
    print("\n--- Executing JavaScript and Using CSS Selectors ---")
    browser_config = BrowserConfig(headless=True, java_script_enabled=True)
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        js_code="const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();",
        # wait_for="() => { return Array.from(document.querySelectorAll('article.tease-card')).length > 10; }"
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        print(result.markdown[:500])


# CSS Selector Example
async def simple_example_with_css_selector():
    """Restrict extraction to elements matched by a CSS selector."""
    print("\n--- Using CSS Selectors ---")
    browser_config = BrowserConfig(headless=True)
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS, css_selector=".wide-tease-item__description"
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        print(result.markdown[:500])


async def media_handling():
    """Capture a screenshot and print metadata for the first few images."""
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS, exclude_external_images=True, screenshot=True
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        for img in result.media["images"][:5]:
            print(f"Image URL: {img['src']}, Alt: {img['alt']}, Score: {img['score']}")


async def custom_hook_workflow(verbose=True):
    """Demonstrate attaching a lifecycle hook to the crawler strategy."""
    async with AsyncWebCrawler() as crawler:
        # Set a 'before_goto' hook to run custom code just before navigation
        crawler.crawler_strategy.set_hook(
            "before_goto",
            lambda page, context: print("[Hook] Preparing to navigate..."),
        )
        # Perform the crawl operation
        result = await crawler.arun(url="https://crawl4ai.com")
        print(result.markdown.raw_markdown[:500].replace("\n", " -- "))


# Proxy Example
async def use_proxy():
    """Route the browser through an (example placeholder) HTTP proxy."""
    print("\n--- Using a Proxy ---")
    browser_config = BrowserConfig(
        headless=True,
        proxy_config={
            "server": "http://proxy.example.com:8080",
            "username": "username",
            "password": "password",
        },
    )
    crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        if result.success:
            print(result.markdown[:500])


# Screenshot Example
async def capture_and_save_screenshot(url: str, output_path: str):
    """Crawl *url* and write its base64 screenshot to *output_path*."""
    browser_config = BrowserConfig(headless=True)
    crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, screenshot=True)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url=url, config=crawler_config)
        if result.success and result.screenshot:
            import base64

            # result.screenshot is a base64-encoded PNG payload.
            screenshot_data = base64.b64decode(result.screenshot)
            with open(output_path, "wb") as f:
                f.write(screenshot_data)
            print(f"Screenshot saved successfully to {output_path}")
        else:
            print("Failed to capture screenshot")


# LLM Extraction Example
class OpenAIModelFee(BaseModel):
    # Schema the LLM extraction strategy fills in per pricing-table entry.
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(
        ..., description="Fee for output token for the OpenAI model."
    )


async def extract_structured_data_using_llm(
    provider: str, api_token: str = None, extra_headers: Dict[str, str] = None
):
    """Extract ``OpenAIModelFee`` records from a pricing page via an LLM."""
    print(f"\n--- Extracting Structured Data with {provider} ---")

    # Only local Ollama models run without an API token.
    if api_token is None and provider != "ollama":
        print(f"API token is required for {provider}. Skipping this example.")
        return

    browser_config = BrowserConfig(headless=True)

    extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000}
    if extra_headers:
        extra_args["extra_headers"] = extra_headers

    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        word_count_threshold=1,
        page_timeout=80000,
        extraction_strategy=LLMExtractionStrategy(
            llm_config=LLMConfig(provider=provider,api_token=api_token),
            schema=OpenAIModelFee.model_json_schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens. Do not miss any models in the entire content.""",
            extra_args=extra_args,
        ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://openai.com/api/pricing/", config=crawler_config
        )
        print(result.extracted_content)


# CSS Extraction Example
async def extract_structured_data_using_css_extractor():
    """Extract structured course data with pure CSS selectors (no LLM)."""
    print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
    schema = {
        "name": "KidoCode Courses",
        "baseSelector": "section.charge-methodology .framework-collection-item.w-dyn-item",
        "fields": [
            {
                "name": "section_title",
                "selector": "h3.heading-50",
                "type": "text",
            },
            {
                "name": "section_description",
                "selector": ".charge-content",
                "type": "text",
            },
            {
                "name": "course_name",
                "selector": ".text-block-93",
                "type": "text",
            },
            {
                "name": "course_description",
                "selector": ".course-content-text",
                "type": "text",
            },
            {
                "name": "course_icon",
                "selector": ".image-92",
                "type": "attribute",
                "attribute": "src",
            },
        ],
    }

    browser_config = BrowserConfig(headless=True, java_script_enabled=True)

    # Click through every tab so all tab panels are present in the DOM.
    js_click_tabs = """
    (async () => {
        const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
        for(let tab of tabs) {
            tab.scrollIntoView();
            tab.click();
            await new Promise(r => setTimeout(r, 500));
        }
    })();
    """

    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        extraction_strategy=JsonCssExtractionStrategy(schema),
        js_code=[js_click_tabs],
        delay_before_return_html=1
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.kidocode.com/degrees/technology", config=crawler_config
        )
        companies = json.loads(result.extracted_content)
        print(f"Successfully extracted {len(companies)} companies")
        print(json.dumps(companies[0], indent=2))


# Dynamic Content Examples - Method 1
async def crawl_dynamic_content_pages_method_1():
    """Paginate a JS-driven commit list using a hook that waits for new content."""
    print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
    first_commit = ""

    async def on_execution_started(page, **kwargs):
        # Poll until the first commit title differs from the previous page's,
        # i.e. the pagination click actually loaded new content.
        nonlocal first_commit
        try:
            while True:
                await page.wait_for_selector("li.Box-sc-g0xbh4-0 h4")
                commit = await page.query_selector("li.Box-sc-g0xbh4-0 h4")
                commit = await commit.evaluate("(element) => element.textContent")
                commit = re.sub(r"\s+", "", commit)
                if commit and commit != first_commit:
                    first_commit = commit
                    break
                await asyncio.sleep(0.5)
        except Exception as e:
            print(f"Warning: New content didn't appear after JavaScript execution: {e}")

    browser_config = BrowserConfig(headless=False, java_script_enabled=True)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)

        url = "https://github.com/microsoft/TypeScript/commits/main"
        session_id = "typescript_commits_session"  # reuse one page across iterations
        all_commits = []

        js_next_page = """
        const button = document.querySelector('a[data-testid="pagination-next-button"]');
        if (button) button.click();
        """

        for page in range(3):
            crawler_config = CrawlerRunConfig(
                cache_mode=CacheMode.BYPASS,
                css_selector="li.Box-sc-g0xbh4-0",
                js_code=js_next_page if page > 0 else None,
                js_only=page > 0,  # after page 0, only run JS in the live session
                session_id=session_id,
            )
            result = await crawler.arun(url=url, config=crawler_config)
            assert result.success, f"Failed to crawl page {page + 1}"

            soup = BeautifulSoup(result.cleaned_html, "html.parser")
            commits = soup.select("li")
            all_commits.extend(commits)
            print(f"Page {page + 1}: Found {len(commits)} commits")

        print(f"Successfully crawled {len(all_commits)} commits across 3 pages")


# Dynamic Content Examples - Method 2
async def crawl_dynamic_content_pages_method_2():
    """Same pagination task, but the wait loop lives entirely in injected JS."""
    print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")

    browser_config = BrowserConfig(headless=False, java_script_enabled=True)

    js_next_page_and_wait = """
    (async () => {
        const getCurrentCommit = () => {
            const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
            return commits.length > 0 ? commits[0].textContent.trim() : null;
        };
        const initialCommit = getCurrentCommit();
        const button = document.querySelector('a[data-testid="pagination-next-button"]');
        if (button) button.click();
        while (true) {
            await new Promise(resolve => setTimeout(resolve, 100));
            const newCommit = getCurrentCommit();
            if (newCommit && newCommit !== initialCommit) {
                break;
            }
        }
    })();
    """

    schema = {
        "name": "Commit Extractor",
        "baseSelector": "li.Box-sc-g0xbh4-0",
        "fields": [
            {
                "name": "title",
                "selector": "h4.markdown-title",
                "type": "text",
                "transform": "strip",
            },
        ],
    }

    async with AsyncWebCrawler(config=browser_config) as crawler:
        url = "https://github.com/microsoft/TypeScript/commits/main"
        session_id = "typescript_commits_session"
        all_commits = []

        extraction_strategy = JsonCssExtractionStrategy(schema)

        for page in range(3):
            crawler_config = CrawlerRunConfig(
                cache_mode=CacheMode.BYPASS,
                css_selector="li.Box-sc-g0xbh4-0",
                extraction_strategy=extraction_strategy,
                js_code=js_next_page_and_wait if page > 0 else None,
                js_only=page > 0,
                session_id=session_id,
            )
            result = await crawler.arun(url=url, config=crawler_config)
            assert result.success, f"Failed to crawl page {page + 1}"

            commits = json.loads(result.extracted_content)
            all_commits.extend(commits)
            print(f"Page {page + 1}: Found {len(commits)} commits")

        print(f"Successfully crawled {len(all_commits)} commits across 3 pages")


async def cosine_similarity_extraction():
    """Cluster page content by embedding similarity around a semantic filter."""
    from crawl4ai import CosineStrategy

    crawl_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        extraction_strategy=CosineStrategy(
            word_count_threshold=10,
            max_dist=0.2,  # Maximum distance between two words
            linkage_method="ward",  # Linkage method for hierarchical clustering (ward, complete, average, single)
            top_k=3,  # Number of top keywords to extract
            sim_threshold=0.3,  # Similarity threshold for clustering
            semantic_filter="McDonald's economic impact, American consumer trends",  # Keywords to filter the content semantically using embeddings
            verbose=True,
        ),
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business/consumer/how-mcdonalds-e-coli-crisis-inflation-politics-reflect-american-story-rcna177156",
            config=crawl_config,
        )
        print(json.loads(result.extracted_content)[:5])


# Browser Comparison
async def crawl_custom_browser_type():
    """Time the same crawl across Firefox, WebKit, and Chromium engines."""
    print("\n--- Browser Comparison ---")

    # Firefox
    browser_config_firefox = BrowserConfig(browser_type="firefox", headless=True)
    start = time.time()
    async with AsyncWebCrawler(config=browser_config_firefox) as crawler:
        result = await crawler.arun(
            url="https://www.example.com",
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
        )
        print("Firefox:", time.time() - start)
        print(result.markdown[:500])

    # WebKit
    browser_config_webkit = BrowserConfig(browser_type="webkit", headless=True)
    start = time.time()
    async with AsyncWebCrawler(config=browser_config_webkit) as crawler:
        result = await crawler.arun(
            url="https://www.example.com",
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
        )
        print("WebKit:", time.time() - start)
        print(result.markdown[:500])

    # Chromium (default)
    browser_config_chromium = BrowserConfig(browser_type="chromium", headless=True)
    start = time.time()
    async with AsyncWebCrawler(config=browser_config_chromium) as crawler:
        result = await crawler.arun(
            url="https://www.example.com",
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
        )
        print("Chromium:", time.time() - start)
        print(result.markdown[:500])


# Anti-Bot and User Simulation
async def crawl_with_user_simulation():
    """Crawl with a randomized mobile UA and simulated user behavior."""
    browser_config = BrowserConfig(
        headless=True,
        user_agent_mode="random",
        user_agent_generator_config={"device_type": "mobile", "os_type": "android"},
    )
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        magic=True,
        simulate_user=True,
        override_navigator=True,
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url="YOUR-URL-HERE", config=crawler_config)
        print(result.markdown)


async def ssl_certification():
    """Fetch a site's SSL certificate and export it as JSON/PEM/DER."""
    # Configure crawler to fetch SSL certificate
    config = CrawlerRunConfig(
        fetch_ssl_certificate=True,
        cache_mode=CacheMode.BYPASS,  # Bypass cache to always get fresh certificates
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        if result.success and result.ssl_certificate:
            cert = result.ssl_certificate
            tmp_dir = os.path.join(__location__, "tmp")
            os.makedirs(tmp_dir, exist_ok=True)

            # 1. Access certificate properties directly
            print("\nCertificate Information:")
            print(f"Issuer: {cert.issuer.get('CN', '')}")
            print(f"Valid until: {cert.valid_until}")
            print(f"Fingerprint: {cert.fingerprint}")

            # 2. Export certificate in different formats
            cert.to_json(os.path.join(tmp_dir, "certificate.json"))  # For analysis
            print("\nCertificate exported to:")
            print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}")
            pem_data = cert.to_pem(
                os.path.join(tmp_dir, "certificate.pem")
            )  # For web servers
            print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}")
            der_data = cert.to_der(
                os.path.join(tmp_dir, "certificate.der")
            )  # For Java apps
            print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}")


# Main execution
async def main():
    """Run a representative subset of the examples above."""
    # Basic examples
    await simple_crawl()
    await simple_example_with_running_js_code()
    await simple_example_with_css_selector()

    # Advanced examples
    await extract_structured_data_using_css_extractor()
    await extract_structured_data_using_llm(
        "openai/gpt-4o", os.getenv("OPENAI_API_KEY")
    )
    await crawl_dynamic_content_pages_method_1()
    await crawl_dynamic_content_pages_method_2()

    # Browser comparisons
    await crawl_custom_browser_type()

    # Screenshot example
    await capture_and_save_screenshot(
        "https://www.example.com",
        os.path.join(__location__, "tmp/example_screenshot.jpg")
    )


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/simple_anti_bot_examples.py
docs/examples/simple_anti_bot_examples.py
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, UndetectedAdapter
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy


# Example 1: Stealth Mode
async def stealth_mode_example():
    """Crawl one page with Playwright stealth patches enabled (headed browser)."""
    cfg = BrowserConfig(enable_stealth=True, headless=False)
    async with AsyncWebCrawler(config=cfg) as crawler:
        crawled = await crawler.arun("https://example.com")
        return crawled.html[:500]


# Example 2: Undetected Browser
async def undetected_browser_example():
    """Crawl one page through the undetected-browser adapter."""
    cfg = BrowserConfig(headless=False)
    playwright_strategy = AsyncPlaywrightCrawlerStrategy(
        browser_config=cfg,
        browser_adapter=UndetectedAdapter(),
    )
    async with AsyncWebCrawler(crawler_strategy=playwright_strategy, config=cfg) as crawler:
        crawled = await crawler.arun("https://example.com")
        return crawled.html[:500]


# Example 3: Both Combined
async def combined_example():
    """Crawl with stealth patches AND the undetected adapter together."""
    cfg = BrowserConfig(enable_stealth=True, headless=False)
    playwright_strategy = AsyncPlaywrightCrawlerStrategy(
        browser_config=cfg,
        browser_adapter=UndetectedAdapter(),
    )
    async with AsyncWebCrawler(crawler_strategy=playwright_strategy, config=cfg) as crawler:
        crawled = await crawler.arun("https://example.com")
        return crawled.html[:500]


# Run examples
if __name__ == "__main__":
    # Same order as before: stealth, undetected, then combined.
    for demo in (stealth_mode_example, undetected_browser_example, combined_example):
        asyncio.run(demo())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/virtual_scroll_example.py
docs/examples/virtual_scroll_example.py
""" Example of using the virtual scroll feature to capture content from pages with virtualized scrolling (like Twitter, Instagram, or other infinite scroll feeds). This example demonstrates virtual scroll with a local test server serving different types of scrolling behaviors from HTML files in the assets directory. """ import asyncio import os import http.server import socketserver import threading from pathlib import Path from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, VirtualScrollConfig, CacheMode, BrowserConfig # Get the assets directory path ASSETS_DIR = Path(__file__).parent / "assets" class TestServer: """Simple HTTP server to serve our test HTML files""" def __init__(self, port=8080): self.port = port self.httpd = None self.server_thread = None async def start(self): """Start the test server""" Handler = http.server.SimpleHTTPRequestHandler # Save current directory and change to assets directory self.original_cwd = os.getcwd() os.chdir(ASSETS_DIR) # Try to find an available port for _ in range(10): try: self.httpd = socketserver.TCPServer(("", self.port), Handler) break except OSError: self.port += 1 if self.httpd is None: raise RuntimeError("Could not find available port") self.server_thread = threading.Thread(target=self.httpd.serve_forever) self.server_thread.daemon = True self.server_thread.start() # Give server time to start await asyncio.sleep(0.5) print(f"Test server started on http://localhost:{self.port}") return self.port def stop(self): """Stop the test server""" if self.httpd: self.httpd.shutdown() # Restore original directory if hasattr(self, 'original_cwd'): os.chdir(self.original_cwd) async def example_twitter_like_virtual_scroll(): """ Example 1: Twitter-like virtual scroll where content is REPLACED. This is the classic virtual scroll use case - only visible items exist in DOM. 
""" print("\n" + "="*60) print("EXAMPLE 1: Twitter-like Virtual Scroll") print("="*60) server = TestServer() port = await server.start() try: # Configure virtual scroll for Twitter-like timeline virtual_config = VirtualScrollConfig( container_selector="#timeline", # The scrollable container scroll_count=50, # Scroll up to 50 times to get all content scroll_by="container_height", # Scroll by container's height wait_after_scroll=0.3 # Wait 300ms after each scroll ) config = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS ) # TIP: Set headless=False to watch the scrolling happen! browser_config = BrowserConfig( headless=False, viewport={"width": 1280, "height": 800} ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url=f"http://localhost:{port}/virtual_scroll_twitter_like.html", config=config ) # Count tweets captured import re tweets = re.findall(r'data-tweet-id="(\d+)"', result.html) unique_tweets = sorted(set(int(id) for id in tweets)) print(f"\nπŸ“Š Results:") print(f" Total HTML length: {len(result.html):,} characters") print(f" Tweets captured: {len(unique_tweets)} unique tweets") if unique_tweets: print(f" Tweet IDs range: {min(unique_tweets)} to {max(unique_tweets)}") print(f" Expected range: 0 to 499 (500 tweets total)") if len(unique_tweets) == 500: print(f" βœ… SUCCESS! All tweets captured!") else: print(f" ⚠️ Captured {len(unique_tweets)}/500 tweets") finally: server.stop() async def example_traditional_append_scroll(): """ Example 2: Traditional infinite scroll where content is APPENDED. No virtual scroll needed - all content stays in DOM. 
""" print("\n" + "="*60) print("EXAMPLE 2: Traditional Append-Only Scroll") print("="*60) server = TestServer() port = await server.start() try: # Configure virtual scroll virtual_config = VirtualScrollConfig( container_selector=".posts-container", scroll_count=15, # Less scrolls needed since content accumulates scroll_by=500, # Scroll by 500 pixels wait_after_scroll=0.4 ) config = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url=f"http://localhost:{port}/virtual_scroll_append_only.html", config=config ) # Count posts import re posts = re.findall(r'data-post-id="(\d+)"', result.html) unique_posts = sorted(set(int(id) for id in posts)) print(f"\nπŸ“Š Results:") print(f" Total HTML length: {len(result.html):,} characters") print(f" Posts captured: {len(unique_posts)} unique posts") if unique_posts: print(f" Post IDs range: {min(unique_posts)} to {max(unique_posts)}") print(f" ℹ️ Note: This page appends content, so virtual scroll") print(f" just helps trigger more loads. All content stays in DOM.") finally: server.stop() async def example_instagram_grid(): """ Example 3: Instagram-like grid with virtual scroll. Grid layout where only visible rows are rendered. 
""" print("\n" + "="*60) print("EXAMPLE 3: Instagram Grid Virtual Scroll") print("="*60) server = TestServer() port = await server.start() try: # Configure for grid layout virtual_config = VirtualScrollConfig( container_selector=".feed-container", # Container with the grid scroll_count=100, # Many scrolls for 999 posts scroll_by="container_height", wait_after_scroll=0.2 # Faster scrolling for grid ) config = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS, screenshot=True # Take a screenshot of the final grid ) # Show browser for this visual example browser_config = BrowserConfig( headless=False, viewport={"width": 1200, "height": 900} ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( url=f"http://localhost:{port}/virtual_scroll_instagram_grid.html", config=config ) # Count posts in grid import re posts = re.findall(r'data-post-id="(\d+)"', result.html) unique_posts = sorted(set(int(id) for id in posts)) print(f"\nπŸ“Š Results:") print(f" Posts in grid: {len(unique_posts)} unique posts") if unique_posts: print(f" Post IDs range: {min(unique_posts)} to {max(unique_posts)}") print(f" Expected: 0 to 998 (999 posts total)") # Save screenshot if result.screenshot: import base64 with open("instagram_grid_result.png", "wb") as f: f.write(base64.b64decode(result.screenshot)) print(f" πŸ“Έ Screenshot saved as instagram_grid_result.png") finally: server.stop() async def example_mixed_content(): """ Example 4: News feed with mixed behavior. Featured articles stay (no virtual scroll), regular articles are virtualized. 
""" print("\n" + "="*60) print("EXAMPLE 4: News Feed with Mixed Behavior") print("="*60) server = TestServer() port = await server.start() try: # Configure virtual scroll virtual_config = VirtualScrollConfig( container_selector="#newsContainer", scroll_count=25, scroll_by="container_height", wait_after_scroll=0.3 ) config = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS ) async with AsyncWebCrawler() as crawler: result = await crawler.arun( url=f"http://localhost:{port}/virtual_scroll_news_feed.html", config=config ) # Count different types of articles import re featured = re.findall(r'data-article-id="featured-\d+"', result.html) regular = re.findall(r'data-article-id="article-(\d+)"', result.html) print(f"\nπŸ“Š Results:") print(f" Featured articles: {len(set(featured))} (always visible)") print(f" Regular articles: {len(set(regular))} unique articles") if regular: regular_ids = sorted(set(int(id) for id in regular)) print(f" Regular article IDs: {min(regular_ids)} to {max(regular_ids)}") print(f" ℹ️ Note: Featured articles stay in DOM, only regular") print(f" articles are replaced during virtual scroll") finally: server.stop() async def compare_with_without_virtual_scroll(): """ Comparison: Show the difference between crawling with and without virtual scroll. 
""" print("\n" + "="*60) print("COMPARISON: With vs Without Virtual Scroll") print("="*60) server = TestServer() port = await server.start() try: url = f"http://localhost:{port}/virtual_scroll_twitter_like.html" # First, crawl WITHOUT virtual scroll print("\n1️⃣ Crawling WITHOUT virtual scroll...") async with AsyncWebCrawler() as crawler: config_normal = CrawlerRunConfig(cache_mode=CacheMode.BYPASS) result_normal = await crawler.arun(url=url, config=config_normal) # Count items import re tweets_normal = len(set(re.findall(r'data-tweet-id="(\d+)"', result_normal.html))) # Then, crawl WITH virtual scroll print("2️⃣ Crawling WITH virtual scroll...") virtual_config = VirtualScrollConfig( container_selector="#timeline", scroll_count=50, scroll_by="container_height", wait_after_scroll=0.2 ) config_virtual = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS ) async with AsyncWebCrawler() as crawler: result_virtual = await crawler.arun(url=url, config=config_virtual) # Count items tweets_virtual = len(set(re.findall(r'data-tweet-id="(\d+)"', result_virtual.html))) # Compare results print(f"\nπŸ“Š Comparison Results:") print(f" Without virtual scroll: {tweets_normal} tweets (only initial visible)") print(f" With virtual scroll: {tweets_virtual} tweets (all content captured)") print(f" Improvement: {tweets_virtual / tweets_normal if tweets_normal > 0 else 'N/A':.1f}x more content!") print(f"\n HTML size without: {len(result_normal.html):,} characters") print(f" HTML size with: {len(result_virtual.html):,} characters") finally: server.stop() if __name__ == "__main__": print(""" ╔════════════════════════════════════════════════════════════╗ β•‘ Virtual Scroll Examples for Crawl4AI β•‘ β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• These examples demonstrate different virtual scroll scenarios: 1. 
Twitter-like (content replaced) - Classic virtual scroll 2. Traditional append - Content accumulates 3. Instagram grid - Visual grid layout 4. Mixed behavior - Some content stays, some virtualizes Starting examples... """) # Run all examples asyncio.run(example_twitter_like_virtual_scroll()) asyncio.run(example_traditional_append_scroll()) asyncio.run(example_instagram_grid()) asyncio.run(example_mixed_content()) asyncio.run(compare_with_without_virtual_scroll()) print("\nβœ… All examples completed!") print("\nTIP: Set headless=False in BrowserConfig to watch the scrolling in action!")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/table_extraction_example.py
docs/examples/table_extraction_example.py
""" Example: Using Table Extraction Strategies in Crawl4AI This example demonstrates how to use different table extraction strategies to extract tables from web pages. """ import asyncio import pandas as pd from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, CacheMode, DefaultTableExtraction, NoTableExtraction, TableExtractionStrategy ) from typing import Dict, List, Any async def example_default_extraction(): """Example 1: Using default table extraction (automatic).""" print("\n" + "="*50) print("Example 1: Default Table Extraction") print("="*50) async with AsyncWebCrawler() as crawler: # No need to specify table_extraction - uses DefaultTableExtraction automatically config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, table_score_threshold=7 # Adjust sensitivity (default: 7) ) result = await crawler.arun( "https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(nominal)", config=config ) if result.success and result.tables: print(f"Found {len(result.tables)} tables") # Convert first table to pandas DataFrame if result.tables: first_table = result.tables[0] df = pd.DataFrame( first_table['rows'], columns=first_table['headers'] if first_table['headers'] else None ) print(f"\nFirst table preview:") print(df.head()) print(f"Shape: {df.shape}") async def example_custom_configuration(): """Example 2: Custom table extraction configuration.""" print("\n" + "="*50) print("Example 2: Custom Table Configuration") print("="*50) async with AsyncWebCrawler() as crawler: # Create custom extraction strategy with specific settings table_strategy = DefaultTableExtraction( table_score_threshold=5, # Lower threshold for more permissive detection min_rows=3, # Only extract tables with at least 3 rows min_cols=2, # Only extract tables with at least 2 columns verbose=True ) config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, table_extraction=table_strategy, # Target specific tables using CSS selector css_selector="div.main-content" ) result = await crawler.arun( 
"https://example.com/data", config=config ) if result.success: print(f"Found {len(result.tables)} tables matching criteria") for i, table in enumerate(result.tables): print(f"\nTable {i+1}:") print(f" Caption: {table.get('caption', 'No caption')}") print(f" Size: {table['metadata']['row_count']} rows Γ— {table['metadata']['column_count']} columns") print(f" Has headers: {table['metadata']['has_headers']}") async def example_disable_extraction(): """Example 3: Disable table extraction when not needed.""" print("\n" + "="*50) print("Example 3: Disable Table Extraction") print("="*50) async with AsyncWebCrawler() as crawler: # Use NoTableExtraction to skip table processing entirely config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, table_extraction=NoTableExtraction() # No tables will be extracted ) result = await crawler.arun( "https://example.com", config=config ) if result.success: print(f"Tables extracted: {len(result.tables)} (should be 0)") print("Table extraction disabled - better performance for non-table content") class FinancialTableExtraction(TableExtractionStrategy): """ Custom strategy for extracting financial tables with specific requirements. 
""" def __init__(self, currency_symbols=None, **kwargs): super().__init__(**kwargs) self.currency_symbols = currency_symbols or ['$', '€', 'Β£', 'Β₯'] def extract_tables(self, element, **kwargs): """Extract only tables that appear to contain financial data.""" tables_data = [] for table in element.xpath(".//table"): # Check if table contains currency symbols table_text = ''.join(table.itertext()) has_currency = any(symbol in table_text for symbol in self.currency_symbols) if not has_currency: continue # Extract using base logic (could reuse DefaultTableExtraction logic) headers = [] rows = [] # Extract headers for th in table.xpath(".//thead//th | .//tr[1]//th"): headers.append(th.text_content().strip()) # Extract rows for tr in table.xpath(".//tbody//tr | .//tr[position()>1]"): row = [] for td in tr.xpath(".//td"): cell_text = td.text_content().strip() # Clean currency values for symbol in self.currency_symbols: cell_text = cell_text.replace(symbol, '') row.append(cell_text) if row: rows.append(row) if headers or rows: tables_data.append({ "headers": headers, "rows": rows, "caption": table.xpath(".//caption/text()")[0] if table.xpath(".//caption") else "", "summary": table.get("summary", ""), "metadata": { "type": "financial", "has_currency": True, "row_count": len(rows), "column_count": len(headers) if headers else len(rows[0]) if rows else 0 } }) return tables_data async def example_custom_strategy(): """Example 4: Custom table extraction strategy.""" print("\n" + "="*50) print("Example 4: Custom Financial Table Strategy") print("="*50) async with AsyncWebCrawler() as crawler: # Use custom strategy for financial tables config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, table_extraction=FinancialTableExtraction( currency_symbols=['$', '€'], verbose=True ) ) result = await crawler.arun( "https://finance.yahoo.com/", config=config ) if result.success: print(f"Found {len(result.tables)} financial tables") for table in result.tables: if 
table['metadata'].get('type') == 'financial': print(f" βœ“ Financial table with {table['metadata']['row_count']} rows") async def example_combined_extraction(): """Example 5: Combine table extraction with other strategies.""" print("\n" + "="*50) print("Example 5: Combined Extraction Strategies") print("="*50) from crawl4ai import LLMExtractionStrategy, LLMConfig async with AsyncWebCrawler() as crawler: # Define schema for structured extraction schema = { "type": "object", "properties": { "page_title": {"type": "string"}, "main_topic": {"type": "string"}, "key_figures": { "type": "array", "items": {"type": "string"} } } } config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, # Table extraction table_extraction=DefaultTableExtraction( table_score_threshold=6, min_rows=2 ), # LLM extraction for structured data extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider="openai"), schema=schema ) ) result = await crawler.arun( "https://en.wikipedia.org/wiki/Economy_of_the_United_States", config=config ) if result.success: print(f"Tables found: {len(result.tables)}") # Tables are in result.tables if result.tables: print(f"First table has {len(result.tables[0]['rows'])} rows") # Structured data is in result.extracted_content if result.extracted_content: import json structured_data = json.loads(result.extracted_content) print(f"Page title: {structured_data.get('page_title', 'N/A')}") print(f"Main topic: {structured_data.get('main_topic', 'N/A')}") async def main(): """Run all examples.""" print("\n" + "="*60) print("CRAWL4AI TABLE EXTRACTION EXAMPLES") print("="*60) # Run examples await example_default_extraction() await example_custom_configuration() await example_disable_extraction() await example_custom_strategy() # await example_combined_extraction() # Requires OpenAI API key print("\n" + "="*60) print("EXAMPLES COMPLETED") print("="*60) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_config_obj.py
docs/examples/docker_config_obj.py
from crawl4ai import BrowserConfig, CrawlerRunConfig, PruningContentFilter, DefaultMarkdownGenerator
# (fixed: duplicate `filters`/`scorers` import lines consolidated into one each)
from crawl4ai.deep_crawling.filters import ContentTypeFilter, DomainFilter, FilterChain
from crawl4ai.deep_crawling.scorers import KeywordRelevanceScorer, PathDepthScorer, CompositeScorer
from crawl4ai.cache_context import CacheMode
from crawl4ai.deep_crawling.bfs_strategy import BFSDeepCrawlStrategy
from crawl4ai.docker_client import Crawl4aiDockerClient
import json
from rich.console import Console
from rich.syntax import Syntax

console = Console()


def print_json(data: dict, title: str = None):
    """Helper to print JSON prettily with syntax highlighting"""
    if title:
        console.print(f"\n[bold blue]{title}[/bold blue]")
    json_str = json.dumps(data, indent=2)
    syntax = Syntax(json_str, "json", theme="monokai", line_numbers=True)
    console.print(syntax)


async def part1_basic_config():
    """PART 1: Understanding Basic Configuration Objects

    Here we create simple configuration objects and examine their structure.
    This helps understand the basic type-params pattern used throughout the API.
    """
    console.print("\n[bold green]Explanation:[/bold green] Configuration objects like BrowserConfig and CrawlerRunConfig are the foundation of Crawl4AI. They define how the crawler behaves—e.g., whether it runs headless or how it processes content. These objects use a 'type-params' pattern: 'type' identifies the object class, and 'params' holds its settings. This structure is key because it’s reusable and can be serialized into JSON for API calls.")

    # Create a simple browser config
    browser_config = BrowserConfig(
        headless=False,
        viewport_width=500,
        headers={"User-Agent": "Mozilla/5.0"}
    )

    # Show its structure
    print_json(browser_config.dump(), "Simple Browser Config Structure")

    # Create a more complex config with nested objects
    crawler_config = CrawlerRunConfig(
        word_count_threshold=200,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(threshold=0.5)
        )
    )

    print_json(crawler_config.dump(), "Complex Config with Nested Objects")


async def part2_manual_json():
    """PART 2: Building JSON Manually

    Learn how to construct the JSON structure by hand.
    This demonstrates deep understanding of the configuration format.
    """
    console.print("\n[bold green]Explanation:[/bold green] Manually building JSON configurations mirrors how the API expects data. It’s a hands-on way to learn the exact structure—each object has a 'type' and 'params' section. This is useful when you’re troubleshooting or working without the SDK, as it forces you to understand every detail of the config format.")

    # Manual browser config
    manual_browser = {
        "type": "BrowserConfig",
        "params": {
            "headless": True,
            "viewport": {
                "type": "dict",
                "value": {
                    "width": 1200,
                    "height": 800
                }
            }
        }
    }

    # Validate by loading into BrowserConfig
    loaded_config = BrowserConfig.load(manual_browser)
    print_json(loaded_config.dump(), "Manually Created -> Loaded -> Dumped")

    # Show they're equivalent.
    # (fixed: the assert message is shown only when the assertion FAILS, so it
    # must describe the failure, not announce success)
    original = BrowserConfig(headless=True, viewport={"width": 1200, "height": 800})
    assert loaded_config.dump() == original.dump(), "Configs should be equivalent but differ!"


async def part3_complex_structures():
    """PART 3: Working with Complex Nested Structures

    Explore more complex configurations with multiple levels of nesting.
    This shows how the type-params pattern scales to complex scenarios.
    """
    console.print("\n[bold green]Explanation:[/bold green] Real-world crawling often requires detailed settings—like filtering content or customizing output. Here, we nest objects (e.g., a markdown generator with a content filter) using the same 'type-params' pattern. This nesting lets you fine-tune the crawler’s behavior at multiple levels, making it powerful and flexible.")

    config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter()
        ),
        deep_crawl_strategy=BFSDeepCrawlStrategy(
            max_depth=5,
            filter_chain=FilterChain(
                filters=[
                    ContentTypeFilter(allowed_types=["text/html"]),
                    DomainFilter(allowed_domains=["example.com"])
                ]
            ),
            url_scorer=CompositeScorer(
                scorers=[
                    KeywordRelevanceScorer(keywords=["data", "analysis"]),
                    PathDepthScorer(optimal_depth=3)
                ]
            )
        )
    )

    print_json(config.dump(), "Deep Nested Configuration")


async def part4_client_sdk():
    """PART 4: Using the Client SDK

    Demonstrate how the SDK makes working with the API simple
    by handling all the complex serialization automatically.
    """
    console.print("\n[bold green]Explanation:[/bold green] The Crawl4aiDockerClient SDK is a time-saver—it takes your configuration objects and turns them into API-ready JSON automatically. This means less manual work and fewer mistakes. You just define your settings, pass them to the SDK, and it handles the rest, making crawling easier and faster.")

    async with Crawl4aiDockerClient(base_url="http://localhost:8000") as client:
        # You would normally authenticate here if JWT is enabled
        await client.authenticate("user@example.com")

        # Create configs
        browser_config = BrowserConfig(headless=True)
        crawler_config = CrawlerRunConfig(stream=False)

        # SDK handles all serialization
        result = await client.crawl(
            urls=["https://example.com"],
            browser_config=browser_config,
            crawler_config=crawler_config
        )

        console.print("\n[bold green]🚀 Crawl completed successfully![/bold green]")
        console.print(f"Markdown length: {len(result.markdown)} characters")


async def part5_direct_api():
    """PART 5: Using the API Directly

    Learn how to make direct API calls without the SDK.
    This demonstrates the raw request structure and gives more control.
    """
    console.print("\n[bold green]Explanation:[/bold green] Skipping the SDK means you’re in full control—you build the JSON payload yourself and send it to the API. This is harder but gives you a deeper understanding of how Crawl4AI works under the hood. It’s also useful if you’re integrating with systems that don’t use the SDK.")

    import aiohttp
    from datetime import datetime

    # Prepare the request payload
    payload = {
        "urls": ["https://example.com"],
        "browser_config": {
            "type": "BrowserConfig",
            "params": {
                "headless": True,
                "viewport": {
                    "type": "dict",
                    "value": {
                        "width": 1200,
                        "height": 800
                    }
                }
            }
        },
        "crawler_config": {
            "type": "CrawlerRunConfig",
            "params": {
                "cache_mode": "bypass",
                "markdown_generator": {
                    "type": "DefaultMarkdownGenerator",
                    "params": {
                        "content_filter": {
                            "type": "PruningContentFilter",
                            "params": {
                                "threshold": 0.48,
                                "threshold_type": "fixed"
                            }
                        }
                    }
                }
            }
        }
    }

    print_json(payload, "Direct API Request Payload")

    async with aiohttp.ClientSession() as session:
        # If JWT is enabled, get token first.
        # FIX: use the response as an async context manager so the underlying
        # connection is released back to the pool instead of leaking.
        async with session.post(
            "http://localhost:8000/token",
            json={"email": "user@example.com"}
        ) as token_response:
            token = (await token_response.json())["access_token"]
        headers = {"Authorization": f"Bearer {token}"}

        # Make the crawl request
        start_time = datetime.now()
        async with session.post(
            "http://localhost:8000/crawl",
            json=payload,
            headers=headers  # remove this if JWT auth is disabled on the server
        ) as response:
            result = await response.json()

        duration = (datetime.now() - start_time).total_seconds()
        console.print(f"\n[bold green]✅ API call completed in {duration:.2f}s[/bold green]")
        print_json(result, "API Response")


async def part6_wrap_up():
    """PART 6: Wrap-Up and Key Takeaways

    Summarize the key concepts learned in this tutorial.
    """
    console.print("\n[bold yellow]🎓 Tutorial Wrap-Up[/bold yellow]")
    console.print("[italic]Key Takeaways:[/italic]\n")
    console.print("- **Configurations:** Use the type-params pattern to define settings flexibly.")
    console.print("- **Manual JSON:** Build configs by hand to master the structure.")
    console.print("- **Nesting:** Customize deeply with nested objects.")
    console.print("- **SDK:** Simplify API calls with automatic serialization.")
    console.print("- **Direct API:** Gain control by crafting raw requests.")
    console.print("\n[bold green]🚀 You’re ready to crawl with Crawl4AI![/bold green]")


async def main():
    """Main tutorial runner that executes each part in sequence"""
    console.print("\n[bold yellow]🎓 Crawl4AI Docker Tutorial[/bold yellow]")
    console.print("[italic]Learn how to work with configuration objects and the Docker API[/italic]\n")

    parts = [
        (part1_basic_config, "Understanding Basic Configurations"),
        (part2_manual_json, "Manual JSON Construction"),
        (part3_complex_structures, "Complex Nested Structures"),
        (part4_client_sdk, "Using the Client SDK"),
        (part5_direct_api, "Direct API Integration"),
        (part6_wrap_up, "Wrap-Up and Key Takeaways")
    ]

    for func, title in parts:
        console.print(f"\n[bold cyan]📚 {title}[/bold cyan]")
        console.print("[dim]" + func.__doc__.strip() + "[/dim]\n")
        await func()
        if func != part6_wrap_up:  # No pause after wrap-up
            input("\nPress Enter to continue...\n")


# Run the tutorial
if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/amazon_product_extraction_using_hooks.py
docs/examples/amazon_product_extraction_using_hooks.py
""" This example demonstrates how to use JSON CSS extraction to scrape product information from Amazon search results. It shows how to extract structured data like product titles, prices, ratings, and other details using CSS selectors. """ from crawl4ai import AsyncWebCrawler, CacheMode from crawl4ai import JsonCssExtractionStrategy from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig import json from playwright.async_api import Page, BrowserContext async def extract_amazon_products(): # Initialize browser config browser_config = BrowserConfig( # browser_type="chromium", headless=True ) # Initialize crawler config with JSON CSS extraction strategy nav-search-submit-button crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=JsonCssExtractionStrategy( schema={ "name": "Amazon Product Search Results", "baseSelector": "[data-component-type='s-search-result']", "fields": [ { "name": "asin", "selector": "", "type": "attribute", "attribute": "data-asin", }, {"name": "title", "selector": "h2 a span", "type": "text"}, { "name": "url", "selector": "h2 a", "type": "attribute", "attribute": "href", }, { "name": "image", "selector": ".s-image", "type": "attribute", "attribute": "src", }, { "name": "rating", "selector": ".a-icon-star-small .a-icon-alt", "type": "text", }, { "name": "reviews_count", "selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span", "type": "text", }, { "name": "price", "selector": ".a-price .a-offscreen", "type": "text", }, { "name": "original_price", "selector": ".a-price.a-text-price .a-offscreen", "type": "text", }, { "name": "sponsored", "selector": ".puis-sponsored-label-text", "type": "exists", }, { "name": "delivery_info", "selector": "[data-cy='delivery-recipe'] .a-color-base", "type": "text", "multiple": True, }, ], } ), ) url = "https://www.amazon.com/" async def after_goto( page: Page, context: BrowserContext, url: str, response: dict, **kwargs ): """Hook called after navigating to each 
URL""" print(f"[HOOK] after_goto - Successfully loaded: {url}") try: # Wait for search box to be available search_box = await page.wait_for_selector( "#twotabsearchtextbox", timeout=1000 ) # Type the search query await search_box.fill("Samsung Galaxy Tab") # Get the search button and prepare for navigation search_button = await page.wait_for_selector( "#nav-search-submit-button", timeout=1000 ) # Click with navigation waiting await search_button.click() # Wait for search results to load await page.wait_for_selector( '[data-component-type="s-search-result"]', timeout=10000 ) print("[HOOK] Search completed and results loaded!") except Exception as e: print(f"[HOOK] Error during search operation: {str(e)}") return page # Use context manager for proper resource handling async with AsyncWebCrawler(config=browser_config) as crawler: crawler.crawler_strategy.set_hook("after_goto", after_goto) # Extract the data result = await crawler.arun(url=url, config=crawler_config) # Process and print the results if result and result.extracted_content: # Parse the JSON string into a list of products products = json.loads(result.extracted_content) # Process each product in the list for product in products: print("\nProduct Details:") print(f"ASIN: {product.get('asin')}") print(f"Title: {product.get('title')}") print(f"Price: {product.get('price')}") print(f"Original Price: {product.get('original_price')}") print(f"Rating: {product.get('rating')}") print(f"Reviews: {product.get('reviews_count')}") print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}") if product.get("delivery_info"): print(f"Delivery: {' '.join(product['delivery_info'])}") print("-" * 80) if __name__ == "__main__": import asyncio asyncio.run(extract_amazon_products())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/docker_hooks_examples.py
docs/examples/docker_hooks_examples.py
#!/usr/bin/env python3
"""
🚀 Crawl4AI Docker Hooks System - Complete Examples
====================================================

This file demonstrates the Docker Hooks System with three different approaches:
1. String-based hooks for REST API
2. hooks_to_string() utility to convert functions
3. Docker Client with automatic conversion (most convenient)

Requirements:
- Docker container running: docker run -p 11235:11235 unclecode/crawl4ai:latest
- crawl4ai installed: pip install crawl4ai
"""

import asyncio
import requests
import json
import time
from typing import Dict, Any

# Import Crawl4AI components
from crawl4ai import hooks_to_string
from crawl4ai.docker_client import Crawl4aiDockerClient

# Configuration
DOCKER_URL = "http://localhost:11235"
TEST_URLS = [
    "https://www.kidocode.com",
    "https://quotes.toscrape.com",
    "https://httpbin.org/html",
]


def print_section(title: str, description: str = ""):
    """Print a formatted section header"""
    print("\n" + "=" * 70)
    print(f" {title}")
    if description:
        print(f" {description}")
    print("=" * 70 + "\n")


def check_docker_service() -> bool:
    """Check if Docker service is running"""
    try:
        response = requests.get(f"{DOCKER_URL}/health", timeout=3)
        return response.status_code == 200
    except requests.RequestException:
        # BUG FIX: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only network errors mean "not running".
        return False


# ============================================================================
# REUSABLE HOOK LIBRARY
# ============================================================================

async def performance_optimization_hook(page, context, **kwargs):
    """
    Performance Hook: Block unnecessary resources to speed up crawling
    """
    print(" [Hook] 🚀 Optimizing performance - blocking images and ads...")

    # Block images
    await context.route(
        "**/*.{png,jpg,jpeg,gif,webp,svg,ico}",
        lambda route: route.abort()
    )

    # Block ads and analytics
    await context.route("**/analytics/*", lambda route: route.abort())
    await context.route("**/ads/*", lambda route: route.abort())
    await context.route("**/google-analytics.com/*", lambda route: route.abort())

    print(" [Hook] ✓ Performance optimization applied")
    return page


async def viewport_setup_hook(page, context, **kwargs):
    """
    Viewport Hook: Set consistent viewport size for rendering
    """
    print(" [Hook] 🖥️ Setting viewport to 1920x1080...")
    await page.set_viewport_size({"width": 1920, "height": 1080})
    print(" [Hook] ✓ Viewport configured")
    return page


async def authentication_headers_hook(page, context, url, **kwargs):
    """
    Headers Hook: Add custom authentication and tracking headers
    """
    print(f" [Hook] 🔐 Adding custom headers for {url[:50]}...")
    await page.set_extra_http_headers({
        'X-Crawl4AI': 'docker-hooks',
        'X-Custom-Hook': 'function-based',
        'Accept-Language': 'en-US,en;q=0.9',
    })
    print(" [Hook] ✓ Custom headers added")
    return page


async def lazy_loading_handler_hook(page, context, **kwargs):
    """
    Content Hook: Handle lazy-loaded content by scrolling
    """
    print(" [Hook] 📜 Scrolling to load lazy content...")

    # Scroll to bottom
    await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
    await page.wait_for_timeout(1000)

    # Scroll to middle
    await page.evaluate("window.scrollTo(0, document.body.scrollHeight / 2)")
    await page.wait_for_timeout(500)

    # Scroll back to top
    await page.evaluate("window.scrollTo(0, 0)")
    await page.wait_for_timeout(500)

    print(" [Hook] ✓ Lazy content loaded")
    return page


async def page_analytics_hook(page, context, **kwargs):
    """
    Analytics Hook: Log page metrics before extraction
    """
    print(" [Hook] 📊 Collecting page analytics...")

    metrics = await page.evaluate('''
        () => ({
            title: document.title,
            images: document.images.length,
            links: document.links.length,
            scripts: document.scripts.length,
            headings: document.querySelectorAll('h1, h2, h3').length,
            paragraphs: document.querySelectorAll('p').length
        })
    ''')

    print(f" [Hook] 📈 Page: {metrics['title'][:50]}...")
    print(f" Links: {metrics['links']}, Images: {metrics['images']}, "
          f"Headings: {metrics['headings']}, Paragraphs: {metrics['paragraphs']}")
    return page


# ============================================================================
# APPROACH 1: String-Based Hooks (REST API)
# ============================================================================

def example_1_string_based_hooks():
    """
    Demonstrate string-based hooks with REST API
    Use this when working with REST API directly or non-Python clients
    """
    print_section(
        "APPROACH 1: String-Based Hooks (REST API)",
        "Define hooks as strings for REST API requests"
    )

    # Define hooks as strings
    hooks_config = {
        "on_page_context_created": """
async def hook(page, context, **kwargs):
    print(" [String Hook] Setting up page context...")
    # Block images for performance
    await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
    await page.set_viewport_size({"width": 1920, "height": 1080})
    return page
""",
        "before_goto": """
async def hook(page, context, url, **kwargs):
    print(f" [String Hook] Navigating to {url[:50]}...")
    await page.set_extra_http_headers({
        'X-Crawl4AI': 'string-based-hooks',
    })
    return page
""",
        "before_retrieve_html": """
async def hook(page, context, **kwargs):
    print(" [String Hook] Scrolling page...")
    await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
    await page.wait_for_timeout(1000)
    return page
"""
    }

    # Prepare request payload
    payload = {
        "urls": [TEST_URLS[2]],  # httpbin.org
        "hooks": {
            "code": hooks_config,
            "timeout": 30
        },
        "crawler_config": {
            "cache_mode": "bypass"
        }
    }

    print(f"🎯 Target URL: {TEST_URLS[2]}")
    print(f"🔧 Configured {len(hooks_config)} string-based hooks")
    print(f"📡 Sending request to Docker API...\n")

    try:
        start_time = time.time()
        response = requests.post(f"{DOCKER_URL}/crawl", json=payload, timeout=60)
        execution_time = time.time() - start_time

        if response.status_code == 200:
            result = response.json()
            print(f"\n✅ Request successful! (took {execution_time:.2f}s)")

            # Display results
            if result.get('results') and result['results'][0].get('success'):
                crawl_result = result['results'][0]
                html_length = len(crawl_result.get('html', ''))
                markdown_length = len(crawl_result.get('markdown', ''))

                print(f"\n📊 Results:")
                print(f" • HTML length: {html_length:,} characters")
                print(f" • Markdown length: {markdown_length:,} characters")
                print(f" • URL: {crawl_result.get('url')}")

                # Check hooks execution
                if 'hooks' in result:
                    hooks_info = result['hooks']
                    print(f"\n🎣 Hooks Execution:")
                    print(f" • Status: {hooks_info['status']['status']}")
                    print(f" • Attached hooks: {len(hooks_info['status']['attached_hooks'])}")

                    if 'summary' in hooks_info:
                        summary = hooks_info['summary']
                        print(f" • Total executions: {summary['total_executions']}")
                        print(f" • Successful: {summary['successful']}")
                        print(f" • Success rate: {summary['success_rate']:.1f}%")
            else:
                print(f"⚠️ Crawl completed but no results")
        else:
            print(f"❌ Request failed with status {response.status_code}")
            print(f" Error: {response.text[:200]}")

    except requests.exceptions.Timeout:
        print("⏰ Request timed out after 60 seconds")
    except Exception as e:
        print(f"❌ Error: {str(e)}")

    print("\n" + "─" * 70)
    print("✓ String-based hooks example complete\n")


# ============================================================================
# APPROACH 2: Function-Based Hooks with hooks_to_string() Utility
# ============================================================================

def example_2_hooks_to_string_utility():
    """
    Demonstrate the hooks_to_string() utility for converting functions
    Use this when you want to write hooks as functions but use REST API
    """
    print_section(
        "APPROACH 2: hooks_to_string() Utility",
        "Convert Python functions to strings for REST API"
    )

    print("📦 Creating hook functions...")
    print(" • performance_optimization_hook")
    print(" • authentication_headers_hook")
    print(" • lazy_loading_handler_hook")

    # Convert function objects to strings using the utility
    print("\n🔄 Converting functions to strings with hooks_to_string()...")

    hooks_dict = {
        "on_page_context_created": performance_optimization_hook,
        "before_goto": authentication_headers_hook,
        "before_retrieve_html": lazy_loading_handler_hook,
    }

    hooks_as_strings = hooks_to_string(hooks_dict)

    print(f"✅ Successfully converted {len(hooks_as_strings)} functions to strings")

    # Show a preview
    print("\n📝 Sample converted hook (first 200 characters):")
    print("─" * 70)
    sample_hook = list(hooks_as_strings.values())[0]
    print(sample_hook[:200] + "...")
    print("─" * 70)

    # Use the converted hooks with REST API
    print("\n📡 Using converted hooks with REST API...")

    payload = {
        "urls": [TEST_URLS[2]],
        "hooks": {
            "code": hooks_as_strings,
            "timeout": 30
        }
    }

    try:
        start_time = time.time()
        response = requests.post(f"{DOCKER_URL}/crawl", json=payload, timeout=60)
        execution_time = time.time() - start_time

        if response.status_code == 200:
            result = response.json()
            print(f"\n✅ Request successful! (took {execution_time:.2f}s)")

            if result.get('results') and result['results'][0].get('success'):
                crawl_result = result['results'][0]
                print(f" • HTML length: {len(crawl_result.get('html', '')):,} characters")
                print(f" • Hooks executed successfully!")
        else:
            print(f"❌ Request failed: {response.status_code}")

    except Exception as e:
        print(f"❌ Error: {str(e)}")

    print("\n💡 Benefits of hooks_to_string():")
    print(" ✓ Write hooks as regular Python functions")
    print(" ✓ Full IDE support (autocomplete, syntax highlighting)")
    print(" ✓ Type checking and linting")
    print(" ✓ Easy to test and debug")
    print(" ✓ Reusable across projects")
    print(" ✓ Works with any REST API client")

    print("\n" + "─" * 70)
    print("✓ hooks_to_string() utility example complete\n")


# ============================================================================
# APPROACH 3: Docker Client with Automatic Conversion (RECOMMENDED)
# ============================================================================

async def example_3_docker_client_auto_conversion():
    """
    Demonstrate Docker Client with automatic hook conversion (RECOMMENDED)
    Use this for the best developer experience with Python
    """
    print_section(
        "APPROACH 3: Docker Client with Auto-Conversion (RECOMMENDED)",
        "Pass function objects directly - conversion happens automatically!"
    )

    print("🐳 Initializing Crawl4AI Docker Client...")
    client = Crawl4aiDockerClient(base_url=DOCKER_URL)
    print("✅ Client ready!\n")

    # Use our reusable hook library - just pass the function objects!
    print("📚 Using reusable hook library:")
    print(" • performance_optimization_hook")
    print(" • authentication_headers_hook")
    print(" • lazy_loading_handler_hook")
    print(" • page_analytics_hook")

    print("\n🎯 Target URL: " + TEST_URLS[0])
    print("🚀 Starting crawl with automatic hook conversion...\n")

    try:
        start_time = time.time()

        # Pass function objects directly - NO manual conversion needed! ✨
        results = await client.crawl(
            urls=[TEST_URLS[0]],
            hooks={
                "on_page_context_created": performance_optimization_hook,
                "before_goto": authentication_headers_hook,
                "before_retrieve_html": lazy_loading_handler_hook,
                "before_return_html": page_analytics_hook,
            },
            hooks_timeout=30
        )

        execution_time = time.time() - start_time
        print(f"\n✅ Crawl completed! (took {execution_time:.2f}s)\n")

        # Display results
        if results and results.success:
            result = results
            print(f"📊 Results:")
            print(f" • URL: {result.url}")
            print(f" • Success: {result.success}")
            print(f" • HTML length: {len(result.html):,} characters")
            print(f" • Markdown length: {len(result.markdown):,} characters")

            # Show metadata
            if result.metadata:
                print(f"\n📋 Metadata:")
                print(f" • Title: {result.metadata.get('title', 'N/A')[:50]}...")

            # Show links
            if result.links:
                internal_count = len(result.links.get('internal', []))
                external_count = len(result.links.get('external', []))
                print(f"\n🔗 Links Found:")
                print(f" • Internal: {internal_count}")
                print(f" • External: {external_count}")
        else:
            print(f"⚠️ Crawl completed but no successful results")
            if results:
                print(f" Error: {results.error_message}")

    except Exception as e:
        print(f"❌ Error: {str(e)}")
        import traceback
        traceback.print_exc()

    print("\n🌟 Why Docker Client is RECOMMENDED:")
    print(" ✓ Automatic function-to-string conversion")
    print(" ✓ No manual hooks_to_string() calls needed")
    print(" ✓ Cleaner, more Pythonic code")
    print(" ✓ Full type hints and IDE support")
    print(" ✓ Built-in error handling")
    print(" ✓ Async/await support")

    print("\n" + "─" * 70)
    print("✓ Docker Client auto-conversion example complete\n")


# ============================================================================
# APPROACH 4: Authentication Example
# ============================================================================

def example_4_authentication_flow():
    """
    Demonstrate authentication flow with multiple hooks
    """
    print_section(
        "EXAMPLE 4: Authentication Flow",
        "Using hooks for authentication with cookies and headers"
    )

    hooks_code = {
        "on_page_context_created": """
async def hook(page, context, **kwargs):
    print("[HOOK] Setting up authentication context")
    # Add authentication cookies
    await context.add_cookies([
        {
            "name": "auth_token",
            "value": "fake_jwt_token_here",
            "domain": ".httpbin.org",
            "path": "/",
            "httpOnly": True,
            "secure": True
        }
    ])
    return page
""",
        "before_goto": """
async def hook(page, context, url, **kwargs):
    print(f"[HOOK] Adding auth headers for {url}")
    # Add Authorization header
    import base64
    credentials = base64.b64encode(b"user:passwd").decode('ascii')
    await page.set_extra_http_headers({
        'Authorization': f'Basic {credentials}',
        'X-API-Key': 'test-api-key-123'
    })
    return page
"""
    }

    payload = {
        "urls": ["https://httpbin.org/basic-auth/user/passwd"],
        "hooks": {
            "code": hooks_code,
            "timeout": 15
        }
    }

    print("\nTesting authentication with httpbin endpoints...")
    # FIX: added a timeout; a bare requests.post can hang indefinitely and the
    # other examples in this file consistently use one.
    response = requests.post(f"{DOCKER_URL}/crawl", json=payload, timeout=60)

    if response.status_code == 200:
        data = response.json()
        print("✅ Authentication test completed")

        if 'results' in data:
            for i, result in enumerate(data['results']):
                print(f"\n URL {i+1}: {result['url']}")
                if result.get('success'):
                    # Check for authentication success indicators
                    html_content = result.get('html', '')
                    if '"authenticated"' in html_content and 'true' in html_content:
                        print(" ✅ Authentication successful! Basic auth worked.")
                    else:
                        print(" ⚠️ Page loaded but auth status unclear")
                else:
                    print(f" ❌ Failed: {result.get('error_message', 'Unknown error')}")
    else:
        print(f"❌ Error: {response.status_code}")

    print("\n" + "─" * 70)
    print("✓ Authentication example complete\n")


# ============================================================================
# MAIN EXECUTION
# ============================================================================

async def main():
    """
    Run all example demonstrations
    """
    print("\n" + "=" * 70)
    print(" 🚀 Crawl4AI - Docker Hooks System Examples")
    print("=" * 70)

    # Check Docker service
    print("\n🔍 Checking Docker service status...")
    if not check_docker_service():
        print("❌ Docker service is not running!")
        print("\n📋 To start the Docker service:")
        print(" docker run -p 11235:11235 unclecode/crawl4ai:latest")
        print("\nPlease start the service and run this example again.")
        return

    print("✅ Docker service is running!\n")

    # Run all examples
    examples = [
        ("String-Based Hooks (REST API)", example_1_string_based_hooks, False),
        ("hooks_to_string() Utility", example_2_hooks_to_string_utility, False),
        ("Docker Client Auto-Conversion (Recommended)", example_3_docker_client_auto_conversion, True),
        ("Authentication Flow", example_4_authentication_flow, False),
    ]

    for i, (name, example_func, is_async) in enumerate(examples, 1):
        print(f"\n{'🔷' * 35}")
        print(f"Example {i}/{len(examples)}: {name}")
        print(f"{'🔷' * 35}\n")

        try:
            if is_async:
                await example_func()
            else:
                example_func()

            print(f"✅ Example {i} completed successfully!")

            # Pause between examples (except the last one)
            if i < len(examples):
                print("\n⏸️ Press Enter to continue to next example...")
                input()

        except KeyboardInterrupt:
            print(f"\n⏹️ Examples interrupted by user")
            break
        except Exception as e:
            print(f"\n❌ Example {i} failed: {str(e)}")
            import traceback
            traceback.print_exc()
            print("\nContinuing to next example...\n")
            continue

    # Final summary
    print("\n" + "=" * 70)
    print(" 🎉 All Examples Complete!")
    print("=" * 70)
    print("\n📊 Summary - Three Approaches to Docker Hooks:")
    print("\n✨ 1. String-Based Hooks:")
    print(" • Write hooks as strings directly in JSON")
    print(" • Best for: REST API, non-Python clients, simple use cases")
    print(" • Cons: No IDE support, harder to debug")
    print("\n✨ 2. hooks_to_string() Utility:")
    print(" • Write hooks as Python functions, convert to strings")
    print(" • Best for: Python with REST API, reusable hook libraries")
    print(" • Pros: IDE support, type checking, easy debugging")
    print("\n✨ 3. Docker Client (RECOMMENDED):")
    print(" • Pass function objects directly, automatic conversion")
    print(" • Best for: Python applications, best developer experience")
    print(" • Pros: All benefits of #2 + cleaner code, no manual conversion")
    print("\n💡 Recommendation:")
    print(" Use Docker Client (#3) for Python applications")
    print(" Use hooks_to_string() (#2) when you need REST API flexibility")
    print(" Use string-based (#1) for non-Python clients or simple scripts")
    print("\n🎯 8 Hook Points Available:")
    print(" • on_browser_created, on_page_context_created")
    print(" • on_user_agent_updated, before_goto, after_goto")
    print(" • on_execution_started, before_retrieve_html, before_return_html")
    print("\n📚 Resources:")
    print(" • Docs: https://docs.crawl4ai.com/core/docker-deployment")
    print(" • GitHub: https://github.com/unclecode/crawl4ai")
    print(" • Discord: https://discord.gg/jP8KfhDhyN")
    print("\n" + "=" * 70)
    print(" Happy Crawling! 🕷️")
    print("=" * 70 + "\n")


if __name__ == "__main__":
    print("\n🎬 Starting Crawl4AI Docker Hooks Examples...")
    print("Press Ctrl+C anytime to exit\n")

    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n\n👋 Examples stopped by user. Thanks for exploring Crawl4AI!")
    except Exception as e:
        print(f"\n\n❌ Error: {str(e)}")
        import traceback
        traceback.print_exc()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/research_assistant.py
docs/examples/research_assistant.py
# Make sure to install the required packageschainlit and groq import os, time from openai import AsyncOpenAI import chainlit as cl import re import requests from io import BytesIO from chainlit.element import ElementBased from groq import Groq # Import threadpools to run the crawl_url function in a separate thread from concurrent.futures import ThreadPoolExecutor client = AsyncOpenAI( base_url="https://api.groq.com/openai/v1", api_key=os.getenv("GROQ_API_KEY") ) # Instrument the OpenAI client cl.instrument_openai() settings = { "model": "llama3-8b-8192", "temperature": 0.5, "max_tokens": 500, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, } def extract_urls(text): url_pattern = re.compile(r"(https?://\S+)") return url_pattern.findall(text) def crawl_url(url): data = { "urls": [url], "include_raw_html": True, "word_count_threshold": 10, "extraction_strategy": "NoExtractionStrategy", "chunking_strategy": "RegexChunking", } response = requests.post("https://crawl4ai.com/crawl", json=data) response_data = response.json() response_data = response_data["results"][0] return response_data["markdown"] @cl.on_chat_start async def on_chat_start(): cl.user_session.set("session", {"history": [], "context": {}}) await cl.Message(content="Welcome to the chat! 
How can I assist you today?").send() @cl.on_message async def on_message(message: cl.Message): user_session = cl.user_session.get("session") # Extract URLs from the user's message urls = extract_urls(message.content) futures = [] with ThreadPoolExecutor() as executor: for url in urls: futures.append(executor.submit(crawl_url, url)) results = [future.result() for future in futures] for url, result in zip(urls, results): ref_number = f"REF_{len(user_session['context']) + 1}" user_session["context"][ref_number] = {"url": url, "content": result} user_session["history"].append({"role": "user", "content": message.content}) # Create a system message that includes the context context_messages = [ f'<appendix ref="{ref}">\n{data["content"]}\n</appendix>' for ref, data in user_session["context"].items() ] if context_messages: system_message = { "role": "system", "content": ( "You are a helpful bot. Use the following context for answering questions. " "Refer to the sources using the REF number in square brackets, e.g., [1], only if the source is given in the appendices below.\n\n" "If the question requires any information from the provided appendices or context, refer to the sources. " "If not, there is no need to add a references section. 
" "At the end of your response, provide a reference section listing the URLs and their REF numbers only if sources from the appendices were used.\n\n" "\n\n".join(context_messages) ), } else: system_message = {"role": "system", "content": "You are a helpful assistant."} msg = cl.Message(content="") await msg.send() # Get response from the LLM stream = await client.chat.completions.create( messages=[system_message, *user_session["history"]], stream=True, **settings ) assistant_response = "" async for part in stream: if token := part.choices[0].delta.content: assistant_response += token await msg.stream_token(token) # Add assistant message to the history user_session["history"].append({"role": "assistant", "content": assistant_response}) await msg.update() # Append the reference section to the assistant's response reference_section = "\n\nReferences:\n" for ref, data in user_session["context"].items(): reference_section += f"[{ref.split('_')[1]}]: {data['url']}\n" msg.content += reference_section await msg.update() @cl.on_audio_chunk async def on_audio_chunk(chunk: cl.AudioChunk): if chunk.isStart: buffer = BytesIO() # This is required for whisper to recognize the file type buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}" # Initialize the session for a new audio stream cl.user_session.set("audio_buffer", buffer) cl.user_session.set("audio_mime_type", chunk.mimeType) # Write the chunks to a buffer and transcribe the whole audio at the end cl.user_session.get("audio_buffer").write(chunk.data) pass @cl.step(type="tool") async def speech_to_text(audio_file): cli = Groq() response = await client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file ) return response.text @cl.on_audio_end async def on_audio_end(elements: list[ElementBased]): # Get the audio buffer from the session audio_buffer: BytesIO = cl.user_session.get("audio_buffer") audio_buffer.seek(0) # Move the file pointer to the beginning audio_file = audio_buffer.read() 
audio_mime_type: str = cl.user_session.get("audio_mime_type") start_time = time.time() whisper_input = (audio_buffer.name, audio_file, audio_mime_type) transcription = await speech_to_text(whisper_input) end_time = time.time() print(f"Transcription took {end_time - start_time} seconds") user_msg = cl.Message(author="You", type="user_message", content=transcription) await user_msg.send() await on_message(user_msg) if __name__ == "__main__": from chainlit.cli import run_chainlit run_chainlit(__file__)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/stealth_mode_quick_start.py
docs/examples/stealth_mode_quick_start.py
""" Quick Start: Using Stealth Mode in Crawl4AI This example shows practical use cases for the stealth mode feature. Stealth mode helps bypass basic bot detection mechanisms. """ import asyncio from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig async def example_1_basic_stealth(): """Example 1: Basic stealth mode usage""" print("\n=== Example 1: Basic Stealth Mode ===") # Enable stealth mode in browser config browser_config = BrowserConfig( enable_stealth=True, # This is the key parameter headless=True ) async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun(url="https://example.com") print(f"βœ“ Crawled {result.url} successfully") print(f"βœ“ Title: {result.metadata.get('title', 'N/A')}") async def example_2_stealth_with_screenshot(): """Example 2: Stealth mode with screenshot to show detection results""" print("\n=== Example 2: Stealth Mode Visual Verification ===") browser_config = BrowserConfig( enable_stealth=True, headless=False # Set to False to see the browser ) async with AsyncWebCrawler(config=browser_config) as crawler: config = CrawlerRunConfig( screenshot=True, wait_until="networkidle" ) result = await crawler.arun( url="https://bot.sannysoft.com", config=config ) if result.success: print(f"βœ“ Successfully crawled bot detection site") print(f"βœ“ With stealth enabled, many detection tests should show as passed") if result.screenshot: # Save screenshot for verification import base64 with open("stealth_detection_results.png", "wb") as f: f.write(base64.b64decode(result.screenshot)) print(f"βœ“ Screenshot saved as 'stealth_detection_results.png'") print(f" Check the screenshot to see detection results!") async def example_3_stealth_for_protected_sites(): """Example 3: Using stealth for sites with bot protection""" print("\n=== Example 3: Stealth for Protected Sites ===") browser_config = BrowserConfig( enable_stealth=True, headless=True, viewport_width=1920, viewport_height=1080 ) async with 
AsyncWebCrawler(config=browser_config) as crawler: # Add human-like behavior config = CrawlerRunConfig( wait_until="networkidle", delay_before_return_html=2.0, # Wait 2 seconds js_code=""" // Simulate human-like scrolling window.scrollTo({ top: document.body.scrollHeight / 2, behavior: 'smooth' }); """ ) # Try accessing a site that might have bot protection result = await crawler.arun( url="https://www.g2.com/products/slack/reviews", config=config ) if result.success: print(f"βœ“ Successfully accessed protected site") print(f"βœ“ Retrieved {len(result.html)} characters of HTML") else: print(f"βœ— Failed to access site: {result.error_message}") async def example_4_stealth_with_sessions(): """Example 4: Stealth mode with session management""" print("\n=== Example 4: Stealth + Session Management ===") browser_config = BrowserConfig( enable_stealth=True, headless=False ) async with AsyncWebCrawler(config=browser_config) as crawler: session_id = "my_stealth_session" # First request - establish session config = CrawlerRunConfig( session_id=session_id, wait_until="domcontentloaded" ) result1 = await crawler.arun( url="https://news.ycombinator.com", config=config ) print(f"βœ“ First request completed: {result1.url}") # Second request - reuse session await asyncio.sleep(2) # Brief delay between requests result2 = await crawler.arun( url="https://news.ycombinator.com/best", config=config ) print(f"βœ“ Second request completed: {result2.url}") print(f"βœ“ Session reused, maintaining cookies and state") async def example_5_stealth_comparison(): """Example 5: Compare results with and without stealth using screenshots""" print("\n=== Example 5: Stealth Mode Comparison ===") test_url = "https://bot.sannysoft.com" # First test WITHOUT stealth print("\nWithout stealth:") regular_config = BrowserConfig( enable_stealth=False, headless=True ) async with AsyncWebCrawler(config=regular_config) as crawler: config = CrawlerRunConfig( screenshot=True, wait_until="networkidle" ) result = 
await crawler.arun(url=test_url, config=config) if result.success and result.screenshot: import base64 with open("comparison_without_stealth.png", "wb") as f: f.write(base64.b64decode(result.screenshot)) print(f" βœ“ Screenshot saved: comparison_without_stealth.png") print(f" Many tests will show as FAILED (red)") # Then test WITH stealth print("\nWith stealth:") stealth_config = BrowserConfig( enable_stealth=True, headless=True ) async with AsyncWebCrawler(config=stealth_config) as crawler: config = CrawlerRunConfig( screenshot=True, wait_until="networkidle" ) result = await crawler.arun(url=test_url, config=config) if result.success and result.screenshot: import base64 with open("comparison_with_stealth.png", "wb") as f: f.write(base64.b64decode(result.screenshot)) print(f" βœ“ Screenshot saved: comparison_with_stealth.png") print(f" More tests should show as PASSED (green)") print("\nCompare the two screenshots to see the difference!") async def main(): """Run all examples""" print("Crawl4AI Stealth Mode Examples") print("==============================") # Run basic example await example_1_basic_stealth() # Run screenshot verification example await example_2_stealth_with_screenshot() # Run protected site example await example_3_stealth_for_protected_sites() # Run session example await example_4_stealth_with_sessions() # Run comparison example await example_5_stealth_comparison() print("\n" + "="*50) print("Tips for using stealth mode effectively:") print("- Use realistic viewport sizes (1920x1080, 1366x768)") print("- Add delays between requests to appear more human") print("- Combine with session management for better results") print("- Remember: stealth mode is for legitimate scraping only") print("="*50) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/network_console_capture_example.py
docs/examples/network_console_capture_example.py
import asyncio import json import os import base64 from pathlib import Path from typing import List, Dict, Any from datetime import datetime from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, CrawlResult from crawl4ai import BrowserConfig __cur_dir__ = Path(__file__).parent # Create temp directory if it doesn't exist os.makedirs(os.path.join(__cur_dir__, "tmp"), exist_ok=True) async def demo_basic_network_capture(): """Basic network request capturing example""" print("\n=== 1. Basic Network Request Capturing ===") async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( capture_network_requests=True, wait_until="networkidle" # Wait for network to be idle ) result = await crawler.arun( url="https://example.com/", config=config ) if result.success and result.network_requests: print(f"Captured {len(result.network_requests)} network events") # Count by event type event_types = {} for req in result.network_requests: event_type = req.get("event_type", "unknown") event_types[event_type] = event_types.get(event_type, 0) + 1 print("Event types:") for event_type, count in event_types.items(): print(f" - {event_type}: {count}") # Show a sample request and response request = next((r for r in result.network_requests if r.get("event_type") == "request"), None) response = next((r for r in result.network_requests if r.get("event_type") == "response"), None) if request: print("\nSample request:") print(f" URL: {request.get('url')}") print(f" Method: {request.get('method')}") print(f" Headers: {list(request.get('headers', {}).keys())}") if response: print("\nSample response:") print(f" URL: {response.get('url')}") print(f" Status: {response.get('status')} {response.get('status_text', '')}") print(f" Headers: {list(response.get('headers', {}).keys())}") async def demo_basic_console_capture(): """Basic console message capturing example""" print("\n=== 2. 
Basic Console Message Capturing ===") # Create a simple HTML file with console messages html_file = os.path.join(__cur_dir__, "tmp", "console_test.html") with open(html_file, "w") as f: f.write(""" <!DOCTYPE html> <html> <head> <title>Console Test</title> </head> <body> <h1>Console Message Test</h1> <script> console.log("This is a basic log message"); console.info("This is an info message"); console.warn("This is a warning message"); console.error("This is an error message"); // Generate an error try { nonExistentFunction(); } catch (e) { console.error("Caught error:", e); } </script> </body> </html> """) async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( capture_console_messages=True, wait_until="networkidle" # Wait to make sure all scripts execute ) result = await crawler.arun( url=f"file://{html_file}", config=config ) if result.success and result.console_messages: print(f"Captured {len(result.console_messages)} console messages") # Count by message type message_types = {} for msg in result.console_messages: msg_type = msg.get("type", "unknown") message_types[msg_type] = message_types.get(msg_type, 0) + 1 print("Message types:") for msg_type, count in message_types.items(): print(f" - {msg_type}: {count}") # Show all messages print("\nAll console messages:") for i, msg in enumerate(result.console_messages, 1): print(f" {i}. [{msg.get('type', 'unknown')}] {msg.get('text', '')}") async def demo_combined_capture(): """Capturing both network requests and console messages""" print("\n=== 3. 
Combined Network and Console Capture ===") async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( capture_network_requests=True, capture_console_messages=True, wait_until="networkidle" ) result = await crawler.arun( url="https://httpbin.org/html", config=config ) if result.success: network_count = len(result.network_requests) if result.network_requests else 0 console_count = len(result.console_messages) if result.console_messages else 0 print(f"Captured {network_count} network events and {console_count} console messages") # Save the captured data to a JSON file for analysis output_file = os.path.join(__cur_dir__, "tmp", "capture_data.json") with open(output_file, "w") as f: json.dump({ "url": result.url, "timestamp": datetime.now().isoformat(), "network_requests": result.network_requests, "console_messages": result.console_messages }, f, indent=2) print(f"Full capture data saved to {output_file}") async def analyze_spa_network_traffic(): """Analyze network traffic of a Single-Page Application""" print("\n=== 4. 
Analyzing SPA Network Traffic ===") async with AsyncWebCrawler(config=BrowserConfig( headless=True, viewport_width=1280, viewport_height=800 )) as crawler: config = CrawlerRunConfig( capture_network_requests=True, capture_console_messages=True, # Wait longer to ensure all resources are loaded wait_until="networkidle", page_timeout=60000, # 60 seconds ) result = await crawler.arun( url="https://weather.com", config=config ) if result.success and result.network_requests: # Extract different types of requests requests = [] responses = [] failures = [] for event in result.network_requests: event_type = event.get("event_type") if event_type == "request": requests.append(event) elif event_type == "response": responses.append(event) elif event_type == "request_failed": failures.append(event) print(f"Captured {len(requests)} requests, {len(responses)} responses, and {len(failures)} failures") # Analyze request types resource_types = {} for req in requests: resource_type = req.get("resource_type", "unknown") resource_types[resource_type] = resource_types.get(resource_type, 0) + 1 print("\nResource types:") for resource_type, count in sorted(resource_types.items(), key=lambda x: x[1], reverse=True): print(f" - {resource_type}: {count}") # Analyze API calls api_calls = [r for r in requests if "api" in r.get("url", "").lower()] if api_calls: print(f"\nDetected {len(api_calls)} API calls:") for i, call in enumerate(api_calls[:5], 1): # Show first 5 print(f" {i}. {call.get('method')} {call.get('url')}") if len(api_calls) > 5: print(f" ... and {len(api_calls) - 5} more") # Analyze response status codes status_codes = {} for resp in responses: status = resp.get("status", 0) status_codes[status] = status_codes.get(status, 0) + 1 print("\nResponse status codes:") for status, count in sorted(status_codes.items()): print(f" - {status}: {count}") # Analyze failures if failures: print("\nFailed requests:") for i, failure in enumerate(failures[:5], 1): # Show first 5 print(f" {i}. 
{failure.get('url')} - {failure.get('failure_text')}") if len(failures) > 5: print(f" ... and {len(failures) - 5} more") # Check for console errors if result.console_messages: errors = [msg for msg in result.console_messages if msg.get("type") == "error"] if errors: print(f"\nDetected {len(errors)} console errors:") for i, error in enumerate(errors[:3], 1): # Show first 3 print(f" {i}. {error.get('text', '')[:100]}...") if len(errors) > 3: print(f" ... and {len(errors) - 3} more") # Save analysis to file output_file = os.path.join(__cur_dir__, "tmp", "weather_network_analysis.json") with open(output_file, "w") as f: json.dump({ "url": result.url, "timestamp": datetime.now().isoformat(), "statistics": { "request_count": len(requests), "response_count": len(responses), "failure_count": len(failures), "resource_types": resource_types, "status_codes": {str(k): v for k, v in status_codes.items()}, "api_call_count": len(api_calls), "console_error_count": len(errors) if result.console_messages else 0 }, "network_requests": result.network_requests, "console_messages": result.console_messages }, f, indent=2) print(f"\nFull analysis saved to {output_file}") async def demo_security_analysis(): """Using network capture for security analysis""" print("\n=== 5. 
Security Analysis with Network Capture ===") async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( capture_network_requests=True, capture_console_messages=True, wait_until="networkidle" ) # A site that makes multiple third-party requests result = await crawler.arun( url="https://www.nytimes.com/", config=config ) if result.success and result.network_requests: print(f"Captured {len(result.network_requests)} network events") # Extract all domains domains = set() for req in result.network_requests: if req.get("event_type") == "request": url = req.get("url", "") try: from urllib.parse import urlparse domain = urlparse(url).netloc if domain: domains.add(domain) except: pass print(f"\nDetected requests to {len(domains)} unique domains:") main_domain = urlparse(result.url).netloc # Separate first-party vs third-party domains first_party = [d for d in domains if main_domain in d] third_party = [d for d in domains if main_domain not in d] print(f" - First-party domains: {len(first_party)}") print(f" - Third-party domains: {len(third_party)}") # Look for potential trackers/analytics tracking_keywords = ["analytics", "tracker", "pixel", "tag", "stats", "metric", "collect", "beacon"] potential_trackers = [] for domain in third_party: if any(keyword in domain.lower() for keyword in tracking_keywords): potential_trackers.append(domain) if potential_trackers: print(f"\nPotential tracking/analytics domains ({len(potential_trackers)}):") for i, domain in enumerate(sorted(potential_trackers)[:10], 1): print(f" {i}. {domain}") if len(potential_trackers) > 10: print(f" ... and {len(potential_trackers) - 10} more") # Check for insecure (HTTP) requests insecure_requests = [ req.get("url") for req in result.network_requests if req.get("event_type") == "request" and req.get("url", "").startswith("http://") ] if insecure_requests: print(f"\nWarning: Found {len(insecure_requests)} insecure (HTTP) requests:") for i, url in enumerate(insecure_requests[:5], 1): print(f" {i}. 
{url}") if len(insecure_requests) > 5: print(f" ... and {len(insecure_requests) - 5} more") # Save security analysis to file output_file = os.path.join(__cur_dir__, "tmp", "security_analysis.json") with open(output_file, "w") as f: json.dump({ "url": result.url, "main_domain": main_domain, "timestamp": datetime.now().isoformat(), "analysis": { "total_requests": len([r for r in result.network_requests if r.get("event_type") == "request"]), "unique_domains": len(domains), "first_party_domains": first_party, "third_party_domains": third_party, "potential_trackers": potential_trackers, "insecure_requests": insecure_requests } }, f, indent=2) print(f"\nFull security analysis saved to {output_file}") async def demo_performance_analysis(): """Using network capture for performance analysis""" print("\n=== 6. Performance Analysis with Network Capture ===") async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( capture_network_requests=True, page_timeout=60 * 2 * 1000 # 120 seconds ) result = await crawler.arun( url="https://www.cnn.com/", config=config ) if result.success and result.network_requests: # Filter only response events with timing information responses_with_timing = [ r for r in result.network_requests if r.get("event_type") == "response" and r.get("request_timing") ] if responses_with_timing: print(f"Analyzing timing for {len(responses_with_timing)} network responses") # Group by resource type resource_timings = {} for resp in responses_with_timing: url = resp.get("url", "") timing = resp.get("request_timing", {}) # Determine resource type from URL extension ext = url.split(".")[-1].lower() if "." 
in url.split("/")[-1] else "unknown" if ext in ["jpg", "jpeg", "png", "gif", "webp", "svg", "ico"]: resource_type = "image" elif ext in ["js"]: resource_type = "javascript" elif ext in ["css"]: resource_type = "css" elif ext in ["woff", "woff2", "ttf", "otf", "eot"]: resource_type = "font" else: resource_type = "other" if resource_type not in resource_timings: resource_timings[resource_type] = [] # Calculate request duration if timing information is available if isinstance(timing, dict) and "requestTime" in timing and "receiveHeadersEnd" in timing: # Convert to milliseconds duration = (timing["receiveHeadersEnd"] - timing["requestTime"]) * 1000 resource_timings[resource_type].append({ "url": url, "duration_ms": duration }) if isinstance(timing, dict) and "requestStart" in timing and "responseStart" in timing and "startTime" in timing: # Convert to milliseconds duration = (timing["responseStart"] - timing["requestStart"]) * 1000 resource_timings[resource_type].append({ "url": url, "duration_ms": duration }) # Calculate statistics for each resource type print("\nPerformance by resource type:") for resource_type, timings in resource_timings.items(): if timings: durations = [t["duration_ms"] for t in timings] avg_duration = sum(durations) / len(durations) max_duration = max(durations) slowest_resource = next(t["url"] for t in timings if t["duration_ms"] == max_duration) print(f" {resource_type.upper()}:") print(f" - Count: {len(timings)}") print(f" - Avg time: {avg_duration:.2f} ms") print(f" - Max time: {max_duration:.2f} ms") print(f" - Slowest: {slowest_resource}") # Identify the slowest resources overall all_timings = [] for resource_type, timings in resource_timings.items(): for timing in timings: timing["type"] = resource_type all_timings.append(timing) all_timings.sort(key=lambda x: x["duration_ms"], reverse=True) print("\nTop 5 slowest resources:") for i, timing in enumerate(all_timings[:5], 1): print(f" {i}. 
[{timing['type']}] {timing['url']} - {timing['duration_ms']:.2f} ms") # Save performance analysis to file output_file = os.path.join(__cur_dir__, "tmp", "performance_analysis.json") with open(output_file, "w") as f: json.dump({ "url": result.url, "timestamp": datetime.now().isoformat(), "resource_timings": resource_timings, "slowest_resources": all_timings[:10] # Save top 10 }, f, indent=2) print(f"\nFull performance analysis saved to {output_file}") async def main(): """Run all demo functions sequentially""" print("=== Network and Console Capture Examples ===") # Make sure tmp directory exists os.makedirs(os.path.join(__cur_dir__, "tmp"), exist_ok=True) # Run basic examples # await demo_basic_network_capture() await demo_basic_console_capture() # await demo_combined_capture() # Run advanced examples # await analyze_spa_network_traffic() # await demo_security_analysis() # await demo_performance_analysis() print("\n=== Examples Complete ===") print(f"Check the tmp directory for output files: {os.path.join(__cur_dir__, 'tmp')}") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/proxy_rotation_demo.py
docs/examples/proxy_rotation_demo.py
"""Proxy rotation demos for crawl4ai using RoundRobinProxyStrategy.

Reads proxy credentials from the PROXIES environment variable
(comma-separated ``ip:port:username:password`` entries) and verifies each
proxy by crawling https://httpbin.org/ip and comparing the reported IP.
"""

import asyncio
import os
import re
from typing import Dict, List

# All crawl4ai names are imported at module top so that both demo
# coroutines can be imported and called from other modules. (Previously
# CrawlerMonitor/DisplayMode/MemoryAdaptiveDispatcher/DefaultMarkdownGenerator
# were imported only under the __main__ guard, so importing this module and
# calling demo_proxy_rotation_batch() raised NameError.)
from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CacheMode,
    CrawlerMonitor,
    CrawlerRunConfig,
    DefaultMarkdownGenerator,
    DisplayMode,
    MemoryAdaptiveDispatcher,
    RoundRobinProxyStrategy,
)


def load_proxies_from_env() -> List[Dict]:
    """Load proxies from the PROXIES environment variable.

    PROXIES is a comma-separated list of ``ip:port:username:password``
    entries. Parsing is best-effort: on a malformed entry the error is
    printed and whatever was parsed before the failure is returned.

    Returns:
        List of dicts with keys ``server``, ``username``, ``password`` and
        ``ip`` (the raw IP, kept so responses can be verified against it).
    """
    proxies: List[Dict] = []
    try:
        proxy_list = os.getenv("PROXIES", "").split(",")
        for proxy in proxy_list:
            if not proxy:
                continue
            ip, port, username, password = proxy.split(":")
            proxies.append({
                "server": f"http://{ip}:{port}",
                "username": username,
                "password": password,
                "ip": ip,  # Store original IP for verification
            })
    except Exception as e:
        print(f"Error loading proxies from environment: {e}")
    return proxies


async def demo_proxy_rotation():
    """
    Proxy Rotation Demo using RoundRobinProxyStrategy
    ===============================================
    Demonstrates proxy rotation using the strategy pattern, issuing one
    request per configured proxy and verifying the response IP.
    """
    print("\n=== Proxy Rotation Demo (Round Robin) ===")

    # Load proxies and create rotation strategy
    proxies = load_proxies_from_env()
    if not proxies:
        print("No proxies found in environment. Set PROXIES env variable!")
        return

    proxy_strategy = RoundRobinProxyStrategy(proxies)

    # Create configs
    browser_config = BrowserConfig(headless=True, verbose=False)
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        proxy_rotation_strategy=proxy_strategy,
    )

    # Test URLs — one request per proxy so each gets exercised once
    urls = ["https://httpbin.org/ip"] * len(proxies)

    async with AsyncWebCrawler(config=browser_config) as crawler:
        for url in urls:
            result = await crawler.arun(url=url, config=run_config)
            if result.success:
                # Extract the echoing server's reported IP from the response
                ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
                # NOTE(review): run_config.proxy_config reflects the proxy the
                # rotation strategy assigned for the most recent request —
                # confirm against RoundRobinProxyStrategy docs.
                current_proxy = run_config.proxy_config if run_config.proxy_config else None
                if current_proxy:
                    print(f"Proxy {current_proxy['server']} -> Response IP: {ip_match.group(0) if ip_match else 'Not found'}")
                    verified = ip_match and ip_match.group(0) == current_proxy['ip']
                    if verified:
                        print(f"βœ… Proxy working! IP matches: {current_proxy['ip']}")
                    else:
                        print("❌ Proxy failed or IP mismatch!")
            else:
                print(f"Request failed: {result.error_message}")


async def demo_proxy_rotation_batch():
    """
    Proxy Rotation Demo with Batch Processing
    =======================================
    Demonstrates proxy rotation using arun_many with a memory-adaptive
    dispatcher; each proxy is exercised twice and verified responses are
    counted.
    """
    print("\n=== Proxy Rotation Batch Demo ===")

    try:
        # Load proxies and create rotation strategy
        proxies = load_proxies_from_env()
        if not proxies:
            print("No proxies found in environment. Set PROXIES env variable!")
            return

        proxy_strategy = RoundRobinProxyStrategy(proxies)

        # Configurations
        browser_config = BrowserConfig(headless=True, verbose=False)
        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            proxy_rotation_strategy=proxy_strategy,
            markdown_generator=DefaultMarkdownGenerator(),
        )

        # Test URLs - multiple requests to test rotation (each proxy twice)
        urls = ["https://httpbin.org/ip"] * (len(proxies) * 2)

        print("\nπŸ“ˆ Initializing crawler with proxy rotation...")
        async with AsyncWebCrawler(config=browser_config) as crawler:
            # NOTE(review): monitor is constructed but not attached — the
            # monitor= argument below is deliberately commented out.
            monitor = CrawlerMonitor(
                max_visible_rows=10,
                display_mode=DisplayMode.DETAILED,
            )

            dispatcher = MemoryAdaptiveDispatcher(
                memory_threshold_percent=80.0,
                check_interval=0.5,
                max_session_permit=1,  # len(proxies), # Match concurrent sessions to proxy count
                # monitor=monitor
            )

            print("\nπŸš€ Starting batch crawl with proxy rotation...")
            results = await crawler.arun_many(
                urls=urls,
                config=run_config,
                dispatcher=dispatcher,
            )

            # Verify results: count responses whose IP matches the proxy used
            success_count = 0
            for result in results:
                if result.success:
                    ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
                    current_proxy = run_config.proxy_config if run_config.proxy_config else None

                    if current_proxy and ip_match:
                        print(f"URL {result.url}")
                        print(f"Proxy {current_proxy['server']} -> Response IP: {ip_match.group(0)}")
                        verified = ip_match.group(0) == current_proxy['ip']
                        if verified:
                            print(f"βœ… Proxy working! IP matches: {current_proxy['ip']}")
                            success_count += 1
                        else:
                            print("❌ Proxy failed or IP mismatch!")
                        print("---")

            print(f"\nβœ… Completed {len(results)} requests with {success_count} successful proxy verifications")

    except Exception as e:
        print(f"\n❌ Error in proxy rotation batch demo: {str(e)}")


if __name__ == "__main__":

    async def run_demos():
        # await demo_proxy_rotation()  # Original single-request demo
        await demo_proxy_rotation_batch()  # New batch processing demo

    asyncio.run(run_demos())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/amazon_product_extraction_direct_url.py
docs/examples/amazon_product_extraction_direct_url.py
""" This example demonstrates how to use JSON CSS extraction to scrape product information from Amazon search results. It shows how to extract structured data like product titles, prices, ratings, and other details using CSS selectors. """ from crawl4ai import AsyncWebCrawler from crawl4ai import JsonCssExtractionStrategy from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig import json async def extract_amazon_products(): # Initialize browser config browser_config = BrowserConfig(browser_type="chromium", headless=True) # Initialize crawler config with JSON CSS extraction strategy crawler_config = CrawlerRunConfig( extraction_strategy=JsonCssExtractionStrategy( schema={ "name": "Amazon Product Search Results", "baseSelector": "[data-component-type='s-search-result']", "fields": [ { "name": "asin", "selector": "", "type": "attribute", "attribute": "data-asin", }, {"name": "title", "selector": "h2 a span", "type": "text"}, { "name": "url", "selector": "h2 a", "type": "attribute", "attribute": "href", }, { "name": "image", "selector": ".s-image", "type": "attribute", "attribute": "src", }, { "name": "rating", "selector": ".a-icon-star-small .a-icon-alt", "type": "text", }, { "name": "reviews_count", "selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span", "type": "text", }, { "name": "price", "selector": ".a-price .a-offscreen", "type": "text", }, { "name": "original_price", "selector": ".a-price.a-text-price .a-offscreen", "type": "text", }, { "name": "sponsored", "selector": ".puis-sponsored-label-text", "type": "exists", }, { "name": "delivery_info", "selector": "[data-cy='delivery-recipe'] .a-color-base", "type": "text", "multiple": True, }, ], } ) ) # Example search URL (you should replace with your actual Amazon URL) url = "https://www.amazon.com/s?k=Samsung+Galaxy+Tab" # Use context manager for proper resource handling async with AsyncWebCrawler(config=browser_config) as crawler: # Extract the data result = await crawler.arun(url=url, 
config=crawler_config) # Process and print the results if result and result.extracted_content: # Parse the JSON string into a list of products products = json.loads(result.extracted_content) # Process each product in the list for product in products: print("\nProduct Details:") print(f"ASIN: {product.get('asin')}") print(f"Title: {product.get('title')}") print(f"Price: {product.get('price')}") print(f"Original Price: {product.get('original_price')}") print(f"Rating: {product.get('rating')}") print(f"Reviews: {product.get('reviews_count')}") print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}") if product.get("delivery_info"): print(f"Delivery: {' '.join(product['delivery_info'])}") print("-" * 80) if __name__ == "__main__": import asyncio asyncio.run(extract_amazon_products())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/docs/examples/hello_world_undetected.py
docs/examples/hello_world_undetected.py
import asyncio

from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CrawlerRunConfig,
    DefaultMarkdownGenerator,
    PruningContentFilter,
    CrawlResult,
    UndetectedAdapter
)
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy


async def main():
    """Demo: crawl a page through the UndetectedAdapter browser strategy.

    Builds an AsyncPlaywrightCrawlerStrategy backed by UndetectedAdapter,
    crawls helloworld.org with console capture enabled, and prints status,
    captured console message count, and a markdown preview.
    """
    # Create browser config (headed + verbose so the run is observable)
    browser_config = BrowserConfig(
        headless=False,
        verbose=True,
    )

    # Create the undetected adapter
    undetected_adapter = UndetectedAdapter()

    # Create the crawler strategy with the undetected adapter
    crawler_strategy = AsyncPlaywrightCrawlerStrategy(
        browser_config=browser_config,
        browser_adapter=undetected_adapter
    )

    # Create the crawler with our custom strategy
    async with AsyncWebCrawler(
        crawler_strategy=crawler_strategy,
        config=browser_config
    ) as crawler:
        # Configure the crawl
        crawler_config = CrawlerRunConfig(
            markdown_generator=DefaultMarkdownGenerator(
                content_filter=PruningContentFilter()
            ),
            capture_console_messages=True,  # Enable console capture to test adapter
        )

        # Test on a site that typically detects bots
        print("Testing undetected adapter...")
        result: CrawlResult = await crawler.arun(
            url="https://www.helloworld.org",
            config=crawler_config
        )

        print(f"Status: {result.status_code}")
        print(f"Success: {result.success}")
        print(f"Console messages captured: {len(result.console_messages or [])}")
        # Guard the markdown access: on a failed crawl result.markdown may be
        # unset, and the original unconditional access crashed the demo
        # instead of reporting the failure.
        if result.success and result.markdown:
            print(f"Markdown content (first 500 chars):\n{result.markdown.raw_markdown[:500]}")
        else:
            print("No markdown content available (crawl did not succeed).")


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false