repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/scorers/simple.py
metagpt/exp_pool/scorers/simple.py
"""Simple scorer.""" import json from pydantic import Field from metagpt.exp_pool.schema import Score from metagpt.exp_pool.scorers.base import BaseScorer from metagpt.llm import LLM from metagpt.provider.base_llm import BaseLLM from metagpt.utils.common import CodeParser SIMPLE_SCORER_TEMPLATE = """ Role: You are a highly efficient assistant, tasked with evaluating a response to a given request. The response is generated by a large language model (LLM). I will provide you with a request and a corresponding response. Your task is to assess this response and provide a score from a human perspective. ## Context ### Request {req} ### Response {resp} ## Format Example ```json {{ "val": "the value of the score, int from 1 to 10, higher is better.", "reason": "an explanation supporting the score." }} ``` ## Instructions - Understand the request and response given by the user. - Evaluate the response based on its quality relative to the given request. - Provide a score from 1 to 10, where 10 is the best. - Provide a reason supporting your score. ## Constraint Format: Just print the result in json format like **Format Example**. ## Action Follow instructions, generate output and make sure it follows the **Constraint**. """ class SimpleScorer(BaseScorer): llm: BaseLLM = Field(default_factory=LLM) async def evaluate(self, req: str, resp: str) -> Score: """Evaluates the quality of a response relative to a given request, as scored by an LLM. Args: req (str): The request. resp (str): The response. Returns: Score: An object containing the score (1-10) and the reasoning. """ prompt = SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp) resp = await self.llm.aask(prompt) resp_json = json.loads(CodeParser.parse_code(resp, lang="json")) return Score(**resp_json)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/scorers/__init__.py
metagpt/exp_pool/scorers/__init__.py
"""Scorers init.""" from metagpt.exp_pool.scorers.base import BaseScorer from metagpt.exp_pool.scorers.simple import SimpleScorer __all__ = ["BaseScorer", "SimpleScorer"]
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/scorers/base.py
metagpt/exp_pool/scorers/base.py
"""Base scorer.""" from abc import ABC, abstractmethod from pydantic import BaseModel, ConfigDict from metagpt.exp_pool.schema import Score class BaseScorer(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod async def evaluate(self, req: str, resp: str) -> Score: """Evaluates the quality of a response relative to a given request."""
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/context_builders/role_zero.py
metagpt/exp_pool/context_builders/role_zero.py
"""RoleZero context builder.""" import copy from typing import Any from metagpt.const import EXPERIENCE_MASK from metagpt.exp_pool.context_builders.base import BaseContextBuilder class RoleZeroContextBuilder(BaseContextBuilder): async def build(self, req: Any) -> list[dict]: """Builds the role zero context string. Note: 1. The expected format for `req`, e.g., [{...}, {"role": "user", "content": "context"}]. 2. Returns the original `req` if it is empty. 3. Creates a copy of req and replaces the example content in the copied req with actual experiences. """ if not req: return req exps = self.format_exps() if not exps: return req req_copy = copy.deepcopy(req) req_copy[-1]["content"] = self.replace_example_content(req_copy[-1].get("content", ""), exps) return req_copy def replace_example_content(self, text: str, new_example_content: str) -> str: return self.fill_experience(text, new_example_content) @staticmethod def fill_experience(text: str, new_example_content: str) -> str: replaced_text = text.replace(EXPERIENCE_MASK, new_example_content) return replaced_text
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/context_builders/simple.py
metagpt/exp_pool/context_builders/simple.py
"""Simple context builder.""" from typing import Any from metagpt.exp_pool.context_builders.base import BaseContextBuilder SIMPLE_CONTEXT_TEMPLATE = """ ## Context ### Experiences ----- {exps} ----- ## User Requirement {req} ## Instruction Consider **Experiences** to generate a better answer. """ class SimpleContextBuilder(BaseContextBuilder): async def build(self, req: Any) -> str: return SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps=self.format_exps())
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/context_builders/__init__.py
metagpt/exp_pool/context_builders/__init__.py
"""Context builders init.""" from metagpt.exp_pool.context_builders.base import BaseContextBuilder from metagpt.exp_pool.context_builders.simple import SimpleContextBuilder from metagpt.exp_pool.context_builders.role_zero import RoleZeroContextBuilder __all__ = ["BaseContextBuilder", "SimpleContextBuilder", "RoleZeroContextBuilder"]
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/context_builders/base.py
metagpt/exp_pool/context_builders/base.py
"""Base context builder.""" from abc import ABC, abstractmethod from typing import Any from pydantic import BaseModel, ConfigDict from metagpt.exp_pool.schema import Experience EXP_TEMPLATE = """Given the request: {req}, We can get the response: {resp}, which scored: {score}.""" class BaseContextBuilder(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) exps: list[Experience] = [] @abstractmethod async def build(self, req: Any) -> Any: """Build context from req. Do not modify `req`. If modification is necessary, use copy.deepcopy to create a copy first. """ def format_exps(self) -> str: """Format experiences into a numbered list of strings. Example: 1. Given the request: req1, We can get the response: resp1, which scored: 8. 2. Given the request: req2, We can get the response: resp2, which scored: 9. Returns: str: The formatted experiences as a string. """ result = [] for i, exp in enumerate(self.exps, start=1): score_val = exp.metric.score.val if exp.metric and exp.metric.score else "N/A" result.append(f"{i}. " + EXP_TEMPLATE.format(req=exp.req, resp=exp.resp, score=score_val)) return "\n".join(result)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/exp_pool/context_builders/action_node.py
metagpt/exp_pool/context_builders/action_node.py
"""Action Node context builder.""" from typing import Any from metagpt.exp_pool.context_builders.base import BaseContextBuilder ACTION_NODE_CONTEXT_TEMPLATE = """ {req} ### Experiences ----- {exps} ----- ## Instruction Consider **Experiences** to generate a better answer. """ class ActionNodeContextBuilder(BaseContextBuilder): async def build(self, req: Any) -> str: """Builds the action node context string. If there are no experiences, returns the original `req`; otherwise returns context with `req` and formatted experiences. """ exps = self.format_exps() return ACTION_NODE_CONTEXT_TEMPLATE.format(req=req, exps=exps) if exps else req
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/task_type.py
metagpt/strategy/task_type.py
from enum import Enum

from pydantic import BaseModel

from metagpt.prompts.task_type import (
    DATA_PREPROCESS_PROMPT,
    EDA_PROMPT,
    FEATURE_ENGINEERING_PROMPT,
    IMAGE2WEBPAGE_PROMPT,
    MODEL_EVALUATE_PROMPT,
    MODEL_TRAIN_PROMPT,
    WEB_SCRAPING_PROMPT,
)


class TaskTypeDef(BaseModel):
    """Declarative description of one task type: a name plus optional desc and guidance prompt."""

    name: str
    desc: str = ""
    guidance: str = ""


class TaskType(Enum):
    """By identifying specific types of tasks, we can inject human priors (guidance) to help task solving"""

    EDA = TaskTypeDef(
        name="eda",
        desc="For performing exploratory data analysis",
        guidance=EDA_PROMPT,
    )
    DATA_PREPROCESS = TaskTypeDef(
        name="data preprocessing",
        desc="For preprocessing dataset in a data analysis or machine learning task ONLY,"
        "general data operation doesn't fall into this type",
        guidance=DATA_PREPROCESS_PROMPT,
    )
    FEATURE_ENGINEERING = TaskTypeDef(
        name="feature engineering",
        desc="Only for creating new columns for input data.",
        guidance=FEATURE_ENGINEERING_PROMPT,
    )
    MODEL_TRAIN = TaskTypeDef(
        name="model train",
        desc="Only for training model.",
        guidance=MODEL_TRAIN_PROMPT,
    )
    MODEL_EVALUATE = TaskTypeDef(
        name="model evaluate",
        desc="Only for evaluating model.",
        guidance=MODEL_EVALUATE_PROMPT,
    )
    IMAGE2WEBPAGE = TaskTypeDef(
        name="image2webpage",
        desc="For converting image into webpage code.",
        guidance=IMAGE2WEBPAGE_PROMPT,
    )
    OTHER = TaskTypeDef(name="other", desc="Any tasks not in the defined categories")

    # Legacy TaskType to support tool recommendation using type match. You don't need to define task types if you have no human priors to inject.
    TEXT2IMAGE = TaskTypeDef(
        name="text2image",
        desc="Related to text2image, image2image using stable diffusion model.",
    )
    WEBSCRAPING = TaskTypeDef(
        name="web scraping",
        desc="For scraping data from web pages.",
        guidance=WEB_SCRAPING_PROMPT,
    )
    EMAIL_LOGIN = TaskTypeDef(
        name="email login",
        desc="For logging to an email.",
    )
    DEVELOP_SOFTWARE = TaskTypeDef(
        name="develop software",
        desc="SOP related to develop software such as Writes a PRD, Writes a design, Writes a project plan and Writes code to implement designed features according to the project plan",
    )

    @property
    def type_name(self):
        # The user-facing name lives on the wrapped TaskTypeDef.
        return self.value.name

    @classmethod
    def get_type(cls, type_name):
        """Return the TaskTypeDef whose name matches `type_name`, or None if no member matches."""
        return next((member.value for member in cls if member.type_name == type_name), None)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/search_space.py
metagpt/strategy/search_space.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2024/1/30 17:15
@Author  : alexanderwu
@File    : search_space.py
"""


class SearchSpace:
    """A search space whose nodes are ActionNode instances, indexed by their `key` attribute."""

    def __init__(self):
        # Maps node key -> node object.
        self.search_space = {}

    def add_node(self, node):
        """Register `node` under `node.key`, replacing any node already stored there."""
        self.search_space[node.key] = node

    def get_node(self, key):
        """Return the node registered under `key`; raises KeyError when absent."""
        return self.search_space[key]
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/tot.py
metagpt/strategy/tot.py
# -*- coding: utf-8 -*- # @Date : 12/23/2023 4:51 PM # @Author : stellahong (stellahong@fuzhi.ai) # @Desc : from __future__ import annotations import asyncio from typing import Any, List, Optional from pydantic import BaseModel, ConfigDict, Field from metagpt.llm import LLM from metagpt.logs import logger from metagpt.provider.base_llm import BaseLLM from metagpt.strategy.base import ThoughtNode, ThoughtTree from metagpt.strategy.tot_schema import MethodSelect, Strategy, ThoughtSolverConfig from metagpt.utils.common import CodeParser OUTPUT_FORMAT = """ Each output should be strictly a list of nodes, in json format, like this: ```json [ { "node_id": str = "unique identifier for a solution, can be an ordinal", "node_state_instruction": "specified sample of solution", }, ... ] ``` """ class ThoughtSolverBase(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) thought_tree: Optional[ThoughtTree] = Field(default=None) llm: BaseLLM = Field(default_factory=LLM, exclude=True) config: ThoughtSolverConfig = Field(default_factory=ThoughtSolverConfig) def __init__(self, **kwargs: Any): super().__init__(**kwargs) self.llm.use_system_prompt = False async def solve(self, init_prompt): """ Solve method for subclasses to implement. """ raise NotImplementedError("Subclasses must implement the solve method") async def generate_thoughts(self, current_state="", current_node=None) -> List[ThoughtNode]: """ Generate children thoughts based on the current state. Args: current_state (str): The current state for which thoughts are generated. current_node (ThoughtNode): The current node in the thought tree. Returns: List[ThoughtNode]: List of nodes representing the generated thoughts. 
""" state_prompt = self.config.parser.propose( current_state=current_state, **{"n_generate_sample": self.config.n_generate_sample} ) rsp = await self.llm.aask(msg=state_prompt + "\n" + OUTPUT_FORMAT) thoughts = CodeParser.parse_code(text=rsp) thoughts = eval(thoughts) # fixme 避免不跟随,生成过多nodes # valid_thoughts = [_node for idx, _node in enumerate(thoughts) if idx < self.n_generate_sample] return self.thought_tree.update_node(thoughts, current_node=current_node) async def evaluate_node(self, node, parent_value) -> None: """ Evaluate a node and update its status and value. Args: node (ThoughtNode): The node to be evaluated. parent_value (float): The parent node's value. Returns: None """ eval_prompt = self.config.parser.value(input=node.name, **{"node_id": node.id}) evaluation = await self.llm.aask(msg=eval_prompt) value = self.config.evaluator(evaluation, **{"node_id": node.id}) status = self.config.evaluator.status_verify(value) node.update_valid_status(status=status) # 累计分数 node.update_value(parent_value + value) def select_nodes(self, thought_nodes: List[ThoughtNode]) -> List[ThoughtNode]: """ Select nodes based on the configured selection method. Args: thought_nodes (List[ThoughtNode]): List of nodes to be selected. Returns: List[ThoughtNode]: List of selected nodes. """ # nodes to be selected nodes = [] if self.config.method_select == MethodSelect.SAMPLE: raise NotImplementedError elif self.config.method_select == MethodSelect.GREEDY: nodes = sorted(thought_nodes, key=lambda x: x.value, reverse=True)[: self.config.n_select_sample] for node in thought_nodes: if node not in nodes: node.parent = None # 从树中删除节点 return nodes def update_solution(self): """ Select the result with the highest score. Returns: - List[ThoughtNode]: List of nodes representing the best solution. - List[str]: List of node names forming the best solution path. 
""" best_node = max(self.thought_tree.all_nodes, key=lambda x: x.value, default=None) best_solution_path = self.thought_tree.parse_node_path(best_node) return [best_node], best_solution_path class BFSSolver(ThoughtSolverBase): async def solve(self, init_prompt=""): """ Solve the problem using Breadth-First Search (BFS) strategy. Args: init_prompt (str): The initial prompt for the solver. Returns: List[str]: The best solution path obtained through BFS. """ root = ThoughtNode(init_prompt) self.thought_tree = ThoughtTree(root) current_nodes = [root] for step in range(self.config.max_steps): solutions = await self._bfs_build(current_nodes) selected_nodes = self.select_nodes(solutions) current_nodes = selected_nodes self.thought_tree.show() best_solution, best_solution_path = self.update_solution() logger.info(f"best solution is: {best_solution_path}") return best_solution_path async def _bfs_build(self, current_nodes): """ Build the thought tree using Breadth-First Search (BFS) strategy. Args: current_nodes (List[ThoughtNode]): Current nodes to expand. Returns: List[ThoughtNode]: The solutions obtained after expanding the current nodes. """ tasks = [] for node in current_nodes: current_state = self.config.parser(node.name) current_value = node.value tasks.append(self.generate_and_evaluate_nodes(current_state, current_value, node)) thought_nodes_list = await asyncio.gather(*tasks) solutions = [child_node for thought_nodes in thought_nodes_list for child_node in thought_nodes] return solutions async def generate_and_evaluate_nodes(self, current_state, current_value, node): thought_nodes = await self.generate_thoughts(current_state, current_node=node) await asyncio.gather( *(self.evaluate_node(child_node, parent_value=current_value) for child_node in thought_nodes) ) return thought_nodes class DFSSolver(ThoughtSolverBase): async def _dfs(self, root_node): """ Perform Depth-First Search (DFS) on the thought tree. 
Args: root_node (ThoughtNode): The root node of the thought tree. Returns: List[str]: The solution path obtained through DFS. """ impossible_state_cnt = 0 node = root_node for step in range(self.max_steps): current_state = self.config.parser(node.name) current_value = node.value thought_nodes = await self.generate_thoughts(current_state, current_node=node) await self.evaluate_node(thought_nodes[0], parent_value=current_value) if thought_nodes[0].valid_status is False: impossible_state_cnt += 1 if impossible_state_cnt >= 2: logger.info("impossible state reached, break") break node = thought_nodes[0] _solution_path = self.thought_tree.parse_node_path(node) self.thought_tree.show() return _solution_path async def solve(self, init_prompt="", root=ThoughtNode("")): """ Solve the problem using Depth-First Search (DFS) strategy. Args: init_prompt (str): The initial prompt for the solver. Returns: List[str]: The best solution path obtained through DFS. """ root = ThoughtNode(init_prompt) self.thought_tree = ThoughtTree(root) for n in range(self.config.n_solution_sample): # fixme: 需要产生回退,当前节点不可用时回退到父节点,产生新的节点继续探索 await self._dfs(root) best_solution, best_solution_path = self.update_solution() logger.info(f"best solution is: {best_solution_path}") return best_solution_path class MCTSSolver(ThoughtSolverBase): async def solve(self, init_prompt=""): raise NotImplementedError class TreeofThought(BaseModel): config: ThoughtSolverConfig = Field(default_factory=ThoughtSolverConfig) solver: ThoughtSolverBase = Field(default_factory=ThoughtSolverBase) strategy: Strategy = Field(default=Strategy.BFS) class Config: arbitrary_types_allowed = True def __init__(self, **kwargs: Any): super().__init__(**kwargs) self._initialize_solver(self.strategy) def _initialize_solver(self, strategy): """ Initialize the solver based on the chosen strategy. Args: strategy (Strategy): The strategy to use for solving. Returns: ThoughtSolverBase: An instance of the appropriate solver. 
""" if strategy == Strategy.BFS: self.solver = BFSSolver(config=self.config) elif strategy == Strategy.DFS: self.solver = DFSSolver(config=self.config) elif strategy == Strategy.MCTS: self.solver = MCTSSolver(config=self.config) else: raise NotImplementedError(f"Invalid strategy: {strategy}, only support BFS/DFS/MCTS currently!") async def solve(self, init_prompt=""): """ Solve the problem using the specified strategy. Args: init_prompt (str): The initial prompt for the solver. strategy (str): The strategy to use for solving. Returns: Any: The solution obtained using the selected strategy. """ await self.solver.solve(init_prompt)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/tot_schema.py
metagpt/strategy/tot_schema.py
# -*- coding: utf-8 -*-
# @Date    : 12/25/2023 9:14 PM
# @Author  : stellahong (stellahong@fuzhi.ai)
# @Desc    :
from enum import Enum

from pydantic import BaseModel, Field

from metagpt.strategy.base import BaseEvaluator, BaseParser


class MethodSelect(Enum):
    """How candidate thought nodes are chosen at each expansion step."""

    SAMPLE = "sample"
    GREEDY = "greedy"


class Strategy(Enum):
    """Tree-search strategy used by the Tree-of-Thought solver."""

    BFS = "BFS"
    DFS = "DFS"
    MCTS = "MCTS"


class ThoughtSolverConfig(BaseModel):
    """Tunable parameters for a Tree-of-Thought solver run."""

    max_steps: int = 3
    # FIX: this field was annotated `str` while its default (and every comparison in
    # the solvers, e.g. `== MethodSelect.GREEDY`) uses the MethodSelect enum, so a
    # genuine string value would silently fail those comparisons. Pydantic still
    # coerces "sample"/"greedy" strings into the enum, so callers are unaffected.
    method_select: MethodSelect = MethodSelect.GREEDY  # ["sample"/"greedy"]
    n_generate_sample: int = 5  # per node
    n_select_sample: int = 3  # per path
    n_solution_sample: int = 5  # only for dfs
    parser: BaseParser = Field(default_factory=BaseParser)
    evaluator: BaseEvaluator = Field(default_factory=BaseEvaluator)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/experience_retriever.py
metagpt/strategy/experience_retriever.py
from typing import Literal from pydantic import BaseModel class ExpRetriever(BaseModel): """interface for experience retriever""" def retrieve(self, context: str = "") -> str: raise NotImplementedError class DummyExpRetriever(ExpRetriever): """A dummy experience retriever that returns empty string.""" def retrieve(self, context: str = "") -> str: return self.EXAMPLE EXAMPLE: str = "" class TRDAllExpRetriever(ExpRetriever): def retrieve(self, context: str = "") -> str: return self.EXAMPLE EXAMPLE: str = """ ## example 1 User Requirement: Given some user requirements, write a software framework. Explanation: Given a complete user requirement, to write a TRD and software framework, you must follow all of the following steps to complete the TRD output required by the user: 1. Call 'write_trd' to generate TRD; 2. Call 'write_framework' to implement TRD into the software framework. ```json [ { "command_name": "write_trd_and_framework", "task_id": "1", "dependent_task_ids": [], "instruction": "Execute `write_trd_and_framework` to write a TRD and software framework based on user requirements", "args": { "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala..." } } ] ``` ## example 2 User Requirement: Given some user requirements, write a software framework. Explanation: Given a complete user requirement, to write a software framework, you must follow all of the following steps to complete the TRD output required by the user: 1. Call 'write_trd' to generate TRD; 2. Call 'write_framework' to implement TRD into the software framework. 
```json [ { "command_name": "write_trd", "task_id": "1", "dependent_task_ids": [], "instruction": "Execute `write_trd` to write the TRD based on user requirements", "args": { "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", } }, { "command_name": "write_framework", "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Execute `write_framework` to write the framework based on the TRD", "args": { "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `write_trd`", "additional_technical_requirements": "These are additional technical requirements, balabala..." } } ] ``` ## example 3 User Requirement: Given some user requirements, write a TRD, and implement the TRD within a software framework. Explanation: Given a complete requirement, 要写TRD需要follow如下步骤: 1. 调用`CompressExternalInterfaces.run`,从acknowledgement中抽取external interfaces的信息; 2. 按顺序执行如下步骤: 2.1. 执行`DetectInteraction.run`; 2.2. 执行`WriteTRD.run`; 2.3. 执行`EvaluateTRD.run`; 2.4. 检查`EvaluateTRD.run`的结果: 2.4.1. 如果`EvaluateTRD.run`的结果被判定为pass,则执行步骤3; 2.4.2. 如果`EvaluateTRD.run`的结果被判定为deny,则继续执行步骤2; 3. 按顺序执行如下步骤: 3.1. 执行`WriteFramework.run`; 3.2. 执行`EvaluateFramework.run`; 3.3. 检查`EvaluateFramework.run`的结果: 3.3.1. 如果`EvaluateFramework.run`的结果被判定为pass,则执行步骤4; 3.3.2. 如果`EvaluateFramework.run`的结果被判定为deny,则继续执行步骤3; 3.3.3. 如果已经重复执行步骤3超过9次,则执行步骤4; 4. 执行`save_framework`,将`WriteFramework.run`的结果保存下来; ```json [ { "command_name": "CompressExternalInterfaces.run", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "acknowledge": "## Interfaces\n balabala..." 
} }, { "command_name": "DetectInteraction.run", "args": { "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", } }, { "command_name": "WriteTRD.run", "args": { "task_id": "3", "dependent_task_ids": ["2"], "instruction": "Execute `WriteTRD.run` to write TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`" } }, { "command_name": "EvaluateTRD.run", "args": { "task_id": "4", "dependent_task_ids": ["3"], "instruction": "Execute `EvaluateTRD.run` to evaluate the TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output>", "trd": "<trd> returned by `EvaluateTRD.run`" } }, { "command_name": "DetectInteraction.run", "args": { "task_id": "5", "dependent_task_ids": ["4"], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "evaluation_conclusion": "<evaluation_conclusion> returned by `EvaluateTRD.run`" } }, { "command_name": "WriteTRD.run", "args": { "task_id": "6", "dependent_task_ids": ["5"], "instruction": "Execute `WriteTRD.run` to write TRD", "user_requirements": "This 
is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`", "previous_version_trd": "<trd> returned by `WriteTRD.run`" } }, { "command_name": "EvaluateTRD.run", "args": { "task_id": "7", "dependent_task_ids": ["6"], "instruction": "Execute `EvaluateTRD.run` to evaluate the TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`", "trd": "<trd> returned by `WriteTRD.run`", } }, { "command_name": "WriteFramework.run", "args": { "task_id": "8", "dependent_task_ids": ["7"], "instruction": "Execute `WriteFramework.run` to write a software framework according to the TRD", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "EvaluateFramework.run", "args": { "task_id": "9", "dependent_task_ids": ["8"], "instruction": "Execute `EvaluateFramework.run` to evaluate the software framework returned by `WriteFramework.run`", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "legacy_output": "<framework> returned by `WriteFramework.run`", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": 
"WriteFramework.run", "args": { "task_id": "10", "dependent_task_ids": ["9"], "instruction": "Execute `WriteFramework.run` to write a software framework according to the TRD", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "EvaluateFramework.run", "args": { "task_id": "11", "dependent_task_ids": ["10"], "instruction": "Execute `EvaluateFramework.run` to evaluate the software framework returned by `WriteFramework.run`", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "legacy_output": "<framework> returned by `WriteFramework.run`", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "save_framework", "args": { "task_id": "12", "dependent_task_ids": ["11"], "instruction": "Execute `save_framework` to save the software framework returned by `WriteFramework.run`", "dir_data": "<framework> returned by `WriteFramework.run`", } } ] ``` """ class TRDToolExpRetriever(ExpRetriever): """A TRD-related experience retriever that returns empty string.""" def retrieve(self, context: str = "") -> str: return self.EXAMPLE EXAMPLE: str = """ ## example 1 User Requirement: Given some user requirements, write a software framework. Explanation: Given a complete user requirement, to write a TRD and software framework, you must follow all of the following steps to complete the TRD output required by the user: 1. Call 'write_trd' to generate TRD; 2. Call 'write_framework' to implement TRD into the software framework. 
```json [ { "command_name": "write_trd_and_framework", "task_id": "1", "dependent_task_ids": [], "instruction": "Execute `write_trd_and_framework` to write a TRD and software framework based on user requirements", "args": { "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala..." } } ] """ # EXAMPLE: str = """ # ## example 1 # User Requirement: Given some user requirements, write a software framework. # Explanation: Given a complete user requirement, to write a software framework, you must follow all of the following steps to complete the TRD output required by the user: 1. Call 'write_trd' to generate TRD; 2. Call 'write_framework' to implement TRD into the software framework. # ```json # [ # { # "command_name": "write_trd", # "task_id": "1", # "dependent_task_ids": [], # "instruction": "Execute `write_trd` to write the TRD based on user requirements", # "args": { # "user_requirements": "This is user requirement balabala...", # "use_case_actors": "These are actors involved in the use case, balabala...", # } # }, # { # "command_name": "write_framework", # "task_id": "2", # "dependent_task_ids": ["1"], # "instruction": "Execute `write_framework` to write the framework based on the TRD", # "args": { # "use_case_actors": "These are actors involved in the use case, balabala...", # "trd": "<trd> returned by `write_trd`", # "additional_technical_requirements": "These are additional technical requirements, balabala..." # } # } # ] # ``` # """ class TRDExpRetriever(ExpRetriever): """A TRD-related experience retriever that returns empty string.""" def retrieve(self, context: str = "") -> str: return self.EXAMPLE EXAMPLE: str = """ ## example 1 User Requirement: Given some user requirements, write a TRD, and implement the TRD within a software framework. 
Explanation: Given a complete requirement, 要写TRD需要follow如下步骤: 1. 调用`CompressExternalInterfaces.run`,从acknowledgement中抽取external interfaces的信息; 2. 按顺序执行如下步骤: 2.1. 执行`DetectInteraction.run`; 2.2. 执行`WriteTRD.run`; 2.3. 执行`EvaluateTRD.run`; 2.4. 检查`EvaluateTRD.run`的结果: 2.4.1. 如果`EvaluateTRD.run`的结果被判定为pass,则执行步骤3; 2.4.2. 如果`EvaluateTRD.run`的结果被判定为deny,则继续执行步骤2; 3. 按顺序执行如下步骤: 3.1. 执行`WriteFramework.run`; 3.2. 执行`EvaluateFramework.run`; 3.3. 检查`EvaluateFramework.run`的结果: 3.3.1. 如果`EvaluateFramework.run`的结果被判定为pass,则执行步骤4; 3.3.2. 如果`EvaluateFramework.run`的结果被判定为deny,则继续执行步骤3; 3.3.3. 如果已经重复执行步骤3超过9次,则执行步骤4; 4. 执行`save_framework`,将`WriteFramework.run`的结果保存下来; ```json [ { "command_name": "CompressExternalInterfaces.run", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "acknowledge": "## Interfaces\n balabala..." } }, { "command_name": "DetectInteraction.run", "args": { "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", } }, { "command_name": "WriteTRD.run", "args": { "task_id": "3", "dependent_task_ids": ["2"], "instruction": "Execute `WriteTRD.run` to write TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`" } }, { "command_name": "EvaluateTRD.run", "args": { "task_id": "4", "dependent_task_ids": ["3"], "instruction": "Execute `EvaluateTRD.run` to evaluate the TRD", "user_requirements": "This is user 
requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output>", "trd": "<trd> returned by `EvaluateTRD.run`" } }, { "command_name": "DetectInteraction.run", "args": { "task_id": "5", "dependent_task_ids": ["4"], "instruction": "Execute `DetectInteraction.run` to extract external interfaces information from acknowledgement.", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "evaluation_conclusion": "<evaluation_conclusion> returned by `EvaluateTRD.run`" } }, { "command_name": "WriteTRD.run", "args": { "task_id": "6", "dependent_task_ids": ["5"], "instruction": "Execute `WriteTRD.run` to write TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`", "previous_version_trd": "<trd> returned by `WriteTRD.run`" } }, { "command_name": "EvaluateTRD.run", "args": { "task_id": "7", "dependent_task_ids": ["6"], "instruction": "Execute `EvaluateTRD.run` to evaluate the TRD", "user_requirements": "This is user requirement balabala...", "use_case_actors": "These are actors involved in the use case, balabala...", "available_external_interfaces": "<compressed_external_interfaces_output> returned by `CompressExternalInterfaces.run`", "interaction_events": "<detected_interaction_events_output> returned by `DetectInteraction.run`", "trd": "<trd> returned by `WriteTRD.run`", } }, { "command_name": "WriteFramework.run", "args": { "task_id": "8", "dependent_task_ids": ["7"], 
"instruction": "Execute `WriteFramework.run` to write a software framework according to the TRD", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "EvaluateFramework.run", "args": { "task_id": "9", "dependent_task_ids": ["8"], "instruction": "Execute `EvaluateFramework.run` to evaluate the software framework returned by `WriteFramework.run`", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "legacy_output": "<framework> returned by `WriteFramework.run`", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "WriteFramework.run", "args": { "task_id": "10", "dependent_task_ids": ["9"], "instruction": "Execute `WriteFramework.run` to write a software framework according to the TRD", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "EvaluateFramework.run", "args": { "task_id": "11", "dependent_task_ids": ["10"], "instruction": "Execute `EvaluateFramework.run` to evaluate the software framework returned by `WriteFramework.run`", "use_case_actors": "These are actors involved in the use case, balabala...", "trd": "<trd> returned by `WriteTRD.run`", "acknowledge": "## Interfaces\n balabala...", "legacy_output": "<framework> returned by `WriteFramework.run`", "additional_technical_requirements": "These are additional technical requirements, balabala...", } }, { "command_name": "save_framework", "args": { "task_id": "12", 
"dependent_task_ids": ["11"], "instruction": "Execute `save_framework` to save the software framework returned by `WriteFramework.run`", "dir_data": "<framework> returned by `WriteFramework.run`", } } ] ``` """ TL_EXAMPLE = """ ## example 1 User Requirement: Create a cli snake game. Explanation: The requirement is about software development. Assign each tasks to a different team member based on their expertise. When publishing message to Product Manager, we copy original user requirement directly to ensure no information loss. ```json [ { "command_name": "Plan.append_task", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Use Vite, React, MUI, Tailwind CSS for the program. And create a product requirement document (PRD). ", "assignee": "Alice" } }, { "command_name": "Plan.append_task", "args": { "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Use Vite, React, MUI, Tailwind CSS for the program. Design the software architecture for the CLI snake game.", "assignee": "Bob" } }, { "command_name": "Plan.append_task", "args": { "task_id": "3", "dependent_task_ids": ["2"], "instruction": "Break down the architecture into manageable tasks, identify task dependencies, and prepare a detailed task list for implementation.", "assignee": "Eve" } }, { "command_name": "Plan.append_task", "args": { "task_id": "4", "dependent_task_ids": ["3"], "instruction": "Use Vite, React, MUI, Tailwind CSS for the program. Implement the core game logic for the CLI snake game, including snake movement, food generation, and score tracking.", "assignee": "Alex" } }, { "command_name": "TeamLeader.publish_message", "args": { "content": "Use Vite, React, MUI, Tailwind CSS for the program. Create a cli snake game.", "send_to": "Alice" } }, { "command_name": "RoleZero.reply_to_human", "args": { "content": "I have assigned the tasks to the team members. 
Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly" } }, { "command_name": "end" } ] ``` ## example 2 User Requirement: Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy. Explanation: DON'T decompose requirement if it is a DATA-RELATED task, assign a single task directly to Data Analyst David. He will manage the decomposition and implementation. ```json [ { "command_name": "Plan.append_task", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy.", "assignee": "David" } }, { "command_name": "TeamLeader.publish_message", "args": { "content": "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy.", "send_to": "David" } }, { "command_name": "RoleZero.reply_to_human", "args": { "content": "I have assigned the task to David. He will break down the task further by himself and starts solving it.", } }, { "command_name": "end" } ] ``` ## example 3 Conversation History: [ ..., {'role': 'assistant', 'content': 'from Alice(Product Manager) to {'<all>'}: Request is completed, with outputs: Command WritePRD executed: PRD filename: "/tmp/workspace/snake_game/docs/prd.json"'}, ] Explanation: You received a message from Alice, the Product Manager, that she has completed the PRD, use Plan.finish_current_task to mark her task as finished and moves the plan to the next task. Based on plan status, next task is for Bob (Architect), publish a message asking him to start. 
The message content should contain important path info. ```json [ { "command_name": "Plan.finish_current_task", "args": {} }, { "command_name": "TeamLeader.publish_message", "args": { "content": "Please design the software architecture for the snake game based on the PRD created by Alice. The PRD is at '/tmp/workspace/snake_game/docs/prd.json'.", "send_to": "Bob" } }, { "command_name": "RoleZero.reply_to_human", "args": { "content": "Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture." } }, { "command_name": "end" } ] ``` ## example 4 User Question: how does the project go? Explanation: The user is asking for a general update on the project status. Give a straight answer about the current task the team is working on and provide a summary of the completed tasks. ```json [ { "command_name": "RoleZero.reply_to_human", "args": { "content": "The team is currently working on ... We have completed ..." } }, { "command_name": "end" } ] ``` ## example 5 OBSERVATION : current task is none and all task is finished. Explanation: Last task is "Plan.finish_current_task" or 'RoleZero.reply_to_human' and now the current task is none, it means everything is done.Just coutput command "end". ```json [ { "command_name": "end" } ] ## example 6 OBSERVATION : The previously completed task is identical to the current task. Explanation: The current task has been accomplished previously. ```json [ { "command_name": "Plan.finish_current_task", "args": {} }, ] ``` ## example 7 OBSERVATION : the task assigned to Alice is still ongoing as it has not been marked as finished. The current task in the plan is for Alice to create the PRD. Explanation: "I attempted to locate historical records containing 'send to [<all>]', and discovered an entry stating 'PRD is finished and masked.' This indicates that Alice's task has been completed. 
```json [ { "command_name": "Plan.finish_current_task", "args": {} }, ] ``` """ class SimpleExpRetriever(ExpRetriever): """A simple experience retriever that returns manually crafted examples.""" def retrieve(self, context: str = "") -> str: return TL_EXAMPLE class KeywordExpRetriever(ExpRetriever): """An experience retriever that returns examples based on keywords in the context.""" def retrieve(self, context: str, exp_type: Literal["plan", "task"] = "plan") -> str: if exp_type == "plan": if "deploy" in context.lower(): return DEPLOY_EXAMPLE elif "issue" in context.lower(): return FIX_ISSUE_EXAMPLE elif "https:" in context.lower() or "http:" in context.lower() or "search" in context.lower(): if "search" in context.lower() or "click" in context.lower(): return WEB_SCRAPING_EXAMPLE return WEB_SCRAPING_EXAMPLE_SIMPLE # elif exp_type == "task": # if "diagnose" in context.lower(): # return SEARCH_SYMBOL_EXAMPLE return "" DEPLOY_EXAMPLE = """ ## example 1 User Requirement: launch a service from workspace/web_snake_game/web_snake_game, and deploy it to public Explanation: Launching a service requires Terminal tool with daemon mode, write this into task instruction. ```json [ { "command_name": "Plan.append_task", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Use the Terminal tool to launch the service in daemon mode", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Test the service with a simple request", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "3", "dependent_task_ids": ["2"], "instruction": "Deploy the service to public", "assignee": "David" } }, ] """ FIX_ISSUE_EXAMPLE = """ ## example 1 User Requirement: Write a fix for this issue: https://github.com/xxx/xxx/issues/xxx, and commit, push your changes, and create a PR to the target repo. Explanation: The requirement is to fix an issue in an existing repository. 
The process is broken down into several steps, each demanding specific actions and tools. ```json [ { "command_name": "Plan.append_task", "args": { "task_id": "1", "dependent_task_ids": [], "instruction": "Read the issue description to understand the problem using the Browser tool.", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "2", "dependent_task_ids": ["1"], "instruction": "Clone the repository using the Terminal tool.", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "3", "dependent_task_ids": ["2"], "instruction": "Use Editor to search relevant function(s) or open relevant files, then diagnose and identify the source of the problem.", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "4", "dependent_task_ids": ["3"], "instruction": "Use Editor tool to fix the problem in the corresponding file(s).", "assignee": "David" } }, { "command_name": "Plan.append_task", "args": { "task_id": "5", "dependent_task_ids": ["4"],
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
true
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/thinking_command.py
metagpt/strategy/thinking_command.py
from __future__ import annotations

from enum import Enum

from pydantic import BaseModel

from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.memory import Memory
from metagpt.roles import Role
from metagpt.schema import Message


class CommandDef(BaseModel):
    """Static definition of a command: its name, call signature, and usage guidance."""

    name: str
    signature: str = ""
    desc: str = ""


class Command(Enum):
    """Commands a thinking role may issue, grouped into planning, env interaction, and common."""

    # commands for planning
    APPEND_TASK = CommandDef(
        name="append_task",
        signature="append_task(task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str)",
        desc="Append a new task with task_id (number) to the end of existing task sequences. If dependent_task_ids is not empty, the task will depend on the tasks with the ids in the list.",
    )
    RESET_TASK = CommandDef(
        name="reset_task",
        signature="reset_task(task_id: str)",
        desc="Reset a task based on task_id, i.e. set Task.is_finished=False and request redo. This also resets all tasks depending on it.",
    )
    REPLACE_TASK = CommandDef(
        name="replace_task",
        signature="replace_task(task_id: str, new_dependent_task_ids: list[str], new_instruction: str, new_assignee: str)",
        desc="Replace an existing task (can be current task) based on task_id, and reset all tasks depending on it.",
    )
    FINISH_CURRENT_TASK = CommandDef(
        name="finish_current_task",
        signature="finish_current_task()",
        desc="Finishes current task, set Task.is_finished=True, set current task to next task",
    )

    # commands for env interaction
    PUBLISH_MESSAGE = CommandDef(
        name="publish_message",
        signature="publish_message(content: str, send_to: str)",
        desc="Publish a message to a team member, use member name to fill send_to args. You may copy the full original content or add additional information from upstream. This will make team members start their work. DONT omit any necessary info such as path, link, environment, programming language, framework, requirement, constraint from original content to team members because you are their sole info source.",
    )
    REPLY_TO_HUMAN = CommandDef(
        name="reply_to_human",
        signature="reply_to_human(content: str)",
        desc="Reply to human user with the content provided. Use this when you have a clear answer or solution to the user's question.",
    )
    ASK_HUMAN = CommandDef(
        name="ask_human",
        signature="ask_human(question: str)",
        desc="Use this when you fail the current task or if you are unsure of the situation encountered. Your response should contain a brief summary of your situation, ended with a clear and concise question.",
    )

    # common commands
    PASS = CommandDef(
        name="pass",
        signature="pass",
        desc="Pass and do nothing, if you don't think the plan needs to be updated nor a message to be published or forwarded. The reasons can be the latest message is unnecessary or obsolete, or you want to wait for more information before making a move.",
    )

    @property
    def cmd_name(self) -> str:
        """Runtime name of the command, used to match parsed LLM command dicts."""
        return self.value.name


def prepare_command_prompt(commands: list[Command]) -> str:
    """Render a numbered prompt section listing each command's signature and description."""
    command_prompt = ""
    for i, command in enumerate(commands):
        command_prompt += f"{i+1}. {command.value.signature}:\n{command.value.desc}\n\n"
    return command_prompt


async def run_env_command(role: Role, cmd: dict, role_memory: Memory = None):
    """Execute a single environment-interaction command (publish/ask/reply).

    Fix: the original annotated ``cmd`` as ``list[dict]``, but every access treats it
    as a single parsed command dict like ``{"command_name": ..., "args": {...}}``.
    The dispatch is also unified into one if/elif chain (command_name is a single
    value, so the semantics are unchanged).

    Args:
        role: The role issuing the command; env commands only apply inside an MGXEnv.
        cmd: One parsed command dict.
        role_memory: Currently unused; kept for interface stability.
    """
    if not isinstance(role.rc.env, MGXEnv):
        return
    if cmd["command_name"] == Command.PUBLISH_MESSAGE.cmd_name:
        role.publish_message(Message(**cmd["args"]))
    elif cmd["command_name"] == Command.ASK_HUMAN.cmd_name:
        # TODO: Operation on role memory should not appear here, consider moving it into role
        role.rc.working_memory.add(Message(content=cmd["args"]["question"], role="assistant"))
        human_rsp = await role.rc.env.ask_human(sent_from=role, **cmd["args"])
        role.rc.working_memory.add(Message(content=human_rsp, role="user"))
    elif cmd["command_name"] == Command.REPLY_TO_HUMAN.cmd_name:
        # TODO: consider if the message should go into memory
        await role.rc.env.reply_to_human(sent_from=role, **cmd["args"])


def run_plan_command(role: Role, cmd: dict):
    """Apply a single planning command (append/reset/replace/finish) to the role's plan.

    Args:
        role: The role whose planner is mutated.
        cmd: One parsed command dict with "command_name" and "args" keys.
            (Annotation fixed from the original's incorrect ``list[dict]``.)
    """
    if cmd["command_name"] == Command.APPEND_TASK.cmd_name:
        role.planner.plan.append_task(**cmd["args"])
    elif cmd["command_name"] == Command.RESET_TASK.cmd_name:
        role.planner.plan.reset_task(**cmd["args"])
    elif cmd["command_name"] == Command.REPLACE_TASK.cmd_name:
        role.planner.plan.replace_task(**cmd["args"])
    elif cmd["command_name"] == Command.FINISH_CURRENT_TASK.cmd_name:
        if role.planner.plan.is_plan_finished():
            return
        if role.task_result:
            role.planner.plan.current_task.update_task_result(task_result=role.task_result)
        role.planner.plan.finish_current_task()
        role.rc.working_memory.clear()


async def run_commands(role: Role, cmds: list[dict], role_memory: Memory = None):
    """Run a batch of parsed commands: env interaction first, then plan mutation.

    Deactivates the role (state -1) once the whole plan is finished.
    """
    print(*cmds, sep="\n")
    for cmd in cmds:
        await run_env_command(role, cmd, role_memory)
        run_plan_command(role, cmd)

    if role.planner.plan.is_plan_finished():
        role._set_state(-1)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/solver.py
metagpt/strategy/solver.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2024/1/30 17:13
@Author  : alexanderwu
@File    : solver.py
"""
from abc import abstractmethod

from metagpt.actions.action_graph import ActionGraph
from metagpt.provider.base_llm import BaseLLM
from metagpt.strategy.search_space import SearchSpace


class BaseSolver:
    """Common interface every solver strategy implements."""

    def __init__(self, graph: ActionGraph, search_space: SearchSpace, llm: BaseLLM, context):
        """
        :param graph: ActionGraph
        :param search_space: SearchSpace
        :param llm: BaseLLM
        :param context: Context
        """
        self.graph = graph
        self.search_space = search_space
        self.llm = llm
        self.context = context

    @abstractmethod
    async def solve(self):
        """Solve the problem; concrete strategies must override this."""


class NaiveSolver(BaseSolver):
    """Run every node of the graph one after another, in topological order."""

    async def solve(self):
        self.graph.topological_sort()
        for node_key in self.graph.execution_order:
            node = self.graph.nodes[node_key]
            await node.fill(req=self.context, llm=self.llm, mode="root")


class TOTSolver(BaseSolver):
    """Tree-of-Thought search strategy (not implemented yet)."""

    async def solve(self):
        raise NotImplementedError


class DataInterpreterSolver(BaseSolver):
    """Write and run code for the nodes in the graph (not implemented yet)."""

    async def solve(self):
        raise NotImplementedError


class ReActSolver(BaseSolver):
    """ReAct-style reasoning/acting loop (not implemented yet)."""

    async def solve(self):
        raise NotImplementedError


class IOSolver(BaseSolver):
    """Direct LLM input/output solving (not implemented yet)."""

    async def solve(self):
        raise NotImplementedError


class COTSolver(BaseSolver):
    """Chain-of-Thought solving (not implemented yet)."""

    async def solve(self):
        raise NotImplementedError
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/__init__.py
metagpt/strategy/__init__.py
# -*- coding: utf-8 -*- # @Date : 12/23/2023 4:51 PM # @Author : stellahong (stellahong@fuzhi.ai) # @Desc :
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/planner.py
metagpt/strategy/planner.py
from __future__ import annotations

import json
from typing import List

from pydantic import BaseModel, Field

from metagpt.actions.di.ask_review import AskReview, ReviewConst
from metagpt.actions.di.write_plan import (
    WritePlan,
    precheck_update_plan_from_rsp,
    update_plan_from_rsp,
)
from metagpt.logs import logger
from metagpt.memory import Memory
from metagpt.schema import Message, Plan, Task, TaskResult
from metagpt.strategy.task_type import TaskType
from metagpt.utils.common import remove_comments

# Template for the structured context handed to the LLM when (re)writing the plan.
STRUCTURAL_CONTEXT = """
## User Requirement
{user_requirement}
## Context
{context}
## Current Plan
{tasks}
## Current Task
{current_task}
"""

# Template summarizing plan progress; rendered by get_plan_status() for code-writing guidance.
PLAN_STATUS = """
## Finished Tasks
### code
```python
{code_written}
```
### execution result
{task_results}
## Current Task
{current_task}
## Finished Section of Current Task
### code
```python
{current_task_code}
```
### execution result
{current_task_result}
## Task Guidance
Write code for the incomplete sections of 'Current Task'. And avoid duplicating code from 'Finished Tasks' and 'Finished Section of Current Task', such as repeated import of packages, reading data, etc.
Specifically, {guidance}
"""


class Planner(BaseModel):
    """Drives a task plan: writes/updates it with an LLM, asks for review, and records progress."""

    plan: Plan
    working_memory: Memory = Field(
        default_factory=Memory
    )  # memory for working on each task, discarded each time a task is done
    auto_run: bool = False

    def __init__(self, goal: str = "", plan: Plan = None, **kwargs):
        # Allow construction either from an explicit Plan or from a bare goal string.
        plan = plan or Plan(goal=goal)
        super().__init__(plan=plan, **kwargs)

    @property
    def current_task(self):
        return self.plan.current_task

    @property
    def current_task_id(self):
        return self.plan.current_task_id

    async def update_plan(self, goal: str = "", max_tasks: int = 3, max_retries: int = 3):
        """Regenerate the plan with the LLM until it passes prechecks and review.

        Args:
            goal: If provided, replaces the current plan with a fresh one for this goal.
            max_tasks: Upper bound on the number of tasks the LLM should produce.
            max_retries: How many invalid LLM responses to tolerate before falling through to review.
        """
        if goal:
            self.plan = Plan(goal=goal)

        plan_confirmed = False
        while not plan_confirmed:
            context = self.get_useful_memories()
            rsp = await WritePlan().run(context, max_tasks=max_tasks)
            self.working_memory.add(Message(content=rsp, role="assistant", cause_by=WritePlan))

            # precheck plan before asking reviews
            is_plan_valid, error = precheck_update_plan_from_rsp(rsp, self.plan)
            if not is_plan_valid and max_retries > 0:
                error_msg = f"The generated plan is not valid with error: {error}, try regenerating, remember to generate either the whole plan or the single changed task only"
                logger.warning(error_msg)
                self.working_memory.add(Message(content=error_msg, role="assistant", cause_by=WritePlan))
                max_retries -= 1
                continue

            _, plan_confirmed = await self.ask_review(trigger=ReviewConst.TASK_REVIEW_TRIGGER)

        update_plan_from_rsp(rsp=rsp, current_plan=self.plan)

        self.working_memory.clear()

    async def process_task_result(self, task_result: TaskResult):
        """Route a finished task's result: confirm it, let the role redo it, or replan."""
        # ask for acceptance, users can either refuse and change tasks in the plan
        review, task_result_confirmed = await self.ask_review(task_result)

        if task_result_confirmed:
            # tick off this task and record progress
            await self.confirm_task(self.current_task, task_result, review)

        elif "redo" in review:
            # Ask the Role to redo this task with help of review feedback,
            # useful when the code run is successful but the procedure or result is not what we want
            pass  # simply pass, not confirming the result

        else:
            # update plan according to user's feedback and to take on changed tasks
            await self.update_plan()

    async def ask_review(
        self,
        task_result: TaskResult = None,
        auto_run: bool = None,
        trigger: str = ReviewConst.TASK_REVIEW_TRIGGER,
        review_context_len: int = 5,
    ):
        """
        Ask to review the task result, reviewer needs to provide confirmation or request change.
        If human confirms the task result, then we deem the task completed, regardless of whether the code run succeeds;
        if auto mode, then the code run has to succeed for the task to be considered completed.

        Returns:
            tuple[str, bool]: The review text (empty in auto mode) and whether the result is confirmed.
        """
        auto_run = auto_run if auto_run is not None else self.auto_run
        if not auto_run:
            context = self.get_useful_memories()
            review, confirmed = await AskReview().run(
                context=context[-review_context_len:], plan=self.plan, trigger=trigger
            )
            if not confirmed:
                self.working_memory.add(Message(content=review, role="user", cause_by=AskReview))
            return review, confirmed
        confirmed = task_result.is_success if task_result else True
        return "", confirmed

    async def confirm_task(self, task: Task, task_result: TaskResult, review: str):
        """Record the task result, advance the plan, and replan if the review asks for more changes."""
        task.update_task_result(task_result=task_result)
        self.plan.finish_current_task()
        self.working_memory.clear()

        confirmed_and_more = (
            ReviewConst.CONTINUE_WORDS[0] in review.lower() and review.lower() not in ReviewConst.CONTINUE_WORDS[0]
        )  # "confirm, ... (more content, such as changing downstream tasks)"
        if confirmed_and_more:
            self.working_memory.add(Message(content=review, role="user", cause_by=AskReview))
            await self.update_plan()

    def get_useful_memories(self, task_exclude_field=None) -> list[Message]:
        """find useful memories only to reduce context length and improve performance"""
        user_requirement = self.plan.goal
        context = self.plan.context
        tasks = [task.dict(exclude=task_exclude_field) for task in self.plan.tasks]
        tasks = json.dumps(tasks, indent=4, ensure_ascii=False)
        current_task = self.plan.current_task.json() if self.plan.current_task else {}
        context = STRUCTURAL_CONTEXT.format(
            user_requirement=user_requirement, context=context, tasks=tasks, current_task=current_task
        )
        context_msg = [Message(content=context, role="user")]

        return context_msg + self.working_memory.get()

    def get_plan_status(self, exclude: List[str] = None) -> str:
        """Render PLAN_STATUS with finished-task code/results and current-task guidance.

        Args:
            exclude: Optional section names ("code", "task_result") to omit from the output.
        """
        # prepare components of a plan status
        exclude = exclude or []
        exclude_prompt = "omit here"
        finished_tasks = self.plan.get_finished_tasks()
        code_written = [remove_comments(task.code) for task in finished_tasks]
        code_written = "\n\n".join(code_written)
        task_results = [task.result for task in finished_tasks]
        task_results = "\n\n".join(task_results)
        task_type_name = self.current_task.task_type
        task_type = TaskType.get_type(task_type_name)
        guidance = task_type.guidance if task_type else ""

        # combine components in a prompt
        prompt = PLAN_STATUS.format(
            code_written=code_written if "code" not in exclude else exclude_prompt,
            task_results=task_results if "task_result" not in exclude else exclude_prompt,
            current_task=self.current_task.instruction,
            current_task_code=self.current_task.code if "code" not in exclude else exclude_prompt,
            current_task_result=self.current_task.result if "task_result" not in exclude else exclude_prompt,
            guidance=guidance,
        )
        return prompt
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/strategy/base.py
metagpt/strategy/base.py
# -*- coding: utf-8 -*-
# @Date : 12/25/2023 9:16 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
from abc import ABC
from typing import List

from anytree import Node, RenderTree
from pydantic import BaseModel


class BaseParser(BaseModel, ABC):
    """Interface for thought parsers: propose/sample next states and value them."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def propose(self, current_state: str, **kwargs) -> str:
        """Propose next thought state(s) from the current state."""
        raise NotImplementedError

    def sample(self, current_state: str, **kwargs) -> str:
        """Sample a next thought state from the current state."""
        raise NotImplementedError

    def value(self, input: str, **kwargs) -> str:
        """Estimate the value of a thought state."""
        raise NotImplementedError


class BaseEvaluator(BaseModel, ABC):
    """Interface for evaluators that verify the status/validity of a thought."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def status_verify(self, *args, **kwargs):
        raise NotImplementedError


class ThoughtNode(Node):
    """A node representing a thought in the thought tree."""

    name: str = ""
    value: int = 0
    id: int = 0
    valid_status: bool = True

    def update_value(self, value) -> None:
        """Update the value of the thought node."""
        self.value = value

    def update_valid_status(self, status) -> None:
        """Update the validity status of the thought node."""
        self.valid_status = status


class ThoughtTree(RenderTree):
    """A tree structure to represent thoughts."""

    @property
    def all_nodes(self) -> List[ThoughtNode]:
        """
        Get a list of all nodes in the thought tree.

        Returns:
            List[ThoughtNode]: A list containing all nodes in the thought tree.
        """
        all_nodes = [node for _, _, node in self]
        return all_nodes

    def update_node(self, thought: List[dict] = None, current_node: ThoughtNode = None) -> List[ThoughtNode]:
        """
        Update the tree with new thoughts.

        Args:
            thought (List[dict]): A list of dictionaries representing thought information.
                Defaults to an empty list.
            current_node (ThoughtNode): The current node under which new thoughts will be added.

        Returns:
            List[ThoughtNode]: A list of ThoughtNode instances representing the updated tree nodes.
        """
        # Fix: the original used a mutable default argument (thought=[]); bind a
        # fresh list per call to avoid the shared-default pitfall.
        thought = thought if thought is not None else []
        nodes = []
        for node_info in thought:
            node = ThoughtNode(
                name=node_info["node_state_instruction"], parent=current_node, id=int(node_info["node_id"])
            )
            nodes.append(node)
        return nodes

    def parse_node_path(self, node) -> List[str]:
        """
        Parse and retrieve the hierarchical path of the given thought node.

        This method traverses the parent nodes of the provided 'node'
        and constructs the full path from the root node to the given node.

        Args:
            node: The thought node for which the hierarchical path needs to be parsed.

        Returns:
            List[str]: A list representing the full hierarchical path of the given thought node.
                The list is ordered from the root node to the provided node.
        """
        full_node_path = []
        while node is not None:
            full_node_path.append(node.name)
            node = node.parent
        full_node_path.reverse()
        return full_node_path

    def show(self) -> None:
        """Print the updated tree."""
        print("\nUpdated Tree:")
        for pre, _, node in self:
            print(f"{pre}{node.name}, value: {node.value}, valid_status: {node.valid_status}")
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/text_to_speech.py
metagpt/learn/text_to_speech.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/8/17
@Author  : mashenquan
@File    : text_to_speech.py
@Desc    : Text-to-Speech skill, which provides text-to-speech functionality
"""
from typing import Optional

from metagpt.config2 import Config
from metagpt.const import BASE64_FORMAT
from metagpt.tools.azure_tts import oas3_azsure_tts
from metagpt.tools.iflytek_tts import oas3_iflytek_tts
from metagpt.utils.s3 import S3


async def text_to_speech(
    text,
    lang="zh-CN",
    voice="zh-CN-XiaomoNeural",
    style="affectionate",
    role="Girl",
    config: Optional[Config] = None,
):
    """Convert text to speech, preferring Azure TTS and falling back to iFlyTek TTS.

    Credentials are read from *config* (or the default config), not passed as arguments:
    Azure requires `azure_tts_subscription_key` and `azure_tts_region`
    (see: `https://portal.azure.com/` > `Resource Management` > `Keys and Endpoint`);
    iFlyTek requires `iflytek_app_id`, `iflytek_api_key` and `iflytek_api_secret`
    (see: `https://console.xfyun.cn/services/tts`).

    :param text: The text used for voice conversion.
    :param lang: The value can contain a language code such as en (English), or a locale such as en-US (English - United States). For more details, checkout: `https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts`
    :param voice: For more details, checkout: `https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts`, `https://speech.microsoft.com/portal/voicegallery`
    :param style: Speaking style to express different emotions like cheerfulness, empathy, and calm. For more details, checkout: `https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts`
    :param role: With roles, the same voice can act as a different age and gender. For more details, checkout: `https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts`
    :param config: Configuration holding the TTS credentials; defaults to `Config.default()`.
    :return: A markdown audio link if the S3 cache upload succeeds; otherwise the
        Base64-encoded .wav (Azure) / .mp3 (iFlyTek) data prefixed with a data-URI
        declaration, or an empty string if conversion produced no data.
    :raises ValueError: If neither Azure nor iFlyTek credentials are configured.
    """
    config = config if config else Config.default()
    subscription_key = config.azure_tts_subscription_key
    region = config.azure_tts_region
    if subscription_key and region:
        # Azure path: cache to S3 and return a markdown link when possible.
        audio_declaration = "data:audio/wav;base64,"
        base64_data = await oas3_azsure_tts(text, lang, voice, style, role, subscription_key, region)
        s3 = S3(config.s3)
        url = await s3.cache(data=base64_data, file_ext=".wav", format=BASE64_FORMAT)
        if url:
            return f"[{text}]({url})"
        # NOTE: parses as `(audio_declaration + base64_data) if base64_data else base64_data`
        return audio_declaration + base64_data if base64_data else base64_data

    iflytek_app_id = config.iflytek_app_id
    iflytek_api_key = config.iflytek_api_key
    iflytek_api_secret = config.iflytek_api_secret
    if iflytek_app_id and iflytek_api_key and iflytek_api_secret:
        # iFlyTek fallback: same caching strategy, mp3 output.
        audio_declaration = "data:audio/mp3;base64,"
        base64_data = await oas3_iflytek_tts(
            text=text, app_id=iflytek_app_id, api_key=iflytek_api_key, api_secret=iflytek_api_secret
        )
        s3 = S3(config.s3)
        url = await s3.cache(data=base64_data, file_ext=".mp3", format=BASE64_FORMAT)
        if url:
            return f"[{text}]({url})"
        return audio_declaration + base64_data if base64_data else base64_data

    raise ValueError(
        "azure_tts_subscription_key, azure_tts_region, iflytek_app_id, iflytek_api_key, iflytek_api_secret error"
    )
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/skill_loader.py
metagpt/learn/skill_loader.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/8/18
@Author  : mashenquan
@File    : skill_loader.py
@Desc    : Skill YAML Configuration Loader.
"""
from pathlib import Path
from typing import Dict, List, Optional

import yaml
from pydantic import BaseModel, Field

from metagpt.context import Context
from metagpt.utils.common import aread


class Example(BaseModel):
    """A sample question/answer pair illustrating a skill's usage."""

    ask: str
    answer: str


class Returns(BaseModel):
    """Declared return type (and optional format) of a skill."""

    type: str
    format: Optional[str] = None


class Parameter(BaseModel):
    """A single named parameter of a skill."""

    type: str
    description: str = None


class Skill(BaseModel):
    """One skill entry from the YAML declaration."""

    name: str
    description: str = None
    id: str = None
    x_prerequisite: Dict = Field(default=None, alias="x-prerequisite")
    parameters: Dict[str, Parameter] = None
    examples: List[Example]
    returns: Returns

    @property
    def arguments(self) -> Dict:
        """Map each parameter name to its description (empty string if undescribed)."""
        if not self.parameters:
            return {}
        ret = {}
        for k, v in self.parameters.items():
            ret[k] = v.description if v.description else ""
        return ret


class Entity(BaseModel):
    """A named owner of a set of skills (e.g. "Assistant")."""

    name: str = None
    skills: List[Skill]


class Components(BaseModel):
    pass


class SkillsDeclaration(BaseModel):
    """Top-level model of the skills.yaml declaration file."""

    skillapi: str
    entities: Dict[str, Entity]
    components: Components = None

    @staticmethod
    async def load(skill_yaml_file_name: Path = None) -> "SkillsDeclaration":
        """Read and parse the skills YAML file (defaults to docs/.well-known/skills.yaml)."""
        if not skill_yaml_file_name:
            skill_yaml_file_name = Path(__file__).parent.parent.parent / "docs/.well-known/skills.yaml"
        data = await aread(filename=skill_yaml_file_name)
        skill_data = yaml.safe_load(data)
        return SkillsDeclaration(**skill_data)

    def get_skill_list(self, entity_name: str = "Assistant", context: Context = None) -> Dict:
        """Return the skill name based on the skill description."""
        entity = self.entities.get(entity_name)
        if not entity:
            return {}

        # List of skills that the agent chooses to activate.
        ctx = context or Context()
        agent_skills = ctx.kwargs.agent_skills
        if not agent_skills:
            return {}

        # Validate each activation entry has a "name" field before filtering.
        class _AgentSkill(BaseModel):
            name: str

        names = [_AgentSkill(**i).name for i in agent_skills]
        return {s.description: s.name for s in entity.skills if s.name in names}

    def get_skill(self, name, entity_name: str = "Assistant") -> Optional[Skill]:
        """Return a skill by name, or None if the entity or the skill is not found."""
        entity = self.entities.get(entity_name)
        if not entity:
            return None
        for sk in entity.skills:
            if sk.name == name:
                return sk
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/text_to_embedding.py
metagpt/learn/text_to_embedding.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/8/18
@Author  : mashenquan
@File    : text_to_embedding.py
@Desc    : Text-to-Embedding skill, which provides text-to-embedding functionality.
"""
from typing import Optional

from metagpt.config2 import Config
from metagpt.tools.openai_text_to_embedding import oas3_openai_text_to_embedding


async def text_to_embedding(text, model="text-embedding-ada-002", config: Optional[Config] = None):
    """Text to embedding

    :param text: The text used for embedding.
    :param model: One of ['text-embedding-ada-002'], ID of the model to use. For more details, checkout: `https://api.openai.com/v1/models`.
    :param config: OpenAI config with API key, For more details, checkout: `https://platform.openai.com/account/api-keys`
    :return: A json object of :class:`ResultEmbedding` class if successful, otherwise `{}`.
    :raises ValueError: If no OpenAI LLM configuration is available.
    """
    config = config if config else Config.default()
    # Fetch the LLM config once (the original called get_openai_llm() twice) and fail
    # with a clear error instead of an AttributeError when no OpenAI config exists.
    llm_config = config.get_openai_llm()
    if not llm_config:
        raise ValueError("An OpenAI LLM configuration is required for text_to_embedding.")
    return await oas3_openai_text_to_embedding(
        text, model=model, openai_api_key=llm_config.api_key, proxy=llm_config.proxy
    )
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/text_to_image.py
metagpt/learn/text_to_image.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/8/18
@Author  : mashenquan
@File    : text_to_image.py
@Desc    : Text-to-Image skill, which provides text-to-image functionality.
"""
import base64
from typing import Optional

from metagpt.config2 import Config
from metagpt.const import BASE64_FORMAT
from metagpt.llm import LLM
from metagpt.tools.metagpt_text_to_image import oas3_metagpt_text_to_image
from metagpt.tools.openai_text_to_image import oas3_openai_text_to_image
from metagpt.utils.s3 import S3


async def text_to_image(text, size_type: str = "512x512", config: Optional[Config] = None):
    """Convert text into an image via the MetaGPT TTI service or OpenAI.

    :param text: The text used for image conversion.
    :param size_type: If using OPENAI, the available size options are ['256x256', '512x512', '1024x1024'], while for MetaGPT, the options are ['512x512', '512x768'].
    :param config: Config
    :return: The image data is returned in Base64 encoding.
    """
    cfg = config if config else Config.default()
    image_declaration = "data:image/png;base64,"

    # Prefer the self-hosted MetaGPT TTI endpoint; otherwise fall back to OpenAI.
    model_url = cfg.metagpt_tti_url
    if model_url:
        raw_image = await oas3_metagpt_text_to_image(text, size_type, model_url)
    elif cfg.get_openai_llm():
        openai_llm = LLM(llm_config=cfg.get_openai_llm())
        raw_image = await oas3_openai_text_to_image(text, size_type, llm=openai_llm)
    else:
        raise ValueError("Missing necessary parameters.")

    encoded = base64.b64encode(raw_image).decode("utf-8")

    # Try caching to S3 first; a markdown image link is the preferred return form.
    cached_url = await S3(cfg.s3).cache(data=encoded, file_ext=".png", format=BASE64_FORMAT)
    if cached_url:
        return f"![{text}]({cached_url})"
    return image_declaration + encoded if encoded else ""
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/google_search.py
metagpt/learn/google_search.py
from metagpt.tools.search_engine import SearchEngine


async def google_search(query: str, max_results: int = 6, **kwargs):
    """Run a web search and return the hits as a numbered markdown list.

    :param query: The search query.
    :param max_results: The number of search results to retrieve
    :return: The web search results in markdown format.
    """
    engine = SearchEngine(**kwargs)
    hits = await engine.run(query, max_results=max_results, as_string=False)
    lines = [f"{rank}. [{hit['title']}]({hit['link']}): {hit['snippet']}" for rank, hit in enumerate(hits, 1)]
    return "\n".join(lines)
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
FoundationAgents/MetaGPT
https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/learn/__init__.py
metagpt/learn/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/4/30 20:57 @Author : alexanderwu @File : __init__.py """ from metagpt.learn.text_to_image import text_to_image from metagpt.learn.text_to_speech import text_to_speech from metagpt.learn.google_search import google_search __all__ = ["text_to_image", "text_to_speech", "google_search"]
python
MIT
fc6e8433747be02826dec818627ed5cec0950e77
2026-01-04T14:38:37.890126Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/setup.py
setup.py
from setuptools import setup, find_packages import os from pathlib import Path import shutil # Note: Most configuration is now in pyproject.toml # This setup.py is kept for backwards compatibility # Create the .crawl4ai folder in the user's home directory if it doesn't exist # If the folder already exists, remove the cache folder base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY") crawl4ai_folder = Path(base_dir) if base_dir else Path.home() crawl4ai_folder = crawl4ai_folder / ".crawl4ai" cache_folder = crawl4ai_folder / "cache" content_folders = [ "html_content", "cleaned_html", "markdown_content", "extracted_content", "screenshots", ] # Clean up old cache if exists if cache_folder.exists(): shutil.rmtree(cache_folder) # Create new folder structure crawl4ai_folder.mkdir(exist_ok=True) cache_folder.mkdir(exist_ok=True) for folder in content_folders: (crawl4ai_folder / folder).mkdir(exist_ok=True) version = "0.0.0" # This will be overridden by pyproject.toml's dynamic version try: with open("crawl4ai/__version__.py") as f: for line in f: if line.startswith("__version__"): version = line.split("=")[1].strip().strip('"') break except Exception: pass # Let pyproject.toml handle version setup( name="Crawl4AI", version=version, description="🚀🤖 Crawl4AI: Open-source LLM Friendly Web Crawler & scraper", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", url="https://github.com/unclecode/crawl4ai", author="Unclecode", author_email="unclecode@kidocode.com", license="Apache-2.0", packages=find_packages(), package_data={"crawl4ai": ["js_snippet/*.js"]}, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ], python_requires=">=3.10", )
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/test_webhook_implementation.py
test_webhook_implementation.py
""" Simple test script to validate webhook implementation without running full server. This script tests: 1. Webhook module imports and syntax 2. WebhookDeliveryService initialization 3. Payload construction logic 4. Configuration parsing """ import sys import os import json from datetime import datetime, timezone # Add deploy/docker to path to import modules # sys.path.insert(0, '/home/user/crawl4ai/deploy/docker') sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'deploy', 'docker')) def test_imports(): """Test that all webhook-related modules can be imported""" print("=" * 60) print("TEST 1: Module Imports") print("=" * 60) try: from webhook import WebhookDeliveryService print("✅ webhook.WebhookDeliveryService imported successfully") except Exception as e: print(f"❌ Failed to import webhook module: {e}") return False try: from schemas import WebhookConfig, WebhookPayload print("✅ schemas.WebhookConfig imported successfully") print("✅ schemas.WebhookPayload imported successfully") except Exception as e: print(f"❌ Failed to import schemas: {e}") return False return True def test_webhook_service_init(): """Test WebhookDeliveryService initialization""" print("\n" + "=" * 60) print("TEST 2: WebhookDeliveryService Initialization") print("=" * 60) try: from webhook import WebhookDeliveryService # Test with default config config = { "webhooks": { "enabled": True, "default_url": None, "data_in_payload": False, "retry": { "max_attempts": 5, "initial_delay_ms": 1000, "max_delay_ms": 32000, "timeout_ms": 30000 }, "headers": { "User-Agent": "Crawl4AI-Webhook/1.0" } } } service = WebhookDeliveryService(config) print(f"✅ Service initialized successfully") print(f" - Max attempts: {service.max_attempts}") print(f" - Initial delay: {service.initial_delay}s") print(f" - Max delay: {service.max_delay}s") print(f" - Timeout: {service.timeout}s") # Verify calculations assert service.max_attempts == 5, "Max attempts should be 5" assert service.initial_delay == 1.0, "Initial 
delay should be 1.0s" assert service.max_delay == 32.0, "Max delay should be 32.0s" assert service.timeout == 30.0, "Timeout should be 30.0s" print("✅ All configuration values correct") return True except Exception as e: print(f"❌ Service initialization failed: {e}") import traceback traceback.print_exc() return False def test_webhook_config_model(): """Test WebhookConfig Pydantic model""" print("\n" + "=" * 60) print("TEST 3: WebhookConfig Model Validation") print("=" * 60) try: from schemas import WebhookConfig from pydantic import ValidationError # Test valid config valid_config = { "webhook_url": "https://example.com/webhook", "webhook_data_in_payload": True, "webhook_headers": {"X-Secret": "token123"} } config = WebhookConfig(**valid_config) print(f"✅ Valid config accepted:") print(f" - URL: {config.webhook_url}") print(f" - Data in payload: {config.webhook_data_in_payload}") print(f" - Headers: {config.webhook_headers}") # Test minimal config minimal_config = { "webhook_url": "https://example.com/webhook" } config2 = WebhookConfig(**minimal_config) print(f"✅ Minimal config accepted (defaults applied):") print(f" - URL: {config2.webhook_url}") print(f" - Data in payload: {config2.webhook_data_in_payload}") print(f" - Headers: {config2.webhook_headers}") # Test invalid URL try: invalid_config = { "webhook_url": "not-a-url" } config3 = WebhookConfig(**invalid_config) print(f"❌ Invalid URL should have been rejected") return False except ValidationError as e: print(f"✅ Invalid URL correctly rejected") return True except Exception as e: print(f"❌ Model validation test failed: {e}") import traceback traceback.print_exc() return False def test_payload_construction(): """Test webhook payload construction logic""" print("\n" + "=" * 60) print("TEST 4: Payload Construction") print("=" * 60) try: # Simulate payload construction from notify_job_completion task_id = "crawl_abc123" task_type = "crawl" status = "completed" urls = ["https://example.com"] payload = { 
"task_id": task_id, "task_type": task_type, "status": status, "timestamp": datetime.now(timezone.utc).isoformat(), "urls": urls } print(f"✅ Basic payload constructed:") print(json.dumps(payload, indent=2)) # Test with error error_payload = { "task_id": "crawl_xyz789", "task_type": "crawl", "status": "failed", "timestamp": datetime.now(timezone.utc).isoformat(), "urls": ["https://example.com"], "error": "Connection timeout" } print(f"\n✅ Error payload constructed:") print(json.dumps(error_payload, indent=2)) # Test with data data_payload = { "task_id": "crawl_def456", "task_type": "crawl", "status": "completed", "timestamp": datetime.now(timezone.utc).isoformat(), "urls": ["https://example.com"], "data": { "results": [ {"url": "https://example.com", "markdown": "# Example"} ] } } print(f"\n✅ Data payload constructed:") print(json.dumps(data_payload, indent=2)) return True except Exception as e: print(f"❌ Payload construction failed: {e}") import traceback traceback.print_exc() return False def test_exponential_backoff(): """Test exponential backoff calculation""" print("\n" + "=" * 60) print("TEST 5: Exponential Backoff Calculation") print("=" * 60) try: initial_delay = 1.0 # 1 second max_delay = 32.0 # 32 seconds print("Backoff delays for 5 attempts:") for attempt in range(5): delay = min(initial_delay * (2 ** attempt), max_delay) print(f" Attempt {attempt + 1}: {delay}s") # Verify the sequence: 1s, 2s, 4s, 8s, 16s expected = [1.0, 2.0, 4.0, 8.0, 16.0] actual = [min(initial_delay * (2 ** i), max_delay) for i in range(5)] assert actual == expected, f"Expected {expected}, got {actual}" print("✅ Exponential backoff sequence correct") return True except Exception as e: print(f"❌ Backoff calculation failed: {e}") return False def test_api_integration(): """Test that api.py imports webhook module correctly""" print("\n" + "=" * 60) print("TEST 6: API Integration") print("=" * 60) try: # Check if api.py can import webhook module api_path = 
os.path.join(os.path.dirname(__file__), 'deploy', 'docker', 'api.py') with open(api_path, 'r') as f: api_content = f.read() if 'from webhook import WebhookDeliveryService' in api_content: print("✅ api.py imports WebhookDeliveryService") else: print("❌ api.py missing webhook import") return False if 'WebhookDeliveryService(config)' in api_content: print("✅ api.py initializes WebhookDeliveryService") else: print("❌ api.py doesn't initialize WebhookDeliveryService") return False if 'notify_job_completion' in api_content: print("✅ api.py calls notify_job_completion") else: print("❌ api.py doesn't call notify_job_completion") return False return True except Exception as e: print(f"❌ API integration check failed: {e}") return False def main(): """Run all tests""" print("\n🧪 Webhook Implementation Validation Tests") print("=" * 60) results = [] # Run tests results.append(("Module Imports", test_imports())) results.append(("Service Initialization", test_webhook_service_init())) results.append(("Config Model", test_webhook_config_model())) results.append(("Payload Construction", test_payload_construction())) results.append(("Exponential Backoff", test_exponential_backoff())) results.append(("API Integration", test_api_integration())) # Print summary print("\n" + "=" * 60) print("TEST SUMMARY") print("=" * 60) passed = sum(1 for _, result in results if result) total = len(results) for test_name, result in results: status = "✅ PASS" if result else "❌ FAIL" print(f"{status} - {test_name}") print(f"\n{'=' * 60}") print(f"Results: {passed}/{total} tests passed") print(f"{'=' * 60}") if passed == total: print("\n🎉 All tests passed! Webhook implementation is valid.") return 0 else: print(f"\n⚠️ {total - passed} test(s) failed. Please review the output above.") return 1 if __name__ == "__main__": exit(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/test_llm_webhook_feature.py
test_llm_webhook_feature.py
#!/usr/bin/env python3 """ Test script to validate webhook implementation for /llm/job endpoint. This tests that the /llm/job endpoint now supports webhooks following the same pattern as /crawl/job. """ import sys import os # Add deploy/docker to path sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'deploy', 'docker')) def test_llm_job_payload_model(): """Test that LlmJobPayload includes webhook_config field""" print("=" * 60) print("TEST 1: LlmJobPayload Model") print("=" * 60) try: from job import LlmJobPayload from schemas import WebhookConfig from pydantic import ValidationError # Test with webhook_config payload_dict = { "url": "https://example.com", "q": "Extract main content", "schema": None, "cache": False, "provider": None, "webhook_config": { "webhook_url": "https://myapp.com/webhook", "webhook_data_in_payload": True, "webhook_headers": {"X-Secret": "token"} } } payload = LlmJobPayload(**payload_dict) print(f"✅ LlmJobPayload accepts webhook_config") print(f" - URL: {payload.url}") print(f" - Query: {payload.q}") print(f" - Webhook URL: {payload.webhook_config.webhook_url}") print(f" - Data in payload: {payload.webhook_config.webhook_data_in_payload}") # Test without webhook_config (should be optional) minimal_payload = { "url": "https://example.com", "q": "Extract content" } payload2 = LlmJobPayload(**minimal_payload) assert payload2.webhook_config is None, "webhook_config should be optional" print(f"✅ LlmJobPayload works without webhook_config (optional)") return True except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_handle_llm_request_signature(): """Test that handle_llm_request accepts webhook_config parameter""" print("\n" + "=" * 60) print("TEST 2: handle_llm_request Function Signature") print("=" * 60) try: from api import handle_llm_request import inspect sig = inspect.signature(handle_llm_request) params = list(sig.parameters.keys()) print(f"Function parameters: {params}") if 
'webhook_config' in params: print(f"✅ handle_llm_request has webhook_config parameter") # Check that it's optional with default None webhook_param = sig.parameters['webhook_config'] if webhook_param.default is None or webhook_param.default == inspect.Parameter.empty: print(f"✅ webhook_config is optional (default: {webhook_param.default})") else: print(f"⚠️ webhook_config default is: {webhook_param.default}") return True else: print(f"❌ handle_llm_request missing webhook_config parameter") return False except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_process_llm_extraction_signature(): """Test that process_llm_extraction accepts webhook_config parameter""" print("\n" + "=" * 60) print("TEST 3: process_llm_extraction Function Signature") print("=" * 60) try: from api import process_llm_extraction import inspect sig = inspect.signature(process_llm_extraction) params = list(sig.parameters.keys()) print(f"Function parameters: {params}") if 'webhook_config' in params: print(f"✅ process_llm_extraction has webhook_config parameter") webhook_param = sig.parameters['webhook_config'] if webhook_param.default is None or webhook_param.default == inspect.Parameter.empty: print(f"✅ webhook_config is optional (default: {webhook_param.default})") else: print(f"⚠️ webhook_config default is: {webhook_param.default}") return True else: print(f"❌ process_llm_extraction missing webhook_config parameter") return False except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_webhook_integration_in_api(): """Test that api.py properly integrates webhook notifications""" print("\n" + "=" * 60) print("TEST 4: Webhook Integration in process_llm_extraction") print("=" * 60) try: api_file = os.path.join(os.path.dirname(__file__), 'deploy', 'docker', 'api.py') with open(api_file, 'r') as f: api_content = f.read() # Check for WebhookDeliveryService initialization if 'webhook_service = 
WebhookDeliveryService(config)' in api_content: print("✅ process_llm_extraction initializes WebhookDeliveryService") else: print("❌ Missing WebhookDeliveryService initialization in process_llm_extraction") return False # Check for notify_job_completion calls with llm_extraction if 'task_type="llm_extraction"' in api_content: print("✅ Uses correct task_type='llm_extraction' for notifications") else: print("❌ Missing task_type='llm_extraction' in webhook notifications") return False # Count webhook notification calls (should have at least 3: success + 2 failure paths) notification_count = api_content.count('await webhook_service.notify_job_completion') # Find only in process_llm_extraction function llm_func_start = api_content.find('async def process_llm_extraction') llm_func_end = api_content.find('\nasync def ', llm_func_start + 1) if llm_func_end == -1: llm_func_end = len(api_content) llm_func_content = api_content[llm_func_start:llm_func_end] llm_notification_count = llm_func_content.count('await webhook_service.notify_job_completion') print(f"✅ Found {llm_notification_count} webhook notification calls in process_llm_extraction") if llm_notification_count >= 3: print(f"✅ Sufficient notification points (success + failure paths)") else: print(f"⚠️ Expected at least 3 notification calls, found {llm_notification_count}") return True except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_job_endpoint_integration(): """Test that /llm/job endpoint extracts and passes webhook_config""" print("\n" + "=" * 60) print("TEST 5: /llm/job Endpoint Integration") print("=" * 60) try: job_file = os.path.join(os.path.dirname(__file__), 'deploy', 'docker', 'job.py') with open(job_file, 'r') as f: job_content = f.read() # Find the llm_job_enqueue function llm_job_start = job_content.find('async def llm_job_enqueue') llm_job_end = job_content.find('\n\n@router', llm_job_start + 1) if llm_job_end == -1: llm_job_end = 
job_content.find('\n\nasync def', llm_job_start + 1) llm_job_func = job_content[llm_job_start:llm_job_end] # Check for webhook_config extraction if 'webhook_config = None' in llm_job_func: print("✅ llm_job_enqueue initializes webhook_config variable") else: print("❌ Missing webhook_config initialization") return False if 'if payload.webhook_config:' in llm_job_func: print("✅ llm_job_enqueue checks for payload.webhook_config") else: print("❌ Missing webhook_config check") return False if 'webhook_config = payload.webhook_config.model_dump(mode=\'json\')' in llm_job_func: print("✅ llm_job_enqueue converts webhook_config to dict") else: print("❌ Missing webhook_config.model_dump conversion") return False if 'webhook_config=webhook_config' in llm_job_func: print("✅ llm_job_enqueue passes webhook_config to handle_llm_request") else: print("❌ Missing webhook_config parameter in handle_llm_request call") return False return True except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_create_new_task_integration(): """Test that create_new_task stores webhook_config in Redis""" print("\n" + "=" * 60) print("TEST 6: create_new_task Webhook Storage") print("=" * 60) try: api_file = os.path.join(os.path.dirname(__file__), 'deploy', 'docker', 'api.py') with open(api_file, 'r') as f: api_content = f.read() # Find create_new_task function create_task_start = api_content.find('async def create_new_task') create_task_end = api_content.find('\nasync def ', create_task_start + 1) if create_task_end == -1: create_task_end = len(api_content) create_task_func = api_content[create_task_start:create_task_end] # Check for webhook_config storage if 'if webhook_config:' in create_task_func: print("✅ create_new_task checks for webhook_config") else: print("❌ Missing webhook_config check in create_new_task") return False if 'task_data["webhook_config"] = json.dumps(webhook_config)' in create_task_func: print("✅ create_new_task stores 
webhook_config in Redis task data") else: print("❌ Missing webhook_config storage in task_data") return False # Check that webhook_config is passed to process_llm_extraction if 'webhook_config' in create_task_func and 'background_tasks.add_task' in create_task_func: print("✅ create_new_task passes webhook_config to background task") else: print("⚠️ Could not verify webhook_config passed to background task") return True except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def test_pattern_consistency(): """Test that /llm/job follows the same pattern as /crawl/job""" print("\n" + "=" * 60) print("TEST 7: Pattern Consistency with /crawl/job") print("=" * 60) try: api_file = os.path.join(os.path.dirname(__file__), 'deploy', 'docker', 'api.py') with open(api_file, 'r') as f: api_content = f.read() # Find handle_crawl_job to compare pattern crawl_job_start = api_content.find('async def handle_crawl_job') crawl_job_end = api_content.find('\nasync def ', crawl_job_start + 1) if crawl_job_end == -1: crawl_job_end = len(api_content) crawl_job_func = api_content[crawl_job_start:crawl_job_end] # Find process_llm_extraction llm_extract_start = api_content.find('async def process_llm_extraction') llm_extract_end = api_content.find('\nasync def ', llm_extract_start + 1) if llm_extract_end == -1: llm_extract_end = len(api_content) llm_extract_func = api_content[llm_extract_start:llm_extract_end] print("Checking pattern consistency...") # Both should initialize WebhookDeliveryService crawl_has_service = 'webhook_service = WebhookDeliveryService(config)' in crawl_job_func llm_has_service = 'webhook_service = WebhookDeliveryService(config)' in llm_extract_func if crawl_has_service and llm_has_service: print("✅ Both initialize WebhookDeliveryService") else: print(f"❌ Service initialization mismatch (crawl: {crawl_has_service}, llm: {llm_has_service})") return False # Both should call notify_job_completion on success crawl_notifies_success 
= 'status="completed"' in crawl_job_func and 'notify_job_completion' in crawl_job_func llm_notifies_success = 'status="completed"' in llm_extract_func and 'notify_job_completion' in llm_extract_func if crawl_notifies_success and llm_notifies_success: print("✅ Both notify on success") else: print(f"❌ Success notification mismatch (crawl: {crawl_notifies_success}, llm: {llm_notifies_success})") return False # Both should call notify_job_completion on failure crawl_notifies_failure = 'status="failed"' in crawl_job_func and 'error=' in crawl_job_func llm_notifies_failure = 'status="failed"' in llm_extract_func and 'error=' in llm_extract_func if crawl_notifies_failure and llm_notifies_failure: print("✅ Both notify on failure") else: print(f"❌ Failure notification mismatch (crawl: {crawl_notifies_failure}, llm: {llm_notifies_failure})") return False print("✅ /llm/job follows the same pattern as /crawl/job") return True except Exception as e: print(f"❌ Failed: {e}") import traceback traceback.print_exc() return False def main(): """Run all tests""" print("\n🧪 LLM Job Webhook Feature Validation") print("=" * 60) print("Testing that /llm/job now supports webhooks like /crawl/job") print("=" * 60 + "\n") results = [] # Run all tests results.append(("LlmJobPayload Model", test_llm_job_payload_model())) results.append(("handle_llm_request Signature", test_handle_llm_request_signature())) results.append(("process_llm_extraction Signature", test_process_llm_extraction_signature())) results.append(("Webhook Integration", test_webhook_integration_in_api())) results.append(("/llm/job Endpoint", test_job_endpoint_integration())) results.append(("create_new_task Storage", test_create_new_task_integration())) results.append(("Pattern Consistency", test_pattern_consistency())) # Print summary print("\n" + "=" * 60) print("TEST SUMMARY") print("=" * 60) passed = sum(1 for _, result in results if result) total = len(results) for test_name, result in results: status = "✅ PASS" if result 
else "❌ FAIL" print(f"{status} - {test_name}") print(f"\n{'=' * 60}") print(f"Results: {passed}/{total} tests passed") print(f"{'=' * 60}") if passed == total: print("\n🎉 All tests passed! /llm/job webhook feature is correctly implemented.") print("\n📝 Summary of changes:") print(" 1. LlmJobPayload model includes webhook_config field") print(" 2. /llm/job endpoint extracts and passes webhook_config") print(" 3. handle_llm_request accepts webhook_config parameter") print(" 4. create_new_task stores webhook_config in Redis") print(" 5. process_llm_extraction sends webhook notifications") print(" 6. Follows the same pattern as /crawl/job") return 0 else: print(f"\n⚠️ {total - passed} test(s) failed. Please review the output above.") return 1 if __name__ == "__main__": exit(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/schemas.py
deploy/docker/schemas.py
from typing import List, Optional, Dict from enum import Enum from pydantic import BaseModel, Field, HttpUrl from utils import FilterType class CrawlRequest(BaseModel): urls: List[str] = Field(min_length=1, max_length=100) browser_config: Optional[Dict] = Field(default_factory=dict) crawler_config: Optional[Dict] = Field(default_factory=dict) class HookConfig(BaseModel): """Configuration for user-provided hooks""" code: Dict[str, str] = Field( default_factory=dict, description="Map of hook points to Python code strings" ) timeout: int = Field( default=30, ge=1, le=120, description="Timeout in seconds for each hook execution" ) class Config: schema_extra = { "example": { "code": { "on_page_context_created": """ async def hook(page, context, **kwargs): # Block images to speed up crawling await context.route("**/*.{png,jpg,jpeg,gif}", lambda route: route.abort()) return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): # Scroll to load lazy content await page.evaluate("window.scrollTo(0, document.body.scrollHeight)") await page.wait_for_timeout(2000) return page """ }, "timeout": 30 } } class CrawlRequestWithHooks(CrawlRequest): """Extended crawl request with hooks support""" hooks: Optional[HookConfig] = Field( default=None, description="Optional user-provided hook functions" ) class MarkdownRequest(BaseModel): """Request body for the /md endpoint.""" url: str = Field(..., description="Absolute http/https URL to fetch") f: FilterType = Field(FilterType.FIT, description="Content‑filter strategy: fit, raw, bm25, or llm") q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters") c: Optional[str] = Field("0", description="Cache‑bust / revision counter") provider: Optional[str] = Field(None, description="LLM provider override (e.g., 'anthropic/claude-3-opus')") temperature: Optional[float] = Field(None, description="LLM temperature override (0.0-2.0)") base_url: Optional[str] = Field(None, description="LLM API base 
URL override") class RawCode(BaseModel): code: str class HTMLRequest(BaseModel): url: str class ScreenshotRequest(BaseModel): url: str screenshot_wait_for: Optional[float] = 2 output_path: Optional[str] = None class PDFRequest(BaseModel): url: str output_path: Optional[str] = None class JSEndpointRequest(BaseModel): url: str scripts: List[str] = Field( ..., description="List of separated JavaScript snippets to execute" ) class WebhookConfig(BaseModel): """Configuration for webhook notifications.""" webhook_url: HttpUrl webhook_data_in_payload: bool = False webhook_headers: Optional[Dict[str, str]] = None class WebhookPayload(BaseModel): """Payload sent to webhook endpoints.""" task_id: str task_type: str # "crawl", "llm_extraction", etc. status: str # "completed" or "failed" timestamp: str # ISO 8601 format urls: List[str] error: Optional[str] = None data: Optional[Dict] = None # Included only if webhook_data_in_payload=True
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/api.py
deploy/docker/api.py
import os import json import asyncio from typing import List, Tuple, Dict from functools import partial from uuid import uuid4 from datetime import datetime, timezone from base64 import b64encode import logging from typing import Optional, AsyncGenerator from urllib.parse import unquote from fastapi import HTTPException, Request, status from fastapi.background import BackgroundTasks from fastapi.responses import JSONResponse from redis import asyncio as aioredis from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, LLMExtractionStrategy, CacheMode, BrowserConfig, MemoryAdaptiveDispatcher, RateLimiter, LLMConfig ) from crawl4ai.utils import perform_completion_with_backoff from crawl4ai.content_filter_strategy import ( PruningContentFilter, BM25ContentFilter, LLMContentFilter ) from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy from utils import ( TaskStatus, FilterType, get_base_url, is_task_id, should_cleanup_task, decode_redis_hash, get_llm_api_key, validate_llm_provider, get_llm_temperature, get_llm_base_url ) from webhook import WebhookDeliveryService import psutil, time logger = logging.getLogger(__name__) # --- Helper to get memory --- def _get_memory_mb(): try: return psutil.Process().memory_info().rss / (1024 * 1024) except Exception as e: logger.warning(f"Could not get memory info: {e}") return None async def handle_llm_qa( url: str, query: str, config: dict ) -> str: """Process QA using LLM with crawled content as context.""" from crawler_pool import get_crawler try: if not url.startswith(('http://', 'https://')) and not url.startswith(("raw:", "raw://")): url = 'https://' + url # Extract base URL by finding last '?q=' occurrence last_q_index = url.rfind('?q=') if last_q_index != -1: url = url[:last_q_index] # Get markdown content (use default config) from utils import load_config cfg = load_config() browser_cfg = BrowserConfig( 
extra_args=cfg["crawler"]["browser"].get("extra_args", []), **cfg["crawler"]["browser"].get("kwargs", {}), ) crawler = await get_crawler(browser_cfg) result = await crawler.arun(url) if not result.success: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=result.error_message ) content = result.markdown.fit_markdown or result.markdown.raw_markdown # Create prompt and get LLM response prompt = f"""Use the following content as context to answer the question. Content: {content} Question: {query} Answer:""" # api_token=os.environ.get(config["llm"].get("api_key_env", "")) response = perform_completion_with_backoff( provider=config["llm"]["provider"], prompt_with_variables=prompt, api_token=get_llm_api_key(config), # Returns None to let litellm handle it temperature=get_llm_temperature(config), base_url=get_llm_base_url(config), base_delay=config["llm"].get("backoff_base_delay", 2), max_attempts=config["llm"].get("backoff_max_attempts", 3), exponential_factor=config["llm"].get("backoff_exponential_factor", 2) ) return response.choices[0].message.content except Exception as e: logger.error(f"QA processing error: {str(e)}", exc_info=True) raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e) ) async def process_llm_extraction( redis: aioredis.Redis, config: dict, task_id: str, url: str, instruction: str, schema: Optional[str] = None, cache: str = "0", provider: Optional[str] = None, webhook_config: Optional[Dict] = None, temperature: Optional[float] = None, base_url: Optional[str] = None ) -> None: """Process LLM extraction in background.""" # Initialize webhook service webhook_service = WebhookDeliveryService(config) try: # Validate provider is_valid, error_msg = validate_llm_provider(config, provider) if not is_valid: await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.FAILED, "error": error_msg }) # Send webhook notification on failure await webhook_service.notify_job_completion( 
task_id=task_id, task_type="llm_extraction", status="failed", urls=[url], webhook_config=webhook_config, error=error_msg ) return api_key = get_llm_api_key(config, provider) # Returns None to let litellm handle it llm_strategy = LLMExtractionStrategy( llm_config=LLMConfig( provider=provider or config["llm"]["provider"], api_token=api_key, temperature=temperature or get_llm_temperature(config, provider), base_url=base_url or get_llm_base_url(config, provider) ), instruction=instruction, schema=json.loads(schema) if schema else None, ) cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY async with AsyncWebCrawler() as crawler: result = await crawler.arun( url=url, config=CrawlerRunConfig( extraction_strategy=llm_strategy, scraping_strategy=LXMLWebScrapingStrategy(), cache_mode=cache_mode ) ) if not result.success: await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.FAILED, "error": result.error_message }) # Send webhook notification on failure await webhook_service.notify_job_completion( task_id=task_id, task_type="llm_extraction", status="failed", urls=[url], webhook_config=webhook_config, error=result.error_message ) return try: content = json.loads(result.extracted_content) except json.JSONDecodeError: content = result.extracted_content result_data = {"extracted_content": content} await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.COMPLETED, "result": json.dumps(content) }) # Send webhook notification on successful completion await webhook_service.notify_job_completion( task_id=task_id, task_type="llm_extraction", status="completed", urls=[url], webhook_config=webhook_config, result=result_data ) except Exception as e: logger.error(f"LLM extraction error: {str(e)}", exc_info=True) await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.FAILED, "error": str(e) }) # Send webhook notification on failure await webhook_service.notify_job_completion( task_id=task_id, task_type="llm_extraction", 
status="failed", urls=[url], webhook_config=webhook_config, error=str(e) ) async def handle_markdown_request( url: str, filter_type: FilterType, query: Optional[str] = None, cache: str = "0", config: Optional[dict] = None, provider: Optional[str] = None, temperature: Optional[float] = None, base_url: Optional[str] = None ) -> str: """Handle markdown generation requests.""" try: # Validate provider if using LLM filter if filter_type == FilterType.LLM: is_valid, error_msg = validate_llm_provider(config, provider) if not is_valid: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=error_msg ) decoded_url = unquote(url) if not decoded_url.startswith(('http://', 'https://')) and not decoded_url.startswith(("raw:", "raw://")): decoded_url = 'https://' + decoded_url if filter_type == FilterType.RAW: md_generator = DefaultMarkdownGenerator() else: content_filter = { FilterType.FIT: PruningContentFilter(), FilterType.BM25: BM25ContentFilter(user_query=query or ""), FilterType.LLM: LLMContentFilter( llm_config=LLMConfig( provider=provider or config["llm"]["provider"], api_token=get_llm_api_key(config, provider), # Returns None to let litellm handle it temperature=temperature or get_llm_temperature(config, provider), base_url=base_url or get_llm_base_url(config, provider) ), instruction=query or "Extract main content" ) }[filter_type] md_generator = DefaultMarkdownGenerator(content_filter=content_filter) cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY from crawler_pool import get_crawler from utils import load_config as _load_config _cfg = _load_config() browser_cfg = BrowserConfig( extra_args=_cfg["crawler"]["browser"].get("extra_args", []), **_cfg["crawler"]["browser"].get("kwargs", {}), ) crawler = await get_crawler(browser_cfg) result = await crawler.arun( url=decoded_url, config=CrawlerRunConfig( markdown_generator=md_generator, scraping_strategy=LXMLWebScrapingStrategy(), cache_mode=cache_mode ) ) if not result.success: raise 
HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=result.error_message ) return (result.markdown.raw_markdown if filter_type == FilterType.RAW else result.markdown.fit_markdown) except Exception as e: logger.error(f"Markdown error: {str(e)}", exc_info=True) raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e) ) async def handle_llm_request( redis: aioredis.Redis, background_tasks: BackgroundTasks, request: Request, input_path: str, query: Optional[str] = None, schema: Optional[str] = None, cache: str = "0", config: Optional[dict] = None, provider: Optional[str] = None, webhook_config: Optional[Dict] = None, temperature: Optional[float] = None, api_base_url: Optional[str] = None ) -> JSONResponse: """Handle LLM extraction requests.""" base_url = get_base_url(request) try: if is_task_id(input_path): return await handle_task_status( redis, input_path, base_url ) if not query: return JSONResponse({ "message": "Please provide an instruction", "_links": { "example": { "href": f"{base_url}/llm/{input_path}?q=Extract+main+content", "title": "Try this example" } } }) return await create_new_task( redis, background_tasks, input_path, query, schema, cache, base_url, config, provider, webhook_config, temperature, api_base_url ) except Exception as e: logger.error(f"LLM endpoint error: {str(e)}", exc_info=True) return JSONResponse({ "error": str(e), "_links": { "retry": {"href": str(request.url)} } }, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR) async def handle_task_status( redis: aioredis.Redis, task_id: str, base_url: str, *, keep: bool = False ) -> JSONResponse: """Handle task status check requests.""" task = await redis.hgetall(f"task:{task_id}") if not task: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Task not found" ) task = decode_redis_hash(task) response = create_task_response(task, task_id, base_url) if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]: if not keep and 
should_cleanup_task(task["created_at"]): await redis.delete(f"task:{task_id}") return JSONResponse(response) async def create_new_task( redis: aioredis.Redis, background_tasks: BackgroundTasks, input_path: str, query: str, schema: Optional[str], cache: str, base_url: str, config: dict, provider: Optional[str] = None, webhook_config: Optional[Dict] = None, temperature: Optional[float] = None, api_base_url: Optional[str] = None ) -> JSONResponse: """Create and initialize a new task.""" decoded_url = unquote(input_path) if not decoded_url.startswith(('http://', 'https://')) and not decoded_url.startswith(("raw:", "raw://")): decoded_url = 'https://' + decoded_url from datetime import datetime task_id = f"llm_{int(datetime.now().timestamp())}_{id(background_tasks)}" task_data = { "status": TaskStatus.PROCESSING, "created_at": datetime.now().isoformat(), "url": decoded_url } # Store webhook config if provided if webhook_config: task_data["webhook_config"] = json.dumps(webhook_config) await redis.hset(f"task:{task_id}", mapping=task_data) background_tasks.add_task( process_llm_extraction, redis, config, task_id, decoded_url, query, schema, cache, provider, webhook_config, temperature, api_base_url ) return JSONResponse({ "task_id": task_id, "status": TaskStatus.PROCESSING, "url": decoded_url, "_links": { "self": {"href": f"{base_url}/llm/{task_id}"}, "status": {"href": f"{base_url}/llm/{task_id}"} } }) def create_task_response(task: dict, task_id: str, base_url: str) -> dict: """Create response for task status check.""" response = { "task_id": task_id, "status": task["status"], "created_at": task["created_at"], "url": task["url"], "_links": { "self": {"href": f"{base_url}/llm/{task_id}"}, "refresh": {"href": f"{base_url}/llm/{task_id}"} } } if task["status"] == TaskStatus.COMPLETED: response["result"] = json.loads(task["result"]) elif task["status"] == TaskStatus.FAILED: response["error"] = task["error"] return response async def stream_results(crawler: AsyncWebCrawler, 
results_gen: AsyncGenerator) -> AsyncGenerator[bytes, None]: """Stream results with heartbeats and completion markers.""" import json from utils import datetime_handler try: async for result in results_gen: try: server_memory_mb = _get_memory_mb() result_dict = result.model_dump() result_dict['server_memory_mb'] = server_memory_mb # Ensure fit_html is JSON-serializable if "fit_html" in result_dict and not (result_dict["fit_html"] is None or isinstance(result_dict["fit_html"], str)): result_dict["fit_html"] = None # If PDF exists, encode it to base64 if result_dict.get('pdf') is not None: result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8') logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}") data = json.dumps(result_dict, default=datetime_handler) + "\n" yield data.encode('utf-8') except Exception as e: logger.error(f"Serialization error: {e}") error_response = {"error": str(e), "url": getattr(result, 'url', 'unknown')} yield (json.dumps(error_response) + "\n").encode('utf-8') yield json.dumps({"status": "completed"}).encode('utf-8') except asyncio.CancelledError: logger.warning("Client disconnected during streaming") finally: # try: # await crawler.close() # except Exception as e: # logger.error(f"Crawler cleanup error: {e}") pass async def handle_crawl_request( urls: List[str], browser_config: dict, crawler_config: dict, config: dict, hooks_config: Optional[dict] = None ) -> dict: """Handle non-streaming crawl requests with optional hooks.""" # Track request start request_id = f"req_{uuid4().hex[:8]}" try: from monitor import get_monitor await get_monitor().track_request_start( request_id, "/crawl", urls[0] if urls else "batch", browser_config ) except: pass # Monitor not critical start_mem_mb = _get_memory_mb() # <--- Get memory before start_time = time.time() mem_delta_mb = None peak_mem_mb = start_mem_mb hook_manager = None try: urls = [('https://' + url) if not url.startswith(('http://', 'https://')) and not 
url.startswith(("raw:", "raw://")) else url for url in urls] browser_config = BrowserConfig.load(browser_config) crawler_config = CrawlerRunConfig.load(crawler_config) dispatcher = MemoryAdaptiveDispatcher( memory_threshold_percent=config["crawler"]["memory_threshold_percent"], rate_limiter=RateLimiter( base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"]) ) if config["crawler"]["rate_limiter"]["enabled"] else None ) from crawler_pool import get_crawler crawler = await get_crawler(browser_config) # crawler: AsyncWebCrawler = AsyncWebCrawler(config=browser_config) # await crawler.start() # Attach hooks if provided hooks_status = {} if hooks_config: from hook_manager import attach_user_hooks_to_crawler, UserHookManager hook_manager = UserHookManager(timeout=hooks_config.get('timeout', 30)) hooks_status, hook_manager = await attach_user_hooks_to_crawler( crawler, hooks_config.get('code', {}), timeout=hooks_config.get('timeout', 30), hook_manager=hook_manager ) logger.info(f"Hooks attachment status: {hooks_status['status']}") base_config = config["crawler"]["base_config"] # Iterate on key-value pairs in global_config then use hasattr to set them for key, value in base_config.items(): if hasattr(crawler_config, key): current_value = getattr(crawler_config, key) # Only set base config if user didn't provide a value if current_value is None or current_value == "": setattr(crawler_config, key, value) results = [] func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many") partial_func = partial(func, urls[0] if len(urls) == 1 else urls, config=crawler_config, dispatcher=dispatcher) results = await partial_func() # Ensure results is always a list if not isinstance(results, list): results = [results] # await crawler.close() end_mem_mb = _get_memory_mb() # <--- Get memory after end_time = time.time() if start_mem_mb is not None and end_mem_mb is not None: mem_delta_mb = end_mem_mb - start_mem_mb # <--- Calculate delta peak_mem_mb = max(peak_mem_mb if 
peak_mem_mb else 0, end_mem_mb) # <--- Get peak memory logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB") # Process results to handle PDF bytes processed_results = [] for result in results: try: # Check if result has model_dump method (is a proper CrawlResult) if hasattr(result, 'model_dump'): result_dict = result.model_dump() elif isinstance(result, dict): result_dict = result else: # Handle unexpected result type logger.warning(f"Unexpected result type: {type(result)}") result_dict = { "url": str(result) if hasattr(result, '__str__') else "unknown", "success": False, "error_message": f"Unexpected result type: {type(result).__name__}" } # if fit_html is not a string, set it to None to avoid serialization errors if "fit_html" in result_dict and not (result_dict["fit_html"] is None or isinstance(result_dict["fit_html"], str)): result_dict["fit_html"] = None # If PDF exists, encode it to base64 if result_dict.get('pdf') is not None and isinstance(result_dict.get('pdf'), bytes): result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8') processed_results.append(result_dict) except Exception as e: logger.error(f"Error processing result: {e}") processed_results.append({ "url": "unknown", "success": False, "error_message": str(e) }) response = { "success": True, "results": processed_results, "server_processing_time_s": end_time - start_time, "server_memory_delta_mb": mem_delta_mb, "server_peak_memory_mb": peak_mem_mb } # Track request completion try: from monitor import get_monitor await get_monitor().track_request_end( request_id, success=True, pool_hit=True, status_code=200 ) except: pass # Add hooks information if hooks were used if hooks_config and hook_manager: from hook_manager import UserHookManager if isinstance(hook_manager, UserHookManager): try: # Ensure all hook data is JSON serializable hook_data = { "status": hooks_status, "execution_log": hook_manager.execution_log, 
"errors": hook_manager.errors, "summary": hook_manager.get_summary() } # Test that it's serializable json.dumps(hook_data) response["hooks"] = hook_data except (TypeError, ValueError) as e: logger.error(f"Hook data not JSON serializable: {e}") response["hooks"] = { "status": {"status": "error", "message": "Hook data serialization failed"}, "execution_log": [], "errors": [{"error": str(e)}], "summary": {} } return response except Exception as e: logger.error(f"Crawl error: {str(e)}", exc_info=True) # Track request error try: from monitor import get_monitor await get_monitor().track_request_end( request_id, success=False, error=str(e), status_code=500 ) except: pass if 'crawler' in locals() and crawler.ready: # Check if crawler was initialized and started # try: # await crawler.close() # except Exception as close_e: # logger.error(f"Error closing crawler during exception handling: {close_e}") logger.error(f"Error closing crawler during exception handling: {str(e)}") # Measure memory even on error if possible end_mem_mb_error = _get_memory_mb() if start_mem_mb is not None and end_mem_mb_error is not None: mem_delta_mb = end_mem_mb_error - start_mem_mb raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=json.dumps({ # Send structured error "error": str(e), "server_memory_delta_mb": mem_delta_mb, "server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0) }) ) async def handle_stream_crawl_request( urls: List[str], browser_config: dict, crawler_config: dict, config: dict, hooks_config: Optional[dict] = None ) -> Tuple[AsyncWebCrawler, AsyncGenerator, Optional[Dict]]: """Handle streaming crawl requests with optional hooks.""" hooks_info = None try: browser_config = BrowserConfig.load(browser_config) # browser_config.verbose = True # Set to False or remove for production stress testing browser_config.verbose = False crawler_config = CrawlerRunConfig.load(crawler_config) crawler_config.scraping_strategy = 
LXMLWebScrapingStrategy() crawler_config.stream = True dispatcher = MemoryAdaptiveDispatcher( memory_threshold_percent=config["crawler"]["memory_threshold_percent"], rate_limiter=RateLimiter( base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"]) ) ) from crawler_pool import get_crawler crawler = await get_crawler(browser_config) # crawler = AsyncWebCrawler(config=browser_config) # await crawler.start() # Attach hooks if provided if hooks_config: from hook_manager import attach_user_hooks_to_crawler, UserHookManager hook_manager = UserHookManager(timeout=hooks_config.get('timeout', 30)) hooks_status, hook_manager = await attach_user_hooks_to_crawler( crawler, hooks_config.get('code', {}), timeout=hooks_config.get('timeout', 30), hook_manager=hook_manager ) logger.info(f"Hooks attachment status for streaming: {hooks_status['status']}") # Include hook manager in hooks_info for proper tracking hooks_info = {'status': hooks_status, 'manager': hook_manager} results_gen = await crawler.arun_many( urls=urls, config=crawler_config, dispatcher=dispatcher ) return crawler, results_gen, hooks_info except Exception as e: # Make sure to close crawler if started during an error here if 'crawler' in locals() and crawler.ready: # try: # await crawler.close() # except Exception as close_e: # logger.error(f"Error closing crawler during stream setup exception: {close_e}") logger.error(f"Error closing crawler during stream setup exception: {str(e)}") logger.error(f"Stream crawl error: {str(e)}", exc_info=True) # Raising HTTPException here will prevent streaming response raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e) ) async def handle_crawl_job( redis, background_tasks: BackgroundTasks, urls: List[str], browser_config: Dict, crawler_config: Dict, config: Dict, webhook_config: Optional[Dict] = None, ) -> Dict: """ Fire-and-forget version of handle_crawl_request. 
Creates a task in Redis, runs the heavy work in a background task, lets /crawl/job/{task_id} polling fetch the result. """ task_id = f"crawl_{uuid4().hex[:8]}" # Store task data in Redis task_data = { "status": TaskStatus.PROCESSING, # <-- keep enum values consistent "created_at": datetime.now(timezone.utc).replace(tzinfo=None).isoformat(), "url": json.dumps(urls), # store list as JSON string "result": "", "error": "", } # Store webhook config if provided if webhook_config: task_data["webhook_config"] = json.dumps(webhook_config) await redis.hset(f"task:{task_id}", mapping=task_data) # Initialize webhook service webhook_service = WebhookDeliveryService(config) async def _runner(): try: result = await handle_crawl_request( urls=urls, browser_config=browser_config, crawler_config=crawler_config, config=config, ) await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.COMPLETED, "result": json.dumps(result), }) # Send webhook notification on successful completion await webhook_service.notify_job_completion( task_id=task_id, task_type="crawl", status="completed", urls=urls, webhook_config=webhook_config, result=result ) await asyncio.sleep(5) # Give Redis time to process the update except Exception as exc: await redis.hset(f"task:{task_id}", mapping={ "status": TaskStatus.FAILED, "error": str(exc), }) # Send webhook notification on failure await webhook_service.notify_job_completion( task_id=task_id, task_type="crawl", status="failed", urls=urls, webhook_config=webhook_config, error=str(exc) ) background_tasks.add_task(_runner) return {"task_id": task_id}
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/monitor_routes.py
deploy/docker/monitor_routes.py
# monitor_routes.py - Monitor API endpoints from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect from pydantic import BaseModel from typing import Optional from monitor import get_monitor import logging import asyncio import json logger = logging.getLogger(__name__) router = APIRouter(prefix="/monitor", tags=["monitor"]) @router.get("/health") async def get_health(): """Get current system health snapshot.""" try: monitor = get_monitor() return await monitor.get_health_summary() except Exception as e: logger.error(f"Error getting health: {e}") raise HTTPException(500, str(e)) @router.get("/requests") async def get_requests(status: str = "all", limit: int = 50): """Get active and completed requests. Args: status: Filter by 'active', 'completed', 'success', 'error', or 'all' limit: Max number of completed requests to return (default 50) """ # Input validation if status not in ["all", "active", "completed", "success", "error"]: raise HTTPException(400, f"Invalid status: {status}. Must be one of: all, active, completed, success, error") if limit < 1 or limit > 1000: raise HTTPException(400, f"Invalid limit: {limit}. 
Must be between 1 and 1000") try: monitor = get_monitor() if status == "active": return {"active": monitor.get_active_requests(), "completed": []} elif status == "completed": return {"active": [], "completed": monitor.get_completed_requests(limit)} elif status in ["success", "error"]: return {"active": [], "completed": monitor.get_completed_requests(limit, status)} else: # "all" return { "active": monitor.get_active_requests(), "completed": monitor.get_completed_requests(limit) } except Exception as e: logger.error(f"Error getting requests: {e}") raise HTTPException(500, str(e)) @router.get("/browsers") async def get_browsers(): """Get detailed browser pool information.""" try: monitor = get_monitor() browsers = await monitor.get_browser_list() # Calculate summary stats total_browsers = len(browsers) total_memory = sum(b["memory_mb"] for b in browsers) # Calculate reuse rate from recent requests recent = monitor.get_completed_requests(100) pool_hits = sum(1 for r in recent if r.get("pool_hit", False)) reuse_rate = (pool_hits / len(recent) * 100) if recent else 0 return { "browsers": browsers, "summary": { "total_count": total_browsers, "total_memory_mb": total_memory, "reuse_rate_percent": round(reuse_rate, 1) } } except Exception as e: logger.error(f"Error getting browsers: {e}") raise HTTPException(500, str(e)) @router.get("/endpoints/stats") async def get_endpoint_stats(): """Get aggregated endpoint statistics.""" try: monitor = get_monitor() return monitor.get_endpoint_stats_summary() except Exception as e: logger.error(f"Error getting endpoint stats: {e}") raise HTTPException(500, str(e)) @router.get("/timeline") async def get_timeline(metric: str = "memory", window: str = "5m"): """Get timeline data for charts. Args: metric: 'memory', 'requests', or 'browsers' window: Time window (only '5m' supported for now) """ # Input validation if metric not in ["memory", "requests", "browsers"]: raise HTTPException(400, f"Invalid metric: {metric}. 
Must be one of: memory, requests, browsers") if window != "5m": raise HTTPException(400, f"Invalid window: {window}. Only '5m' is currently supported") try: monitor = get_monitor() return monitor.get_timeline_data(metric, window) except Exception as e: logger.error(f"Error getting timeline: {e}") raise HTTPException(500, str(e)) @router.get("/logs/janitor") async def get_janitor_log(limit: int = 100): """Get recent janitor cleanup events.""" # Input validation if limit < 1 or limit > 1000: raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000") try: monitor = get_monitor() return {"events": monitor.get_janitor_log(limit)} except Exception as e: logger.error(f"Error getting janitor log: {e}") raise HTTPException(500, str(e)) @router.get("/logs/errors") async def get_errors_log(limit: int = 100): """Get recent errors.""" # Input validation if limit < 1 or limit > 1000: raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000") try: monitor = get_monitor() return {"errors": monitor.get_errors_log(limit)} except Exception as e: logger.error(f"Error getting errors log: {e}") raise HTTPException(500, str(e)) # ========== Control Actions ========== class KillBrowserRequest(BaseModel): sig: str @router.post("/actions/cleanup") async def force_cleanup(): """Force immediate janitor cleanup (kills idle cold pool browsers).""" try: from crawler_pool import COLD_POOL, LAST_USED, USAGE_COUNT, LOCK import time from contextlib import suppress killed_count = 0 now = time.time() async with LOCK: for sig in list(COLD_POOL.keys()): # Kill all cold pool browsers immediately logger.info(f"🧹 Force cleanup: closing cold browser (sig={sig[:8]})") with suppress(Exception): await COLD_POOL[sig].close() COLD_POOL.pop(sig, None) LAST_USED.pop(sig, None) USAGE_COUNT.pop(sig, None) killed_count += 1 monitor = get_monitor() await monitor.track_janitor_event("force_cleanup", "manual", {"killed": killed_count}) return {"success": True, "killed_browsers": 
killed_count} except Exception as e: logger.error(f"Error during force cleanup: {e}") raise HTTPException(500, str(e)) @router.post("/actions/kill_browser") async def kill_browser(req: KillBrowserRequest): """Kill a specific browser by signature (hot or cold only). Args: sig: Browser config signature (first 8 chars) """ try: from crawler_pool import HOT_POOL, COLD_POOL, LAST_USED, USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG from contextlib import suppress # Find full signature matching prefix target_sig = None pool_type = None async with LOCK: # Check hot pool for sig in HOT_POOL.keys(): if sig.startswith(req.sig): target_sig = sig pool_type = "hot" break # Check cold pool if not target_sig: for sig in COLD_POOL.keys(): if sig.startswith(req.sig): target_sig = sig pool_type = "cold" break # Check if trying to kill permanent if DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig): raise HTTPException(403, "Cannot kill permanent browser. Use restart instead.") if not target_sig: raise HTTPException(404, f"Browser with sig={req.sig} not found") # Warn if there are active requests (browser might be in use) monitor = get_monitor() active_count = len(monitor.get_active_requests()) if active_count > 0: logger.warning(f"Killing browser {target_sig[:8]} while {active_count} requests are active - may cause failures") # Kill the browser if pool_type == "hot": browser = HOT_POOL.pop(target_sig) else: browser = COLD_POOL.pop(target_sig) with suppress(Exception): await browser.close() LAST_USED.pop(target_sig, None) USAGE_COUNT.pop(target_sig, None) logger.info(f"🔪 Killed {pool_type} browser (sig={target_sig[:8]})") monitor = get_monitor() await monitor.track_janitor_event("kill_browser", target_sig, {"pool": pool_type, "manual": True}) return {"success": True, "killed_sig": target_sig[:8], "pool_type": pool_type} except HTTPException: raise except Exception as e: logger.error(f"Error killing browser: {e}") raise HTTPException(500, str(e)) 
@router.post("/actions/restart_browser")
async def restart_browser(req: KillBrowserRequest):
    """Restart a browser (kill + recreate). Works for permanent too.

    Args:
        req: Body carrying ``sig`` — a browser config signature prefix
            (first 8 chars), or the literal string ``"permanent"``.

    Returns:
        dict: ``success`` flag plus which browser was restarted.

    Raises:
        HTTPException: 404 if no pooled browser matches ``sig``,
            500 on any unexpected failure.
    """
    try:
        # Imported lazily to avoid a circular import at module load time.
        from crawler_pool import (PERMANENT, HOT_POOL, COLD_POOL, LAST_USED,
                                  USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG,
                                  init_permanent)
        from crawl4ai import BrowserConfig
        from contextlib import suppress

        # --- Permanent browser: close it, then re-create from config. ---
        if req.sig == "permanent" or (DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig)):
            async with LOCK:
                if PERMANENT:
                    with suppress(Exception):
                        await PERMANENT.close()
            # NOTE(review): init_permanent() returns early while the module-level
            # PERMANENT reference is still set, so unless that global is cleared
            # elsewhere this "restart" may be a no-op that leaves a closed
            # browser in place — confirm against crawler_pool.init_permanent.
            from utils import load_config
            config = load_config()
            await init_permanent(BrowserConfig(
                extra_args=config["crawler"]["browser"].get("extra_args", []),
                **config["crawler"]["browser"].get("kwargs", {}),
            ))
            logger.info("🔄 Restarted permanent browser")
            return {"success": True, "restarted": "permanent"}

        # --- Hot/cold browser: resolve the signature prefix under the lock. ---
        target_sig = None
        pool_type = None

        async with LOCK:
            # Search the hot pool first, then the cold pool.
            for sig in HOT_POOL.keys():
                if sig.startswith(req.sig):
                    target_sig = sig
                    pool_type = "hot"
                    break

            if not target_sig:
                for sig in COLD_POOL.keys():
                    if sig.startswith(req.sig):
                        target_sig = sig
                        pool_type = "cold"
                        break

            if not target_sig:
                raise HTTPException(404, f"Browser with sig={req.sig} not found")

            # Kill the existing instance.
            if pool_type == "hot":
                browser = HOT_POOL.pop(target_sig)
            else:
                browser = COLD_POOL.pop(target_sig)

            with suppress(Exception):
                await browser.close()

            # The original BrowserConfig is not stored anywhere, so we cannot
            # recreate the browser here; the next request with the same config
            # will lazily create a fresh one.
            LAST_USED.pop(target_sig, None)
            USAGE_COUNT.pop(target_sig, None)

        logger.info(f"🔄 Restarted {pool_type} browser (sig={target_sig[:8]})")

        monitor = get_monitor()
        await monitor.track_janitor_event("restart_browser", target_sig, {"pool": pool_type})

        return {"success": True, "restarted_sig": target_sig[:8], "note": "Browser will be recreated on next request"}

    except HTTPException:
        # Re-raise API errors untouched so the client sees the intended status.
        raise
    except Exception as e:
        logger.error(f"Error restarting browser: {e}")
        raise HTTPException(500, str(e))


@router.post("/stats/reset")
async def reset_stats():
    """Reset today's endpoint counters.

    Returns:
        dict: ``success`` flag and a human-readable message.

    Raises:
        HTTPException: 500 if clearing or persisting the stats fails.
    """
    try:
        monitor = get_monitor()
        monitor.endpoint_stats.clear()
        # NOTE(review): relies on a private monitor method to flush the cleared
        # state — consider exposing a public reset API on the monitor.
        await monitor._persist_endpoint_stats()
        return {"success": True, "message": "Endpoint stats reset"}
    except Exception as e:
        logger.error(f"Error resetting stats: {e}")
        raise HTTPException(500, str(e))


@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for real-time monitoring updates.

    Sends updates every 2 seconds with:
    - Health stats
    - Active/completed requests
    - Browser pool status
    - Timeline data
    """
    await websocket.accept()
    logger.info("WebSocket client connected")

    try:
        while True:
            try:
                # Gather all monitoring data for this tick.
                monitor = get_monitor()
                data = {
                    # get_running_loop() is the modern, coroutine-safe spelling;
                    # inside a coroutine it returns the same loop/time source.
                    "timestamp": asyncio.get_running_loop().time(),
                    "health": await monitor.get_health_summary(),
                    "requests": {
                        "active": monitor.get_active_requests(),
                        "completed": monitor.get_completed_requests(limit=10)
                    },
                    "browsers": await monitor.get_browser_list(),
                    "timeline": {
                        "memory": monitor.get_timeline_data("memory", "5m"),
                        "requests": monitor.get_timeline_data("requests", "5m"),
                        "browsers": monitor.get_timeline_data("browsers", "5m")
                    },
                    "janitor": monitor.get_janitor_log(limit=10),
                    "errors": monitor.get_errors_log(limit=10)
                }

                # Push this update to the client, then pace the loop.
                await websocket.send_json(data)
                await asyncio.sleep(2)

            except WebSocketDisconnect:
                logger.info("WebSocket client disconnected")
                break
            except Exception as e:
                # Keep the stream alive across transient errors.
                logger.error(f"WebSocket error: {e}", exc_info=True)
                await asyncio.sleep(2)

    except Exception as e:
        logger.error(f"WebSocket connection error: {e}", exc_info=True)
    finally:
        logger.info("WebSocket connection closed")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/hook_manager.py
deploy/docker/hook_manager.py
"""
Hook Manager for User-Provided Hook Functions

Handles validation, compilation, and safe execution of user-provided hook code.
"""

import ast
import asyncio
import traceback
from typing import Dict, Callable, Optional, Tuple, List, Any
import logging

logger = logging.getLogger(__name__)


def _code_preview(code: str) -> str:
    """Truncate hook source to ~100 chars for inclusion in error reports."""
    return code[:100] + '...' if len(code) > 100 else code


class UserHookManager:
    """Manages user-provided hook functions with error isolation.

    Collects compilation/execution errors in ``self.errors`` and a
    per-execution audit trail in ``self.execution_log`` so callers can
    report hook health without user code ever crashing the crawl.
    """

    # Expected positional parameter names for each supported hook point.
    HOOK_SIGNATURES = {
        "on_browser_created": ["browser"],
        "on_page_context_created": ["page", "context"],
        "before_goto": ["page", "context", "url"],
        "after_goto": ["page", "context", "url", "response"],
        "on_user_agent_updated": ["page", "context", "user_agent"],
        "on_execution_started": ["page", "context"],
        "before_retrieve_html": ["page", "context"],
        "before_return_html": ["page", "context", "html"]
    }

    # Default timeout for hook execution (in seconds)
    DEFAULT_TIMEOUT = 30

    def __init__(self, timeout: int = DEFAULT_TIMEOUT):
        # Per-hook execution timeout in seconds.
        self.timeout = timeout
        # Accumulated error records (compilation + runtime).
        self.errors: List[Dict[str, Any]] = []
        # Reserved cache of compiled hooks (not currently populated).
        self.compiled_hooks: Dict[str, Callable] = {}
        # One entry per execution attempt: status, timing, error info.
        self.execution_log: List[Dict[str, Any]] = []

    def validate_hook_structure(self, hook_code: str, hook_point: str) -> Tuple[bool, str]:
        """Validate the structure of user-provided hook code.

        Args:
            hook_code: The Python code string containing the hook function.
            hook_point: The hook point name (e.g., 'on_page_context_created').

        Returns:
            Tuple of (is_valid, error_message).
        """
        try:
            tree = ast.parse(hook_code)

            if not tree.body:
                return False, "Hook code is empty"

            # Locate the first function definition in the module body.
            func_def = None
            for node in tree.body:
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    func_def = node
                    break

            if not func_def:
                return False, "Hook must contain a function definition (def or async def)"

            # All hook points are awaited by the crawler, so the hook must be async.
            if not isinstance(func_def, ast.AsyncFunctionDef):
                return False, "Hook function must be async (use 'async def' instead of 'def')"

            func_name = func_def.name

            expected_params = self.HOOK_SIGNATURES.get(hook_point, [])
            if not expected_params:
                return False, f"Unknown hook point: {hook_point}"

            func_params = [arg.arg for arg in func_def.args.args]
            # A **kwargs catch-all satisfies any missing named parameters.
            has_kwargs = func_def.args.kwarg is not None

            missing_params = [p for p in expected_params if p not in func_params]
            if missing_params and not has_kwargs:
                return False, f"Hook function '{func_name}' must accept parameters: {', '.join(expected_params)} (missing: {', '.join(missing_params)})"

            # Hooks should return the primary object (page/browser); warn only.
            has_return = any(isinstance(node, ast.Return) for node in ast.walk(func_def))
            if not has_return:
                logger.warning(f"Hook function '{func_name}' should return the {expected_params[0]} object")

            return True, "Valid"

        except SyntaxError as e:
            return False, f"Syntax error at line {e.lineno}: {str(e)}"
        except Exception as e:
            return False, f"Failed to parse hook code: {str(e)}"

    def compile_hook(self, hook_code: str, hook_point: str) -> Optional[Callable]:
        """Compile user-provided hook code into a callable function.

        Args:
            hook_code: The Python code string.
            hook_point: The hook point name.

        Returns:
            Compiled (async) function, or None if compilation failed; failures
            are appended to ``self.errors``.
        """
        try:
            import builtins

            # SECURITY NOTE: '__import__' is exposed below, so this namespace is
            # NOT a sandbox — user code can import arbitrary modules. The list
            # exists for convenience/compatibility, not isolation.
            safe_builtins = {}
            allowed_builtins = [
                'print', 'len', 'str', 'int', 'float', 'bool', 'list', 'dict',
                'set', 'tuple', 'range', 'enumerate', 'zip', 'map', 'filter',
                'any', 'all', 'sum', 'min', 'max', 'sorted', 'reversed',
                'abs', 'round', 'isinstance', 'type', 'getattr', 'hasattr',
                'setattr', 'callable', 'iter', 'next',
                '__import__', '__build_class__'  # Required for exec
            ]
            for name in allowed_builtins:
                if hasattr(builtins, name):
                    safe_builtins[name] = getattr(builtins, name)

            namespace = {
                '__name__': f'user_hook_{hook_point}',
                '__builtins__': safe_builtins
            }

            # Pre-import commonly needed modules into the hook's namespace.
            exec("import asyncio", namespace)
            exec("import json", namespace)
            exec("import re", namespace)
            exec("from typing import Dict, List, Optional", namespace)

            # Remember what was pre-seeded so we only consider names the user
            # code itself defines. (Bug fix: previously the fallback scan could
            # pick up a pre-seeded callable such as typing.Dict instead of the
            # user's function.)
            preset_names = set(namespace)

            # Execute the user code to define the function.
            exec(hook_code, namespace)

            new_items = [(n, o) for n, o in namespace.items() if n not in preset_names]

            # Prefer the first async function defined by the user code.
            for name, obj in new_items:
                if callable(obj) and not name.startswith('_') and asyncio.iscoroutinefunction(obj):
                    return obj

            # Fall back to any plain function, wrapped to look async.
            for name, obj in new_items:
                if callable(obj) and not name.startswith('_'):
                    logger.warning(f"Found non-async function '{name}' - wrapping it")

                    async def async_wrapper(*args, **kwargs):
                        return obj(*args, **kwargs)

                    return async_wrapper

            raise ValueError("No callable function found in hook code")

        except Exception as e:
            error = {
                'hook_point': hook_point,
                'error': f"Failed to compile hook: {str(e)}",
                'type': 'compilation_error',
                'traceback': traceback.format_exc()
            }
            self.errors.append(error)
            logger.error(f"Hook compilation failed for {hook_point}: {str(e)}")
            return None

    async def execute_hook_safely(
        self,
        hook_func: Callable,
        hook_point: str,
        *args,
        **kwargs
    ) -> Tuple[Any, Optional[Dict]]:
        """Execute a user hook with error isolation and timeout.

        Args:
            hook_func: The compiled hook function.
            hook_point: The hook point name.
            *args, **kwargs: Arguments to pass to the hook.

        Returns:
            Tuple of (result, error_dict). On timeout/failure the result is the
            first positional argument (usually page/browser) so the crawl can
            continue, and error_dict describes the failure.
        """
        start_time = asyncio.get_running_loop().time()

        try:
            # wait_for guards against hooks that never return.
            result = await asyncio.wait_for(
                hook_func(*args, **kwargs),
                timeout=self.timeout
            )

            execution_time = asyncio.get_running_loop().time() - start_time
            self.execution_log.append({
                'hook_point': hook_point,
                'status': 'success',
                'execution_time': execution_time,
                'timestamp': start_time
            })
            return result, None

        except asyncio.TimeoutError:
            error = {
                'hook_point': hook_point,
                'error': f'Hook execution timed out ({self.timeout}s limit)',
                'type': 'timeout',
                'execution_time': self.timeout
            }
            self.errors.append(error)
            self.execution_log.append({
                'hook_point': hook_point,
                'status': 'timeout',
                'error': error['error'],
                'execution_time': self.timeout,
                'timestamp': start_time
            })
            # Return the first argument (usually page/browser) to continue.
            return args[0] if args else None, error

        except Exception as e:
            execution_time = asyncio.get_running_loop().time() - start_time
            error = {
                'hook_point': hook_point,
                'error': str(e),
                'type': type(e).__name__,
                'traceback': traceback.format_exc(),
                'execution_time': execution_time
            }
            self.errors.append(error)
            self.execution_log.append({
                'hook_point': hook_point,
                'status': 'failed',
                'error': str(e),
                'error_type': type(e).__name__,
                'execution_time': execution_time,
                'timestamp': start_time
            })
            # Return the first argument (usually page/browser) to continue.
            return args[0] if args else None, error

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of hook execution (counts and success rate)."""
        total_hooks = len(self.execution_log)
        successful = sum(1 for log in self.execution_log if log['status'] == 'success')
        failed = sum(1 for log in self.execution_log if log['status'] == 'failed')
        timed_out = sum(1 for log in self.execution_log if log['status'] == 'timeout')

        return {
            'total_executions': total_hooks,
            'successful': successful,
            'failed': failed,
            'timed_out': timed_out,
            'success_rate': (successful / total_hooks * 100) if total_hooks > 0 else 0,
            'total_errors': len(self.errors)
        }


class IsolatedHookWrapper:
    """Wraps user hooks with error isolation and reporting."""

    def __init__(self, hook_manager: UserHookManager):
        self.hook_manager = hook_manager

    def create_hook_wrapper(self, user_hook: Callable, hook_point: str) -> Callable:
        """Create a wrapper that isolates hook errors from the main process.

        Args:
            user_hook: The compiled user hook function.
            hook_point: The hook point name.

        Returns:
            Wrapped async function that always returns a usable object
            (the hook's result, or the original page/browser on failure).
        """

        async def wrapped_hook(*args, **kwargs):
            """Wrapped hook with error isolation."""
            # Resolve the primary object (page/browser) so there is always a
            # sane value to hand back to the crawler.
            return_obj = None
            if args:
                return_obj = args[0]
            elif 'page' in kwargs:
                return_obj = kwargs['page']
            elif 'browser' in kwargs:
                return_obj = kwargs['browser']

            try:
                result, error = await self.hook_manager.execute_hook_safely(
                    user_hook, hook_point, *args, **kwargs
                )

                if error:
                    # Hook failed but we continue with the original object.
                    logger.warning(f"User hook failed at {hook_point}: {error['error']}")
                    return return_obj

                if result is None:
                    logger.debug(f"Hook at {hook_point} returned None, using original object")
                    return return_obj

                return result

            except Exception as e:
                # Should rarely happen: execute_hook_safely handles errors.
                logger.error(f"Unexpected error in hook wrapper for {hook_point}: {e}")
                return return_obj

        # Set function name for debugging.
        wrapped_hook.__name__ = f"wrapped_{hook_point}"
        return wrapped_hook


async def process_user_hooks(
    hooks_input: Dict[str, str],
    timeout: int = 30
) -> Tuple[Dict[str, Callable], List[Dict], UserHookManager]:
    """Process and compile user-provided hook functions.

    Thin convenience wrapper: creates a fresh manager and delegates to
    :func:`process_user_hooks_with_manager` (previously the two functions
    duplicated the same validation/compilation loop).

    Args:
        hooks_input: Dictionary mapping hook points to code strings.
        timeout: Timeout for each hook execution.

    Returns:
        Tuple of (compiled_hooks, validation_errors, hook_manager).
    """
    hook_manager = UserHookManager(timeout=timeout)
    compiled_hooks, validation_errors = await process_user_hooks_with_manager(
        hooks_input, hook_manager
    )
    return compiled_hooks, validation_errors, hook_manager


async def process_user_hooks_with_manager(
    hooks_input: Dict[str, str],
    hook_manager: UserHookManager
) -> Tuple[Dict[str, Callable], List[Dict]]:
    """Process and compile user-provided hook functions with an existing manager.

    Args:
        hooks_input: Dictionary mapping hook points to code strings.
        hook_manager: Existing UserHookManager instance.

    Returns:
        Tuple of (compiled_hooks, validation_errors).
    """
    wrapper = IsolatedHookWrapper(hook_manager)
    compiled_hooks = {}
    validation_errors = []

    for hook_point, hook_code in hooks_input.items():
        # Skip empty hooks.
        if not hook_code or not hook_code.strip():
            continue

        # Validate hook point.
        if hook_point not in UserHookManager.HOOK_SIGNATURES:
            validation_errors.append({
                'hook_point': hook_point,
                'error': f'Unknown hook point. Valid points: {", ".join(UserHookManager.HOOK_SIGNATURES.keys())}',
                'code_preview': _code_preview(hook_code)
            })
            continue

        # Validate structure.
        is_valid, message = hook_manager.validate_hook_structure(hook_code, hook_point)
        if not is_valid:
            validation_errors.append({
                'hook_point': hook_point,
                'error': message,
                'code_preview': _code_preview(hook_code)
            })
            continue

        # Compile the hook and wrap it with error isolation.
        hook_func = hook_manager.compile_hook(hook_code, hook_point)
        if hook_func:
            wrapped_hook = wrapper.create_hook_wrapper(hook_func, hook_point)
            compiled_hooks[hook_point] = wrapped_hook
            logger.info(f"Successfully compiled hook for {hook_point}")
        else:
            validation_errors.append({
                'hook_point': hook_point,
                'error': 'Failed to compile hook function - check syntax and structure',
                'code_preview': _code_preview(hook_code)
            })

    return compiled_hooks, validation_errors


async def attach_user_hooks_to_crawler(
    crawler,  # AsyncWebCrawler instance
    user_hooks: Dict[str, str],
    timeout: int = 30,
    hook_manager: Optional[UserHookManager] = None
) -> Tuple[Dict[str, Any], UserHookManager]:
    """Attach user-provided hooks to a crawler with full error reporting.

    Args:
        crawler: AsyncWebCrawler instance.
        user_hooks: Dictionary mapping hook points to code strings.
        timeout: Timeout for each hook execution.
        hook_manager: Optional existing UserHookManager instance.

    Returns:
        Tuple of (status_dict, hook_manager) where status is 'success',
        'partial' (some hooks attached), or 'failed'.
    """
    # Use the provided hook_manager or create a new one.
    if hook_manager is None:
        hook_manager = UserHookManager(timeout=timeout)

    compiled_hooks, validation_errors = await process_user_hooks_with_manager(
        user_hooks, hook_manager
    )

    if validation_errors:
        logger.warning(f"Hook validation errors: {validation_errors}")

    # Attach successfully compiled hooks to the crawler strategy.
    attached_hooks = []
    for hook_point, wrapped_hook in compiled_hooks.items():
        try:
            crawler.crawler_strategy.set_hook(hook_point, wrapped_hook)
            attached_hooks.append(hook_point)
            logger.info(f"Attached hook to {hook_point}")
        except Exception as e:
            logger.error(f"Failed to attach hook to {hook_point}: {e}")
            validation_errors.append({
                'hook_point': hook_point,
                'error': f'Failed to attach hook: {str(e)}'
            })

    status = 'success' if not validation_errors else ('partial' if attached_hooks else 'failed')

    status_dict = {
        'status': status,
        'attached_hooks': attached_hooks,
        'validation_errors': validation_errors,
        'total_hooks_provided': len(user_hooks),
        'successfully_attached': len(attached_hooks),
        'failed_validation': len(validation_errors)
    }

    return status_dict, hook_manager
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/crawler_pool.py
deploy/docker/crawler_pool.py
# crawler_pool.py - Smart browser pool with tiered management
import asyncio, json, hashlib, time
from contextlib import suppress
from typing import Dict, Optional
from crawl4ai import AsyncWebCrawler, BrowserConfig
from utils import load_config, get_container_memory_percent
import logging

logger = logging.getLogger(__name__)

CONFIG = load_config()

# Pool tiers
PERMANENT: Optional[AsyncWebCrawler] = None  # Always-ready default browser
HOT_POOL: Dict[str, AsyncWebCrawler] = {}    # Frequent configs (promoted after repeated use)
COLD_POOL: Dict[str, AsyncWebCrawler] = {}   # Rare configs (first-seen, short TTL)
LAST_USED: Dict[str, float] = {}             # sig -> last-use timestamp
USAGE_COUNT: Dict[str, int] = {}             # sig -> total uses (drives promotion)
LOCK = asyncio.Lock()                        # Guards all pool state above

# Config
MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)
BASE_IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 300)
DEFAULT_CONFIG_SIG = None  # Cached sig for default config

# Promote a cold-pool browser to the hot pool after this many uses.
_PROMOTE_THRESHOLD = 3


def _sig(cfg: BrowserConfig) -> str:
    """Generate a stable config signature (sha1 of canonical JSON)."""
    payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
    return hashlib.sha1(payload.encode()).hexdigest()


def _is_default_config(sig: str) -> bool:
    """Check if the signature matches the permanent default browser's config."""
    return sig == DEFAULT_CONFIG_SIG


async def _track_event(event: str, sig: str, data: dict) -> None:
    """Best-effort notification to the monitor.

    Monitoring is optional; import or runtime failures are swallowed
    deliberately (but narrowly — Exception, never a bare except) so that
    tracking can never break pool operations.
    """
    try:
        from monitor import get_monitor
        await get_monitor().track_janitor_event(event, sig, data)
    except Exception:
        pass


async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
    """Get a crawler from the pool with the tiered strategy.

    Lookup order: permanent (default config) -> hot pool -> cold pool ->
    create new (cold). Raises MemoryError instead of creating a browser
    when container memory is at or above MEM_LIMIT.
    """
    sig = _sig(cfg)
    async with LOCK:
        # 1) Permanent browser serves the default config.
        if PERMANENT and _is_default_config(sig):
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            logger.info("🔥 Using permanent browser")
            return PERMANENT

        # 2) Hot pool.
        if sig in HOT_POOL:
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            logger.info(f"♨️ Using hot pool browser (sig={sig[:8]})")
            return HOT_POOL[sig]

        # 3) Cold pool; promote once it has proven itself popular.
        if sig in COLD_POOL:
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            if USAGE_COUNT[sig] >= _PROMOTE_THRESHOLD:
                logger.info(f"⬆️ Promoting to hot pool (sig={sig[:8]}, count={USAGE_COUNT[sig]})")
                HOT_POOL[sig] = COLD_POOL.pop(sig)
                await _track_event("promote", sig, {"count": USAGE_COUNT[sig]})
                return HOT_POOL[sig]
            logger.info(f"❄️ Using cold pool browser (sig={sig[:8]})")
            return COLD_POOL[sig]

        # 4) Creating a new browser costs memory — check pressure first.
        mem_pct = get_container_memory_percent()
        if mem_pct >= MEM_LIMIT:
            logger.error(f"💥 Memory pressure: {mem_pct:.1f}% >= {MEM_LIMIT}%")
            raise MemoryError(f"Memory at {mem_pct:.1f}%, refusing new browser")

        logger.info(f"🆕 Creating new browser in cold pool (sig={sig[:8]}, mem={mem_pct:.1f}%)")
        crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
        await crawler.start()
        COLD_POOL[sig] = crawler
        LAST_USED[sig] = time.time()
        USAGE_COUNT[sig] = 1
        return crawler


async def init_permanent(cfg: BrowserConfig):
    """Initialize the permanent default browser (idempotent)."""
    global PERMANENT, DEFAULT_CONFIG_SIG
    async with LOCK:
        if PERMANENT:
            return
        DEFAULT_CONFIG_SIG = _sig(cfg)
        logger.info("🔥 Creating permanent default browser")
        PERMANENT = AsyncWebCrawler(config=cfg, thread_safe=False)
        await PERMANENT.start()
        LAST_USED[DEFAULT_CONFIG_SIG] = time.time()
        USAGE_COUNT[DEFAULT_CONFIG_SIG] = 0


async def close_all():
    """Close every browser and reset all pool state."""
    global PERMANENT
    async with LOCK:
        tasks = []
        if PERMANENT:
            tasks.append(PERMANENT.close())
        tasks.extend([c.close() for c in HOT_POOL.values()])
        tasks.extend([c.close() for c in COLD_POOL.values()])
        await asyncio.gather(*tasks, return_exceptions=True)
        # Fix: drop the PERMANENT reference so a later init_permanent() can
        # recreate it instead of short-circuiting on a closed instance.
        PERMANENT = None
        HOT_POOL.clear()
        COLD_POOL.clear()
        LAST_USED.clear()
        USAGE_COUNT.clear()


async def _reap_idle(pool: Dict[str, AsyncWebCrawler], ttl: int, label: str, now: float) -> None:
    """Close and evict browsers in *pool* that have been idle longer than *ttl*.

    Must be called while holding LOCK. *label* ("hot"/"cold") is used for
    log lines and the monitor event name ("close_hot"/"close_cold").
    """
    for sig in list(pool.keys()):
        # Unknown sigs default to "just used" (idle 0) and are never evicted.
        if now - LAST_USED.get(sig, now) > ttl:
            idle_time = now - LAST_USED[sig]
            logger.info(f"🧹 Closing {label} browser (sig={sig[:8]}, idle={idle_time:.0f}s)")
            with suppress(Exception):
                await pool[sig].close()
            pool.pop(sig, None)
            LAST_USED.pop(sig, None)
            USAGE_COUNT.pop(sig, None)
            await _track_event(f"close_{label}", sig, {"idle_seconds": int(idle_time), "ttl": ttl})


async def janitor():
    """Adaptive cleanup loop: tighter intervals and TTLs under memory pressure."""
    while True:
        mem_pct = get_container_memory_percent()

        # Adaptive intervals and TTLs.
        if mem_pct > 80:
            interval, cold_ttl, hot_ttl = 10, 30, 120
        elif mem_pct > 60:
            interval, cold_ttl, hot_ttl = 30, 60, 300
        else:
            interval, cold_ttl, hot_ttl = 60, BASE_IDLE_TTL, BASE_IDLE_TTL * 2

        await asyncio.sleep(interval)
        now = time.time()

        async with LOCK:
            await _reap_idle(COLD_POOL, cold_ttl, "cold", now)
            # Hot pool gets a longer TTL (more conservative eviction).
            await _reap_idle(HOT_POOL, hot_ttl, "hot", now)

        # Log pool stats while under pressure.
        if mem_pct > 60:
            logger.info(f"📊 Pool: hot={len(HOT_POOL)}, cold={len(COLD_POOL)}, mem={mem_pct:.1f}%")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/utils.py
deploy/docker/utils.py
import dns.resolver
import logging
import yaml
import os
from datetime import datetime
from enum import Enum
from pathlib import Path
from fastapi import Request
from typing import Any, Dict, Optional


class TaskStatus(str, Enum):
    """Lifecycle states for background (LLM) tasks."""
    PROCESSING = "processing"
    FAILED = "failed"
    COMPLETED = "completed"


class FilterType(str, Enum):
    """Content-filtering strategies supported by the crawl endpoints."""
    RAW = "raw"
    FIT = "fit"
    BM25 = "bm25"
    LLM = "llm"


def load_config() -> Dict:
    """Load and return application configuration with environment variable overrides.

    Reads ``config.yml`` next to this module, then applies ``LLM_PROVIDER``
    and ``LLM_API_KEY`` overrides from the environment.
    """
    config_path = Path(__file__).parent / "config.yml"
    with open(config_path, "r") as config_file:
        config = yaml.safe_load(config_file)

    # Override LLM provider from environment if set.
    llm_provider = os.environ.get("LLM_PROVIDER")
    if llm_provider:
        config["llm"]["provider"] = llm_provider
        logging.info(f"LLM provider overridden from environment: {llm_provider}")

    # Also support a direct API key from the environment when the
    # provider-specific key isn't configured.
    llm_api_key = os.environ.get("LLM_API_KEY")
    if llm_api_key and "api_key" not in config["llm"]:
        config["llm"]["api_key"] = llm_api_key
        logging.info("LLM API key loaded from LLM_API_KEY environment variable")

    return config


def setup_logging(config: Dict) -> None:
    """Configure application logging from the ``logging`` config section."""
    logging.basicConfig(
        level=config["logging"]["level"],
        format=config["logging"]["format"]
    )


def get_base_url(request: Request) -> str:
    """Get base URL including scheme and host."""
    return f"{request.url.scheme}://{request.url.netloc}"


def is_task_id(value: str) -> bool:
    """Check if the value matches the task ID pattern (``llm_...``).

    Note: the ``"_" in value`` check is implied by the ``"llm_"`` prefix;
    it is kept for explicitness of intent, not as an extra constraint.
    """
    return value.startswith("llm_") and "_" in value


def datetime_handler(obj: Any) -> Optional[str]:
    """JSON ``default=`` handler: serialize datetime-like objects via isoformat.

    Raises:
        TypeError: for objects without an ``isoformat`` attribute, matching
            the contract json.dumps expects from a default handler.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")


def should_cleanup_task(created_at: str, ttl_seconds: int = 3600) -> bool:
    """Check if a task should be cleaned up based on its ISO creation time."""
    created = datetime.fromisoformat(created_at)
    return (datetime.now() - created).total_seconds() > ttl_seconds


def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
    """Decode Redis hash data from bytes to strings (UTF-8)."""
    return {k.decode('utf-8'): v.decode('utf-8') for k, v in hash_data.items()}


def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> Optional[str]:
    """Get the appropriate API key based on the LLM provider.

    Args:
        config: The application configuration dictionary.
        provider: Optional provider override (e.g., "openai/gpt-4").

    Returns:
        The API key if directly configured, otherwise None to let litellm
        resolve it from provider-specific environment variables.
    """
    # Direct API key config takes priority (backward compatibility).
    if "api_key" in config["llm"]:
        return config["llm"]["api_key"]
    return None


def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]:
    """Validate that the LLM provider has an associated API key.

    Args:
        config: The application configuration dictionary.
        provider: Optional provider override (e.g., "openai/gpt-4").

    Returns:
        Tuple of (is_valid, error_message). Currently always valid: with no
        direct key we trust litellm to find the right environment variable,
        since replicating its resolution logic here would be fragile.
    """
    if "api_key" in config["llm"]:
        return True, ""
    return True, ""


def get_llm_temperature(config: Dict, provider: Optional[str] = None) -> Optional[float]:
    """Get the temperature setting based on the LLM provider.

    Priority order:
    1. Provider-specific environment variable (e.g., OPENAI_TEMPERATURE)
    2. Global LLM_TEMPERATURE environment variable
    3. None (use litellm/provider defaults)
    """
    # Provider-specific temperature first.
    if provider:
        provider_name = provider.split('/')[0].upper()
        provider_temp = os.environ.get(f"{provider_name}_TEMPERATURE")
        if provider_temp:
            try:
                return float(provider_temp)
            except ValueError:
                logging.warning(f"Invalid temperature value for {provider_name}: {provider_temp}")

    # Global fallback.
    global_temp = os.environ.get("LLM_TEMPERATURE")
    if global_temp:
        try:
            return float(global_temp)
        except ValueError:
            logging.warning(f"Invalid global temperature value: {global_temp}")

    return None


def get_llm_base_url(config: Dict, provider: Optional[str] = None) -> Optional[str]:
    """Get the base URL setting based on the LLM provider.

    Priority order:
    1. Provider-specific environment variable (e.g., OPENAI_BASE_URL)
    2. Global LLM_BASE_URL environment variable
    3. None (use default endpoints)
    """
    if provider:
        provider_name = provider.split('/')[0].upper()
        provider_url = os.environ.get(f"{provider_name}_BASE_URL")
        if provider_url:
            return provider_url

    return os.environ.get("LLM_BASE_URL")


def verify_email_domain(email: str) -> bool:
    """Return True if the email's domain resolves to at least one MX record.

    Any failure (malformed address, NXDOMAIN, DNS timeout) counts as invalid.
    """
    try:
        domain = email.split('@')[1]
        records = dns.resolver.resolve(domain, 'MX')
        return bool(records)
    except Exception:
        return False


def get_container_memory_percent() -> float:
    """Get actual container memory usage vs limit (cgroup v1/v2 aware).

    Falls back to host-wide memory percent when not in a container or the
    cgroup files are unavailable.
    """
    try:
        # Try cgroup v2 first.
        usage_path = Path("/sys/fs/cgroup/memory.current")
        limit_path = Path("/sys/fs/cgroup/memory.max")

        if not usage_path.exists():
            # Fall back to cgroup v1.
            usage_path = Path("/sys/fs/cgroup/memory/memory.usage_in_bytes")
            limit_path = Path("/sys/fs/cgroup/memory/memory.limit_in_bytes")

        usage = int(usage_path.read_text())
        limit_text = limit_path.read_text().strip()

        # Handle "unlimited": cgroup v2 writes the literal string "max"
        # (previously this raised ValueError and silently fell through to the
        # host fallback); cgroup v1 uses a huge sentinel number.
        if limit_text == "max":
            import psutil
            limit = psutil.virtual_memory().total
        else:
            limit = int(limit_text)
            if limit > 1e18:
                import psutil
                limit = psutil.virtual_memory().total

        return (usage / limit) * 100
    except Exception:
        # Non-container or unsupported environment: fall back to host metrics.
        import psutil
        return psutil.virtual_memory().percent
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/mcp_bridge.py
deploy/docker/mcp_bridge.py
# deploy/docker/mcp_bridge.py
#
# Bridges a FastAPI application to the Model Context Protocol (MCP):
# routes decorated with @mcp_tool/@mcp_resource/@mcp_template are exposed
# over WebSocket and SSE MCP transports, with tool calls proxied back to
# the FastAPI app over plain HTTP.
from __future__ import annotations
import inspect, json, re, anyio
from contextlib import suppress
from typing import Any, Callable, Dict, List, Tuple

import httpx
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.responses import JSONResponse
from fastapi import Request
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel

from mcp.server.sse import SseServerTransport
import mcp.types as t
from mcp.server.lowlevel.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions


# ── opt-in decorators ──────────────────────────────────────────
# Each decorator only tags the endpoint function with two attributes
# (__mcp_kind__, __mcp_name__); attach_mcp() later scans app.routes for them.
def mcp_resource(name: str | None = None):
    """Mark a FastAPI endpoint as an MCP resource (optionally renamed)."""
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "resource", name
        return fn
    return deco


def mcp_template(name: str | None = None):
    """Mark a FastAPI endpoint as an MCP resource template (optionally renamed)."""
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "template", name
        return fn
    return deco


def mcp_tool(name: str | None = None):
    """Mark a FastAPI endpoint as an MCP tool (optionally renamed)."""
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "tool", name
        return fn
    return deco


# ── HTTP-proxy helper for FastAPI endpoints ─────────────────────
def _make_http_proxy(base_url: str, route):
    """Build an async callable that re-issues a tool call as a real HTTP
    request against the running FastAPI app at ``base_url``.

    Path parameters (``/items/{id}``) are substituted from kwargs first;
    the remaining kwargs become query params (GET) or the JSON body.
    """
    # NOTE: assumes the route has exactly one "real" method besides
    # HEAD/OPTIONS — the first one found is used. TODO confirm for
    # multi-method routes.
    method = list(route.methods - {"HEAD", "OPTIONS"})[0]

    async def proxy(**kwargs):
        # replace `/items/{id}` style params first
        path = route.path
        for k, v in list(kwargs.items()):
            placeholder = "{" + k + "}"
            if placeholder in path:
                path = path.replace(placeholder, str(v))
                kwargs.pop(k)
        url = base_url.rstrip("/") + path
        async with httpx.AsyncClient() as client:
            try:
                r = (
                    await client.get(url, params=kwargs)
                    if method == "GET"
                    else await client.request(method, url, json=kwargs)
                )
                r.raise_for_status()
                return r.text if method == "GET" else r.json()
            except httpx.HTTPStatusError as e:
                # surface FastAPI error details instead of a plain 500
                raise HTTPException(e.response.status_code, e.response.text)

    return proxy


# ── main entry point ────────────────────────────────────────────
def attach_mcp(
    app: FastAPI,
    *,  # keyword-only
    base: str = "/mcp",
    name: str | None = None,
    base_url: str,  # eg. "http://127.0.0.1:8020"
) -> None:
    """Expose the app's MCP-decorated routes over WS + SSE MCP transports.

    Call once, after all routes are declared. Adds three endpoints under
    ``base``: ``{base}/ws`` (WebSocket), ``{base}/sse`` + ``{base}/messages``
    (SSE pair), and ``{base}/schema`` (JSON dump of tools/resources).

    Args:
        app: The FastAPI application whose routes are scanned.
        base: URL prefix for the MCP endpoints.
        name: MCP server name; defaults to ``app.title`` or "FastAPI-MCP".
        base_url: Externally reachable base URL of this very app, used by
            the HTTP proxy when a tool is invoked.
    """
    server_name = name or app.title or "FastAPI-MCP"
    mcp = Server(server_name)

    # tool name -> (HTTP proxy callable, original endpoint fn).
    # The original fn is kept only for introspection (docstring / schema).
    tools: Dict[str, Tuple[Callable, Callable]] = {}
    resources: Dict[str, Callable] = {}
    templates: Dict[str, Callable] = {}

    # register decorated FastAPI routes
    for route in app.routes:
        fn = getattr(route, "endpoint", None)
        kind = getattr(fn, "__mcp_kind__", None)
        if not kind:
            continue
        # Fallback name is derived from the path, e.g. "/crawl/stream" ->
        # "crawl_stream". (The duplicate '}' inside the character class is
        # redundant but harmless.)
        key = fn.__mcp_name__ or re.sub(r"[/{}}]", "_", route.path).strip("_")
        if kind == "tool":
            proxy = _make_http_proxy(base_url, route)
            tools[key] = (proxy, fn)
            continue
        if kind == "resource":
            resources[key] = fn
        if kind == "template":
            templates[key] = fn

    # helpers for JSON-Schema
    def _schema(model: type[BaseModel] | None) -> dict:
        # Permissive catch-all schema when the endpoint takes no body model.
        return {"type": "object"} if model is None else model.model_json_schema()

    def _body_model(fn: Callable) -> type[BaseModel] | None:
        # First pydantic BaseModel parameter is treated as the request body.
        for p in inspect.signature(fn).parameters.values():
            a = p.annotation
            if inspect.isclass(a) and issubclass(a, BaseModel):
                return a
        return None

    # MCP handlers
    @mcp.list_tools()
    async def _list_tools() -> List[t.Tool]:
        out = []
        for k, (proxy, orig_fn) in tools.items():
            # Allow per-endpoint overrides; fall back to the docstring /
            # derived body-model schema.
            desc = getattr(orig_fn, "__mcp_description__", None) or inspect.getdoc(orig_fn) or ""
            schema = getattr(orig_fn, "__mcp_schema__", None) or _schema(_body_model(orig_fn))
            out.append(
                t.Tool(name=k, description=desc, inputSchema=schema)
            )
        return out

    @mcp.call_tool()
    async def _call_tool(name: str, arguments: Dict | None) -> List[t.TextContent]:
        if name not in tools:
            raise HTTPException(404, "tool not found")
        proxy, _ = tools[name]
        try:
            res = await proxy(**(arguments or {}))
        except HTTPException as exc:
            # map server-side errors into MCP "text/error" payloads
            err = {"error": exc.status_code, "detail": exc.detail}
            return [t.TextContent(type="text", text=json.dumps(err))]
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resources()
    async def _list_resources() -> List[t.Resource]:
        return [
            t.Resource(name=k, description=inspect.getdoc(f) or "", mime_type="application/json")
            for k, f in resources.items()
        ]

    @mcp.read_resource()
    async def _read_resource(name: str) -> List[t.TextContent]:
        if name not in resources:
            raise HTTPException(404, "resource not found")
        # NOTE(review): the resource endpoint is called synchronously here;
        # an async endpoint would return an un-awaited coroutine — confirm
        # all @mcp_resource endpoints are sync.
        res = resources[name]()
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resource_templates()
    async def _list_templates() -> List[t.ResourceTemplate]:
        return [
            t.ResourceTemplate(
                name=k,
                description=inspect.getdoc(f) or "",
                parameters={
                    p: {"type": "string"} for p in _path_params(app, f)
                },
            )
            for k, f in templates.items()
        ]

    init_opts = InitializationOptions(
        server_name=server_name,
        server_version="0.1.0",
        capabilities=mcp.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={},
        ),
    )

    # ── WebSocket transport ────────────────────────────────────
    @app.websocket_route(f"{base}/ws")
    async def _ws(ws: WebSocket):
        await ws.accept()
        # Two in-memory pipes: client->server (c2s) and server->client (s2c);
        # mcp.run() sits on the other end of both.
        c2s_send, c2s_recv = anyio.create_memory_object_stream(100)
        s2c_send, s2c_recv = anyio.create_memory_object_stream(100)

        from pydantic import TypeAdapter
        from mcp.types import JSONRPCMessage
        adapter = TypeAdapter(JSONRPCMessage)

        # Set once the server has emitted its first frame (the initialize
        # response) so the reader does not flood it before it is ready.
        init_done = anyio.Event()

        async def srv_to_ws():
            first = True
            try:
                async for msg in s2c_recv:
                    await ws.send_json(msg.model_dump())
                    if first:
                        init_done.set()
                        first = False
            finally:
                # make sure cleanup survives TaskGroup cancellation
                with anyio.CancelScope(shield=True):
                    with suppress(RuntimeError):  # idempotent close
                        await ws.close()

        async def ws_to_srv():
            try:
                # 1st frame is always "initialize"
                first = adapter.validate_python(await ws.receive_json())
                await c2s_send.send(first)
                await init_done.wait()  # block until server ready
                while True:
                    data = await ws.receive_json()
                    await c2s_send.send(adapter.validate_python(data))
            except WebSocketDisconnect:
                await c2s_send.aclose()

        async with anyio.create_task_group() as tg:
            tg.start_soon(mcp.run, c2s_recv, s2c_send, init_opts)
            tg.start_soon(ws_to_srv)
            tg.start_soon(srv_to_ws)

    # ── SSE transport (official) ─────────────────────────────
    sse = SseServerTransport(f"{base}/messages/")

    @app.get(f"{base}/sse")
    async def _mcp_sse(request: Request):
        # request._send is a private Starlette attribute; SseServerTransport
        # needs the raw ASGI send callable.
        async with sse.connect_sse(
            request.scope, request.receive, request._send  # starlette ASGI primitives
        ) as (read_stream, write_stream):
            await mcp.run(read_stream, write_stream, init_opts)

    # client → server frames are POSTed here
    app.mount(f"{base}/messages", app=sse.handle_post_message)

    # ── schema endpoint ───────────────────────────────────────
    @app.get(f"{base}/schema")
    async def _schema_endpoint():
        """Plain JSON dump of everything the MCP server advertises."""
        return JSONResponse({
            "tools": [x.model_dump() for x in await _list_tools()],
            "resources": [x.model_dump() for x in await _list_resources()],
            "resource_templates": [x.model_dump() for x in await _list_templates()],
        })


# ── helpers ────────────────────────────────────────────────────
def _route_name(path: str) -> str:
    """Derive a safe identifier from a route path ("/a/{b}" -> "a_b")."""
    return re.sub(r"[/{}}]", "_", path).strip("_")


def _path_params(app: FastAPI, fn: Callable) -> List[str]:
    """Return the path-parameter names of the route whose endpoint is *fn*.

    Relies on Starlette routes exposing ``param_convertors`` keyed by path
    parameter name; returns [] when *fn* is not a registered endpoint.
    """
    for r in app.routes:
        if r.endpoint is fn:
            return list(r.param_convertors.keys())
    return []
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/webhook.py
deploy/docker/webhook.py
""" Webhook delivery service for Crawl4AI. This module provides webhook notification functionality with exponential backoff retry logic. """ import asyncio import httpx import logging from typing import Dict, Optional from datetime import datetime, timezone logger = logging.getLogger(__name__) class WebhookDeliveryService: """Handles webhook delivery with exponential backoff retry logic.""" def __init__(self, config: Dict): """ Initialize the webhook delivery service. Args: config: Application configuration dictionary containing webhook settings """ self.config = config.get("webhooks", {}) self.max_attempts = self.config.get("retry", {}).get("max_attempts", 5) self.initial_delay = self.config.get("retry", {}).get("initial_delay_ms", 1000) / 1000 self.max_delay = self.config.get("retry", {}).get("max_delay_ms", 32000) / 1000 self.timeout = self.config.get("retry", {}).get("timeout_ms", 30000) / 1000 async def send_webhook( self, webhook_url: str, payload: Dict, headers: Optional[Dict[str, str]] = None ) -> bool: """ Send webhook with exponential backoff retry logic. 
Args: webhook_url: The URL to send the webhook to payload: The JSON payload to send headers: Optional custom headers Returns: bool: True if delivered successfully, False otherwise """ default_headers = self.config.get("headers", {}) merged_headers = {**default_headers, **(headers or {})} merged_headers["Content-Type"] = "application/json" async with httpx.AsyncClient(timeout=self.timeout) as client: for attempt in range(self.max_attempts): try: logger.info( f"Sending webhook (attempt {attempt + 1}/{self.max_attempts}) to {webhook_url}" ) response = await client.post( webhook_url, json=payload, headers=merged_headers ) # Success or client error (don't retry client errors) if response.status_code < 500: if 200 <= response.status_code < 300: logger.info(f"Webhook delivered successfully to {webhook_url}") return True else: logger.warning( f"Webhook rejected with status {response.status_code}: {response.text[:200]}" ) return False # Client error - don't retry # Server error - retry with backoff logger.warning( f"Webhook failed with status {response.status_code}, will retry" ) except httpx.TimeoutException as exc: logger.error(f"Webhook timeout (attempt {attempt + 1}): {exc}") except httpx.RequestError as exc: logger.error(f"Webhook request error (attempt {attempt + 1}): {exc}") except Exception as exc: logger.error(f"Webhook delivery error (attempt {attempt + 1}): {exc}") # Calculate exponential backoff delay if attempt < self.max_attempts - 1: delay = min(self.initial_delay * (2 ** attempt), self.max_delay) logger.info(f"Retrying in {delay}s...") await asyncio.sleep(delay) logger.error( f"Webhook delivery failed after {self.max_attempts} attempts to {webhook_url}" ) return False async def notify_job_completion( self, task_id: str, task_type: str, status: str, urls: list, webhook_config: Optional[Dict], result: Optional[Dict] = None, error: Optional[str] = None ): """ Notify webhook of job completion. 
Args: task_id: The task identifier task_type: Type of task (e.g., "crawl", "llm_extraction") status: Task status ("completed" or "failed") urls: List of URLs that were crawled webhook_config: Webhook configuration from the job request result: Optional crawl result data error: Optional error message if failed """ # Determine webhook URL webhook_url = None data_in_payload = self.config.get("data_in_payload", False) custom_headers = None if webhook_config: webhook_url = webhook_config.get("webhook_url") data_in_payload = webhook_config.get("webhook_data_in_payload", data_in_payload) custom_headers = webhook_config.get("webhook_headers") if not webhook_url: webhook_url = self.config.get("default_url") if not webhook_url: logger.debug("No webhook URL configured, skipping notification") return # Check if webhooks are enabled if not self.config.get("enabled", True): logger.debug("Webhooks are disabled, skipping notification") return # Build payload payload = { "task_id": task_id, "task_type": task_type, "status": status, "timestamp": datetime.now(timezone.utc).isoformat(), "urls": urls } if error: payload["error"] = error if data_in_payload and result: payload["data"] = result # Send webhook (fire and forget - don't block on completion) await self.send_webhook(webhook_url, payload, custom_headers)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/test-websocket.py
deploy/docker/test-websocket.py
#!/usr/bin/env python3
"""
Quick WebSocket test - Connect to monitor WebSocket and print updates
"""
import asyncio
import json
import sys

import websockets


async def test_websocket() -> int:
    """Connect to the local monitor WebSocket and print the first 5 updates.

    Returns:
        int: 0 on success, 1 on any connection or protocol error (used as
        the process exit code).
    """
    uri = "ws://localhost:11235/monitor/ws"
    print(f"Connecting to {uri}...")

    try:
        async with websockets.connect(uri) as websocket:
            print("✅ Connected!")

            # Receive and print 5 updates
            for i in range(5):
                message = await websocket.recv()
                data = json.loads(message)
                print(f"\n📊 Update #{i+1}:")
                print(f" - Health: CPU {data['health']['container']['cpu_percent']}%, Memory {data['health']['container']['memory_percent']}%")
                print(f" - Active Requests: {len(data['requests']['active'])}")
                print(f" - Browsers: {len(data['browsers'])}")
    except Exception as e:
        print(f"❌ Error: {e}")
        return 1

    print("\n✅ WebSocket test passed!")
    return 0


if __name__ == "__main__":
    # sys.exit instead of the site-provided exit() builtin: exit() is only
    # injected by the `site` module and is missing under `python -S` or in
    # frozen interpreters, whereas sys.exit always exists.
    sys.exit(asyncio.run(test_websocket()))
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/server.py
deploy/docker/server.py
# ───────────────────────── server.py ───────────────────────── """ Crawl4AI FastAPI entry‑point • Browser pool + global page cap • Rate‑limiting, security, metrics • /crawl, /crawl/stream, /md, /llm endpoints """ # ── stdlib & 3rd‑party imports ─────────────────────────────── from crawler_pool import get_crawler, close_all, janitor from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig from auth import create_access_token, get_token_dependency, TokenRequest from pydantic import BaseModel from typing import Optional, List, Dict from fastapi import Request, Depends from fastapi.responses import FileResponse import base64 import re import logging from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig from api import ( handle_markdown_request, handle_llm_qa, handle_stream_crawl_request, handle_crawl_request, stream_results ) from schemas import ( CrawlRequestWithHooks, MarkdownRequest, RawCode, HTMLRequest, ScreenshotRequest, PDFRequest, JSEndpointRequest, ) from utils import ( FilterType, load_config, setup_logging, verify_email_domain ) import os import sys import time import asyncio from typing import List from contextlib import asynccontextmanager import pathlib from fastapi import ( FastAPI, HTTPException, Request, Path, Query, Depends ) from rank_bm25 import BM25Okapi from fastapi.responses import ( StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse ) from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware from fastapi.middleware.trustedhost import TrustedHostMiddleware from fastapi.staticfiles import StaticFiles from job import init_job_router from mcp_bridge import attach_mcp, mcp_resource, mcp_template, mcp_tool import ast import crawl4ai as _c4 from pydantic import BaseModel, Field from slowapi import Limiter from slowapi.util import get_remote_address from prometheus_fastapi_instrumentator import Instrumentator from redis import asyncio as aioredis # ── internal imports (after sys.path append) 
───────────────── sys.path.append(os.path.dirname(os.path.realpath(__file__))) # ────────────────── configuration / logging ────────────────── config = load_config() setup_logging(config) __version__ = "0.5.1-d1" # ── global page semaphore (hard cap) ───────────────────────── MAX_PAGES = config["crawler"]["pool"].get("max_pages", 30) GLOBAL_SEM = asyncio.Semaphore(MAX_PAGES) # ── default browser config helper ───────────────────────────── def get_default_browser_config() -> BrowserConfig: """Get default BrowserConfig from config.yml.""" return BrowserConfig( extra_args=config["crawler"]["browser"].get("extra_args", []), **config["crawler"]["browser"].get("kwargs", {}), ) # import logging # page_log = logging.getLogger("page_cap") # orig_arun = AsyncWebCrawler.arun # async def capped_arun(self, *a, **kw): # await GLOBAL_SEM.acquire() # ← take slot # try: # in_flight = MAX_PAGES - GLOBAL_SEM._value # used permits # page_log.info("🕸️ pages_in_flight=%s / %s", in_flight, MAX_PAGES) # return await orig_arun(self, *a, **kw) # finally: # GLOBAL_SEM.release() # ← free slot orig_arun = AsyncWebCrawler.arun async def capped_arun(self, *a, **kw): async with GLOBAL_SEM: return await orig_arun(self, *a, **kw) AsyncWebCrawler.arun = capped_arun # ───────────────────── FastAPI lifespan ────────────────────── @asynccontextmanager async def lifespan(_: FastAPI): from crawler_pool import init_permanent from monitor import MonitorStats import monitor as monitor_module # Initialize monitor monitor_module.monitor_stats = MonitorStats(redis) await monitor_module.monitor_stats.load_from_redis() monitor_module.monitor_stats.start_persistence_worker() # Initialize browser pool await init_permanent(BrowserConfig( extra_args=config["crawler"]["browser"].get("extra_args", []), **config["crawler"]["browser"].get("kwargs", {}), )) # Start background tasks app.state.janitor = asyncio.create_task(janitor()) app.state.timeline_updater = asyncio.create_task(_timeline_updater()) yield # Cleanup 
app.state.janitor.cancel() app.state.timeline_updater.cancel() # Monitor cleanup (persist stats and stop workers) from monitor import get_monitor try: await get_monitor().cleanup() except Exception as e: logger.error(f"Monitor cleanup failed: {e}") await close_all() async def _timeline_updater(): """Update timeline data every 5 seconds.""" from monitor import get_monitor while True: await asyncio.sleep(5) try: await asyncio.wait_for(get_monitor().update_timeline(), timeout=4.0) except asyncio.TimeoutError: logger.warning("Timeline update timeout after 4s") except Exception as e: logger.warning(f"Timeline update error: {e}") # ───────────────────── FastAPI instance ────────────────────── app = FastAPI( title=config["app"]["title"], version=config["app"]["version"], lifespan=lifespan, ) # ── static playground ────────────────────────────────────── STATIC_DIR = pathlib.Path(__file__).parent / "static" / "playground" if not STATIC_DIR.exists(): raise RuntimeError(f"Playground assets not found at {STATIC_DIR}") app.mount( "/playground", StaticFiles(directory=STATIC_DIR, html=True), name="play", ) # ── static monitor dashboard ──────────────────────────────── MONITOR_DIR = pathlib.Path(__file__).parent / "static" / "monitor" if not MONITOR_DIR.exists(): raise RuntimeError(f"Monitor assets not found at {MONITOR_DIR}") app.mount( "/dashboard", StaticFiles(directory=MONITOR_DIR, html=True), name="monitor_ui", ) # ── static assets (logo, etc) ──────────────────────────────── ASSETS_DIR = pathlib.Path(__file__).parent / "static" / "assets" if ASSETS_DIR.exists(): app.mount( "/static/assets", StaticFiles(directory=ASSETS_DIR), name="assets", ) @app.get("/") async def root(): return RedirectResponse("/playground") # ─────────────────── infra / middleware ───────────────────── redis = aioredis.from_url(config["redis"].get("uri", "redis://localhost")) limiter = Limiter( key_func=get_remote_address, default_limits=[config["rate_limiting"]["default_limit"]], 
storage_uri=config["rate_limiting"]["storage_uri"], ) def _setup_security(app_: FastAPI): sec = config["security"] if not sec["enabled"]: return if sec.get("https_redirect"): app_.add_middleware(HTTPSRedirectMiddleware) if sec.get("trusted_hosts", []) != ["*"]: app_.add_middleware( TrustedHostMiddleware, allowed_hosts=sec["trusted_hosts"] ) _setup_security(app) if config["observability"]["prometheus"]["enabled"]: Instrumentator().instrument(app).expose(app) token_dep = get_token_dependency(config) @app.middleware("http") async def add_security_headers(request: Request, call_next): resp = await call_next(request) if config["security"]["enabled"]: resp.headers.update(config["security"]["headers"]) return resp # ───────────────── safe config‑dump helper ───────────────── ALLOWED_TYPES = { "CrawlerRunConfig": CrawlerRunConfig, "BrowserConfig": BrowserConfig, } def _safe_eval_config(expr: str) -> dict: """ Accept exactly one top‑level call to CrawlerRunConfig(...) or BrowserConfig(...). Whatever is inside the parentheses is fine *except* further function calls (so no __import__('os') stuff). All public names from crawl4ai are available when we eval. """ tree = ast.parse(expr, mode="eval") # must be a single call if not isinstance(tree.body, ast.Call): raise ValueError("Expression must be a single constructor call") call = tree.body if not (isinstance(call.func, ast.Name) and call.func.id in {"CrawlerRunConfig", "BrowserConfig"}): raise ValueError( "Only CrawlerRunConfig(...) or BrowserConfig(...) 
are allowed") # forbid nested calls to keep the surface tiny for node in ast.walk(call): if isinstance(node, ast.Call) and node is not call: raise ValueError("Nested function calls are not permitted") # expose everything that crawl4ai exports, nothing else safe_env = {name: getattr(_c4, name) for name in dir(_c4) if not name.startswith("_")} obj = eval(compile(tree, "<config>", "eval"), {"__builtins__": {}}, safe_env) return obj.dump() # ── job router ────────────────────────────────────────────── app.include_router(init_job_router(redis, config, token_dep)) # ── monitor router ────────────────────────────────────────── from monitor_routes import router as monitor_router app.include_router(monitor_router) logger = logging.getLogger(__name__) # ──────────────────────── Endpoints ────────────────────────── @app.post("/token") async def get_token(req: TokenRequest): if not verify_email_domain(req.email): raise HTTPException(400, "Invalid email domain") token = create_access_token({"sub": req.email}) return {"email": req.email, "access_token": token, "token_type": "bearer"} @app.post("/config/dump") async def config_dump(raw: RawCode): try: return JSONResponse(_safe_eval_config(raw.code.strip())) except Exception as e: raise HTTPException(400, str(e)) @app.post("/md") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("md") async def get_markdown( request: Request, body: MarkdownRequest, _td: Dict = Depends(token_dep), ): if not body.url.startswith(("http://", "https://")) and not body.url.startswith(("raw:", "raw://")): raise HTTPException( 400, "Invalid URL format. 
Must start with http://, https://, or for raw HTML (raw:, raw://)") markdown = await handle_markdown_request( body.url, body.f, body.q, body.c, config, body.provider, body.temperature, body.base_url ) return JSONResponse({ "url": body.url, "filter": body.f, "query": body.q, "cache": body.c, "markdown": markdown, "success": True }) @app.post("/html") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("html") async def generate_html( request: Request, body: HTMLRequest, _td: Dict = Depends(token_dep), ): """ Crawls the URL, preprocesses the raw HTML for schema extraction, and returns the processed HTML. Use when you need sanitized HTML structures for building schemas or further processing. """ from crawler_pool import get_crawler cfg = CrawlerRunConfig() try: crawler = await get_crawler(get_default_browser_config()) results = await crawler.arun(url=body.url, config=cfg) if not results[0].success: raise HTTPException(500, detail=results[0].error_message or "Crawl failed") raw_html = results[0].html from crawl4ai.utils import preprocess_html_for_schema processed_html = preprocess_html_for_schema(raw_html) return JSONResponse({"html": processed_html, "url": body.url, "success": True}) except Exception as e: raise HTTPException(500, detail=str(e)) # Screenshot endpoint @app.post("/screenshot") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("screenshot") async def generate_screenshot( request: Request, body: ScreenshotRequest, _td: Dict = Depends(token_dep), ): """ Capture a full-page PNG screenshot of the specified URL, waiting an optional delay before capture, Use when you need an image snapshot of the rendered page. Its recommened to provide an output path to save the screenshot. Then in result instead of the screenshot you will get a path to the saved file. 
""" from crawler_pool import get_crawler try: cfg = CrawlerRunConfig(screenshot=True, screenshot_wait_for=body.screenshot_wait_for) crawler = await get_crawler(get_default_browser_config()) results = await crawler.arun(url=body.url, config=cfg) if not results[0].success: raise HTTPException(500, detail=results[0].error_message or "Crawl failed") screenshot_data = results[0].screenshot if body.output_path: abs_path = os.path.abspath(body.output_path) os.makedirs(os.path.dirname(abs_path), exist_ok=True) with open(abs_path, "wb") as f: f.write(base64.b64decode(screenshot_data)) return {"success": True, "path": abs_path} return {"success": True, "screenshot": screenshot_data} except Exception as e: raise HTTPException(500, detail=str(e)) # PDF endpoint @app.post("/pdf") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("pdf") async def generate_pdf( request: Request, body: PDFRequest, _td: Dict = Depends(token_dep), ): """ Generate a PDF document of the specified URL, Use when you need a printable or archivable snapshot of the page. It is recommended to provide an output path to save the PDF. Then in result instead of the PDF you will get a path to the saved file. 
""" from crawler_pool import get_crawler try: cfg = CrawlerRunConfig(pdf=True) crawler = await get_crawler(get_default_browser_config()) results = await crawler.arun(url=body.url, config=cfg) if not results[0].success: raise HTTPException(500, detail=results[0].error_message or "Crawl failed") pdf_data = results[0].pdf if body.output_path: abs_path = os.path.abspath(body.output_path) os.makedirs(os.path.dirname(abs_path), exist_ok=True) with open(abs_path, "wb") as f: f.write(pdf_data) return {"success": True, "path": abs_path} return {"success": True, "pdf": base64.b64encode(pdf_data).decode()} except Exception as e: raise HTTPException(500, detail=str(e)) @app.post("/execute_js") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("execute_js") async def execute_js( request: Request, body: JSEndpointRequest, _td: Dict = Depends(token_dep), ): """ Execute a sequence of JavaScript snippets on the specified URL. Return the full CrawlResult JSON (first result). Use this when you need to interact with dynamic pages using JS. REMEMBER: Scripts accept a list of separated JS snippets to execute and execute them in order. IMPORTANT: Each script should be an expression that returns a value. It can be an IIFE or an async function. You can think of it as such. Your script will replace '{script}' and execute in the browser context. So provide either an IIFE or a sync/async function that returns a value. Return Format: - The return result is an instance of CrawlResult, so you have access to markdown, links, and other stuff. If this is enough, you don't need to call again for other endpoints. 
```python class CrawlResult(BaseModel): url: str html: str success: bool cleaned_html: Optional[str] = None media: Dict[str, List[Dict]] = {} links: Dict[str, List[Dict]] = {} downloaded_files: Optional[List[str]] = None js_execution_result: Optional[Dict[str, Any]] = None screenshot: Optional[str] = None pdf: Optional[bytes] = None mhtml: Optional[str] = None _markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None) extracted_content: Optional[str] = None metadata: Optional[dict] = None error_message: Optional[str] = None session_id: Optional[str] = None response_headers: Optional[dict] = None status_code: Optional[int] = None ssl_certificate: Optional[SSLCertificate] = None dispatch_result: Optional[DispatchResult] = None redirected_url: Optional[str] = None network_requests: Optional[List[Dict[str, Any]]] = None console_messages: Optional[List[Dict[str, Any]]] = None class MarkdownGenerationResult(BaseModel): raw_markdown: str markdown_with_citations: str references_markdown: str fit_markdown: Optional[str] = None fit_html: Optional[str] = None ``` """ from crawler_pool import get_crawler try: cfg = CrawlerRunConfig(js_code=body.scripts) crawler = await get_crawler(get_default_browser_config()) results = await crawler.arun(url=body.url, config=cfg) if not results[0].success: raise HTTPException(500, detail=results[0].error_message or "Crawl failed") data = results[0].model_dump() return JSONResponse(data) except Exception as e: raise HTTPException(500, detail=str(e)) @app.get("/llm/{url:path}") async def llm_endpoint( request: Request, url: str = Path(...), q: str = Query(...), _td: Dict = Depends(token_dep), ): if not q: raise HTTPException(400, "Query parameter 'q' is required") if not url.startswith(("http://", "https://")) and not url.startswith(("raw:", "raw://")): url = "https://" + url answer = await handle_llm_qa(url, q, config) return JSONResponse({"answer": answer}) @app.get("/schema") async def get_schema(): from crawl4ai import 
BrowserConfig, CrawlerRunConfig return {"browser": BrowserConfig().dump(), "crawler": CrawlerRunConfig().dump()} @app.get("/hooks/info") async def get_hooks_info(): """Get information about available hook points and their signatures""" from hook_manager import UserHookManager hook_info = {} for hook_point, params in UserHookManager.HOOK_SIGNATURES.items(): hook_info[hook_point] = { "parameters": params, "description": get_hook_description(hook_point), "example": get_hook_example(hook_point) } return JSONResponse({ "available_hooks": hook_info, "timeout_limits": { "min": 1, "max": 120, "default": 30 } }) def get_hook_description(hook_point: str) -> str: """Get description for each hook point""" descriptions = { "on_browser_created": "Called after browser instance is created", "on_page_context_created": "Called after page and context are created - ideal for authentication", "before_goto": "Called before navigating to the target URL", "after_goto": "Called after navigation is complete", "on_user_agent_updated": "Called when user agent is updated", "on_execution_started": "Called when custom JavaScript execution begins", "before_retrieve_html": "Called before retrieving the final HTML - ideal for scrolling", "before_return_html": "Called just before returning the HTML content" } return descriptions.get(hook_point, "") def get_hook_example(hook_point: str) -> str: """Get example code for each hook point""" examples = { "on_page_context_created": """async def hook(page, context, **kwargs): # Add authentication cookie await context.add_cookies([{ 'name': 'session', 'value': 'my-session-id', 'domain': '.example.com' }]) return page""", "before_retrieve_html": """async def hook(page, context, **kwargs): # Scroll to load lazy content await page.evaluate("window.scrollTo(0, document.body.scrollHeight)") await page.wait_for_timeout(2000) return page""", "before_goto": """async def hook(page, context, url, **kwargs): # Set custom headers await page.set_extra_http_headers({ 
'X-Custom-Header': 'value' }) return page""" } return examples.get(hook_point, "# Implement your hook logic here\nreturn page") @app.get(config["observability"]["health_check"]["endpoint"]) async def health(): return {"status": "ok", "timestamp": time.time(), "version": __version__} @app.get(config["observability"]["prometheus"]["endpoint"]) async def metrics(): return RedirectResponse(config["observability"]["prometheus"]["endpoint"]) @app.post("/crawl") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("crawl") async def crawl( request: Request, crawl_request: CrawlRequestWithHooks, _td: Dict = Depends(token_dep), ): """ Crawl a list of URLs and return the results as JSON. For streaming responses, use /crawl/stream endpoint. Supports optional user-provided hook functions for customization. """ if not crawl_request.urls: raise HTTPException(400, "At least one URL required") # Check whether it is a redirection for a streaming request crawler_config = CrawlerRunConfig.load(crawl_request.crawler_config) if crawler_config.stream: return await stream_process(crawl_request=crawl_request) # Prepare hooks config if provided hooks_config = None if crawl_request.hooks: hooks_config = { 'code': crawl_request.hooks.code, 'timeout': crawl_request.hooks.timeout } results = await handle_crawl_request( urls=crawl_request.urls, browser_config=crawl_request.browser_config, crawler_config=crawl_request.crawler_config, config=config, hooks_config=hooks_config ) # check if all of the results are not successful if all(not result["success"] for result in results["results"]): raise HTTPException(500, f"Crawl request failed: {results['results'][0]['error_message']}") return JSONResponse(results) @app.post("/crawl/stream") @limiter.limit(config["rate_limiting"]["default_limit"]) async def crawl_stream( request: Request, crawl_request: CrawlRequestWithHooks, _td: Dict = Depends(token_dep), ): if not crawl_request.urls: raise HTTPException(400, "At least one URL required") 
return await stream_process(crawl_request=crawl_request) async def stream_process(crawl_request: CrawlRequestWithHooks): # Prepare hooks config if provided# Prepare hooks config if provided hooks_config = None if crawl_request.hooks: hooks_config = { 'code': crawl_request.hooks.code, 'timeout': crawl_request.hooks.timeout } crawler, gen, hooks_info = await handle_stream_crawl_request( urls=crawl_request.urls, browser_config=crawl_request.browser_config, crawler_config=crawl_request.crawler_config, config=config, hooks_config=hooks_config ) # Add hooks info to response headers if available headers = { "Cache-Control": "no-cache", "Connection": "keep-alive", "X-Stream-Status": "active", } if hooks_info: import json headers["X-Hooks-Status"] = json.dumps(hooks_info['status']['status']) return StreamingResponse( stream_results(crawler, gen), media_type="application/x-ndjson", headers=headers, ) def chunk_code_functions(code_md: str) -> List[str]: """Extract each function/class from markdown code blocks per file.""" pattern = re.compile( # match "## File: <path>" then a ```py fence, then capture until the closing ``` r'##\s*File:\s*(?P<path>.+?)\s*?\r?\n' # file header r'```py\s*?\r?\n' # opening fence r'(?P<code>.*?)(?=\r?\n```)', # code block re.DOTALL ) chunks: List[str] = [] for m in pattern.finditer(code_md): file_path = m.group("path").strip() code_blk = m.group("code") tree = ast.parse(code_blk) lines = code_blk.splitlines() for node in tree.body: if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): start = node.lineno - 1 end = getattr(node, "end_lineno", start + 1) snippet = "\n".join(lines[start:end]) chunks.append(f"# File: {file_path}\n{snippet}") return chunks def chunk_doc_sections(doc: str) -> List[str]: lines = doc.splitlines(keepends=True) sections = [] current: List[str] = [] for line in lines: if re.match(r"^#{1,6}\s", line): if current: sections.append("".join(current)) current = [line] else: current.append(line) if current: 
sections.append("".join(current)) return sections @app.get("/ask") @limiter.limit(config["rate_limiting"]["default_limit"]) @mcp_tool("ask") async def get_context( request: Request, _td: Dict = Depends(token_dep), context_type: str = Query("all", regex="^(code|doc|all)$"), query: Optional[str] = Query( None, description="search query to filter chunks"), score_ratio: float = Query( 0.5, ge=0.0, le=1.0, description="min score as fraction of max_score"), max_results: int = Query( 20, ge=1, description="absolute cap on returned chunks"), ): """ This end point is design for any questions about Crawl4ai library. It returns a plain text markdown with extensive information about Crawl4ai. You can use this as a context for any AI assistant. Use this endpoint for AI assistants to retrieve library context for decision making or code generation tasks. Alway is BEST practice you provide a query to filter the context. Otherwise the lenght of the response will be very long. Parameters: - context_type: Specify "code" for code context, "doc" for documentation context, or "all" for both. - query: RECOMMENDED search query to filter paragraphs using BM25. You can leave this empty to get all the context. - score_ratio: Minimum score as a fraction of the maximum score for filtering results. - max_results: Maximum number of results to return. Default is 20. Returns: - JSON response with the requested context. - If "code" is specified, returns the code context. - If "doc" is specified, returns the documentation context. - If "all" is specified, returns both code and documentation contexts. 
""" # load contexts base = os.path.dirname(__file__) code_path = os.path.join(base, "c4ai-code-context.md") doc_path = os.path.join(base, "c4ai-doc-context.md") if not os.path.exists(code_path) or not os.path.exists(doc_path): raise HTTPException(404, "Context files not found") with open(code_path, "r") as f: code_content = f.read() with open(doc_path, "r") as f: doc_content = f.read() # if no query, just return raw contexts if not query: if context_type == "code": return JSONResponse({"code_context": code_content}) if context_type == "doc": return JSONResponse({"doc_context": doc_content}) return JSONResponse({ "code_context": code_content, "doc_context": doc_content, }) tokens = query.split() results: Dict[str, List[Dict[str, float]]] = {} # code BM25 over functions/classes if context_type in ("code", "all"): code_chunks = chunk_code_functions(code_content) bm25 = BM25Okapi([c.split() for c in code_chunks]) scores = bm25.get_scores(tokens) max_sc = float(scores.max()) if scores.size > 0 else 0.0 cutoff = max_sc * score_ratio picked = [(c, s) for c, s in zip(code_chunks, scores) if s >= cutoff] picked = sorted(picked, key=lambda x: x[1], reverse=True)[:max_results] results["code_results"] = [{"text": c, "score": s} for c, s in picked] # doc BM25 over markdown sections if context_type in ("doc", "all"): sections = chunk_doc_sections(doc_content) bm25d = BM25Okapi([sec.split() for sec in sections]) scores_d = bm25d.get_scores(tokens) max_sd = float(scores_d.max()) if scores_d.size > 0 else 0.0 cutoff_d = max_sd * score_ratio idxs = [i for i, s in enumerate(scores_d) if s >= cutoff_d] neighbors = set(i for idx in idxs for i in (idx-1, idx, idx+1)) valid = [i for i in sorted(neighbors) if 0 <= i < len(sections)] valid = valid[:max_results] results["doc_results"] = [ {"text": sections[i], "score": scores_d[i]} for i in valid ] return JSONResponse(results) # attach MCP layer (adds /mcp/ws, /mcp/sse, /mcp/schema) print(f"MCP server running on 
{config['app']['host']}:{config['app']['port']}") attach_mcp( app, base_url=f"http://{config['app']['host']}:{config['app']['port']}" ) # ────────────────────────── cli ────────────────────────────── if __name__ == "__main__": import uvicorn uvicorn.run( "server:app", host=config["app"]["host"], port=config["app"]["port"], reload=config["app"]["reload"], timeout_keep_alive=config["app"]["timeout_keep_alive"], ) # ─────────────────────────────────────────────────────────────
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/monitor.py
deploy/docker/monitor.py
# monitor.py - Real-time monitoring stats with Redis persistence
import time
import json
import asyncio
from typing import Dict, List, Optional
from datetime import datetime, timezone
from collections import deque
from redis import asyncio as aioredis
from utils import get_container_memory_percent
import psutil
import logging

logger = logging.getLogger(__name__)

# Hoisted: a psutil.Process() handle always refers to the current process,
# so there is no reason to construct a new one on every tracked request.
_PROC = psutil.Process()


class MonitorStats:
    """Tracks real-time server stats with Redis persistence.

    In-memory deques give fast reads for the dashboard; aggregated endpoint
    counters are mirrored to Redis (24h TTL) by a background worker so they
    survive restarts. Pool inspection imports crawler_pool lazily and takes
    its LOCK to avoid racing the janitor.
    """

    def __init__(self, redis: aioredis.Redis):
        self.redis = redis
        self.start_time = time.time()

        # In-memory queues (fast reads, Redis backup)
        self.active_requests: Dict[str, Dict] = {}          # id -> request info
        self.completed_requests: deque = deque(maxlen=100)  # Last 100
        self.janitor_events: deque = deque(maxlen=100)
        self.errors: deque = deque(maxlen=100)

        # Endpoint stats (persisted in Redis)
        # endpoint -> {count, total_time, errors, pool_hits, success}
        self.endpoint_stats: Dict[str, Dict] = {}

        # Background persistence queue (max 10 pending persist requests)
        self._persist_queue: asyncio.Queue = asyncio.Queue(maxsize=10)
        self._persist_worker_task: Optional[asyncio.Task] = None

        # Timeline data (5min window, 5s resolution = 60 points)
        self.memory_timeline: deque = deque(maxlen=60)
        self.requests_timeline: deque = deque(maxlen=60)
        self.browser_timeline: deque = deque(maxlen=60)

    def _queue_persist(self) -> None:
        """Best-effort: ask the background worker to persist endpoint stats."""
        try:
            self._persist_queue.put_nowait(True)
        except asyncio.QueueFull:
            logger.warning("Persistence queue full, skipping")

    async def track_request_start(self, request_id: str, endpoint: str,
                                  url: str, config: Dict = None):
        """Track new request start.

        Records the request in ``active_requests`` and bumps the per-endpoint
        counter. Persistence is delegated to the background worker.
        """
        req_info = {
            "id": request_id,
            "endpoint": endpoint,
            "url": url[:100],  # Truncate long URLs
            "start_time": time.time(),
            "config_sig": config.get("sig", "default") if config else "default",
            "mem_start": _PROC.memory_info().rss / (1024 * 1024),
        }
        self.active_requests[request_id] = req_info

        # Lazily create the endpoint's counter bucket, then increment.
        if endpoint not in self.endpoint_stats:
            self.endpoint_stats[endpoint] = {
                "count": 0, "total_time": 0, "errors": 0,
                "pool_hits": 0, "success": 0,
            }
        self.endpoint_stats[endpoint]["count"] += 1

        # Queue persistence (handled by background worker)
        self._queue_persist()

    async def track_request_end(self, request_id: str, success: bool,
                                error: str = None, pool_hit: bool = True,
                                status_code: int = 200):
        """Track request completion: latency, memory delta, outcome."""
        if request_id not in self.active_requests:
            # Unknown id: either never tracked or already finalized.
            return

        req_info = self.active_requests.pop(request_id)
        end_time = time.time()
        elapsed = end_time - req_info["start_time"]
        mem_end = _PROC.memory_info().rss / (1024 * 1024)
        mem_delta = mem_end - req_info["mem_start"]

        # Update aggregated per-endpoint stats
        endpoint = req_info["endpoint"]
        if endpoint in self.endpoint_stats:
            self.endpoint_stats[endpoint]["total_time"] += elapsed
            if success:
                self.endpoint_stats[endpoint]["success"] += 1
            else:
                self.endpoint_stats[endpoint]["errors"] += 1
            if pool_hit:
                self.endpoint_stats[endpoint]["pool_hits"] += 1

        # Add to completed queue
        completed = {
            **req_info,
            "end_time": end_time,
            "elapsed": round(elapsed, 2),
            "mem_delta": round(mem_delta, 1),
            "success": success,
            "error": error,
            "status_code": status_code,
            "pool_hit": pool_hit,
        }
        self.completed_requests.append(completed)

        # Track errors separately for the error log view
        if not success and error:
            self.errors.append({
                "timestamp": end_time,
                "endpoint": endpoint,
                "url": req_info["url"],
                "error": error,
                "request_id": request_id,
            })

        await self._persist_endpoint_stats()

    async def track_janitor_event(self, event_type: str, sig: str, details: Dict):
        """Track janitor cleanup events."""
        self.janitor_events.append({
            "timestamp": time.time(),
            "type": event_type,  # "close_cold", "close_hot", "promote"
            "sig": sig[:8],
            "details": details,
        })

    def _cleanup_old_entries(self, max_age_seconds: int = 300):
        """Remove entries older than max_age_seconds (default 5min)."""
        cutoff = time.time() - max_age_seconds

        # Deques are appended in time order, so popping from the left until
        # the head is fresh removes exactly the stale prefix.
        while self.completed_requests and \
                self.completed_requests[0].get("end_time", 0) < cutoff:
            self.completed_requests.popleft()
        while self.janitor_events and \
                self.janitor_events[0].get("timestamp", 0) < cutoff:
            self.janitor_events.popleft()
        while self.errors and self.errors[0].get("timestamp", 0) < cutoff:
            self.errors.popleft()

    async def update_timeline(self):
        """Update timeline data points (called every 5s)."""
        now = time.time()
        mem_pct = get_container_memory_percent()

        # Clean old entries (keep last 5 minutes)
        self._cleanup_old_entries(max_age_seconds=300)

        # Count requests completed in the last 5s
        recent_reqs = sum(
            1 for req in self.completed_requests
            if now - req.get("end_time", 0) < 5
        )

        # Browser counts (acquire lock to prevent race conditions)
        from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
        async with LOCK:
            browser_count = {
                "permanent": 1 if PERMANENT else 0,
                "hot": len(HOT_POOL),
                "cold": len(COLD_POOL),
            }

        self.memory_timeline.append({"time": now, "value": mem_pct})
        self.requests_timeline.append({"time": now, "value": recent_reqs})
        self.browser_timeline.append({"time": now, "browsers": browser_count})

    async def _persist_endpoint_stats(self):
        """Persist endpoint stats to Redis (24h TTL); failures only warn."""
        try:
            await self.redis.set(
                "monitor:endpoint_stats",
                json.dumps(self.endpoint_stats),
                ex=86400,  # 24h TTL
            )
        except Exception as e:
            logger.warning(f"Failed to persist endpoint stats: {e}")

    async def _persistence_worker(self):
        """Background worker to persist stats to Redis."""
        while True:
            try:
                await self._persist_queue.get()
                await self._persist_endpoint_stats()
                self._persist_queue.task_done()
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Persistence worker error: {e}")

    def start_persistence_worker(self):
        """Start the background persistence worker (idempotent)."""
        if not self._persist_worker_task:
            self._persist_worker_task = asyncio.create_task(self._persistence_worker())
            logger.info("Started persistence worker")

    async def stop_persistence_worker(self):
        """Stop the background persistence worker."""
        if self._persist_worker_task:
            self._persist_worker_task.cancel()
            try:
                await self._persist_worker_task
            except asyncio.CancelledError:
                pass
            self._persist_worker_task = None
            logger.info("Stopped persistence worker")

    async def cleanup(self):
        """Cleanup on shutdown - persist final stats and stop workers."""
        logger.info("Monitor cleanup starting...")
        try:
            # Persist final stats before shutdown
            await self._persist_endpoint_stats()
            # Stop background worker
            await self.stop_persistence_worker()
            logger.info("Monitor cleanup completed")
        except Exception as e:
            logger.error(f"Monitor cleanup error: {e}")

    async def load_from_redis(self):
        """Load persisted stats from Redis."""
        try:
            data = await self.redis.get("monitor:endpoint_stats")
            if data:
                self.endpoint_stats = json.loads(data)
                logger.info("Loaded endpoint stats from Redis")
        except Exception as e:
            logger.warning(f"Failed to load from Redis: {e}")

    async def get_health_summary(self) -> Dict:
        """Get current system health snapshot."""
        mem_pct = get_container_memory_percent()
        # Fix: psutil.cpu_percent(interval=0.1) sleeps for the interval and
        # would stall the event loop inside this coroutine; run it in a
        # worker thread so the measurement semantics stay the same.
        cpu_pct = await asyncio.to_thread(psutil.cpu_percent, 0.1)

        # Network I/O (cumulative totals since boot, per psutil)
        net = psutil.net_io_counters()

        # Pool status (acquire lock to prevent race conditions)
        from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
        async with LOCK:
            # TODO: Track actual browser process memory instead of estimates
            # These are conservative estimates based on typical Chromium usage
            permanent_mem = 270 if PERMANENT else 0  # ~270MB permanent browser
            hot_mem = len(HOT_POOL) * 180            # ~180MB per hot browser
            cold_mem = len(COLD_POOL) * 180          # ~180MB per cold browser
            permanent_active = PERMANENT is not None
            hot_count = len(HOT_POOL)
            cold_count = len(COLD_POOL)

        return {
            "container": {
                "memory_percent": round(mem_pct, 1),
                "cpu_percent": round(cpu_pct, 1),
                "network_sent_mb": round(net.bytes_sent / (1024**2), 2),
                "network_recv_mb": round(net.bytes_recv / (1024**2), 2),
                "uptime_seconds": int(time.time() - self.start_time),
            },
            "pool": {
                "permanent": {"active": permanent_active, "memory_mb": permanent_mem},
                "hot": {"count": hot_count, "memory_mb": hot_mem},
                "cold": {"count": cold_count, "memory_mb": cold_mem},
                "total_memory_mb": permanent_mem + hot_mem + cold_mem,
            },
            "janitor": {
                "next_cleanup_estimate": "adaptive",  # Would need janitor state
                "memory_pressure": "LOW" if mem_pct < 60
                                   else "MEDIUM" if mem_pct < 80 else "HIGH",
            },
        }

    def get_active_requests(self) -> List[Dict]:
        """Get list of currently active requests."""
        now = time.time()
        return [
            {**req, "elapsed": round(now - req["start_time"], 1), "status": "running"}
            for req in self.active_requests.values()
        ]

    def get_completed_requests(self, limit: int = 50,
                               filter_status: str = "all") -> List[Dict]:
        """Get recent completed requests, optionally filtered by outcome."""
        requests = list(self.completed_requests)[-limit:]
        if filter_status == "success":
            requests = [r for r in requests if r.get("success")]
        elif filter_status == "error":
            requests = [r for r in requests if not r.get("success")]
        return requests

    async def get_browser_list(self) -> List[Dict]:
        """Get detailed browser pool information."""
        from crawler_pool import (
            PERMANENT, HOT_POOL, COLD_POOL,
            LAST_USED, USAGE_COUNT, DEFAULT_CONFIG_SIG, LOCK,
        )
        browsers = []
        now = time.time()

        # Acquire lock to prevent race conditions during iteration
        async with LOCK:
            if PERMANENT:
                browsers.append({
                    "type": "permanent",
                    "sig": DEFAULT_CONFIG_SIG[:8] if DEFAULT_CONFIG_SIG else "unknown",
                    "age_seconds": int(now - self.start_time),
                    "last_used_seconds": int(now - LAST_USED.get(DEFAULT_CONFIG_SIG, now)),
                    "memory_mb": 270,
                    "hits": USAGE_COUNT.get(DEFAULT_CONFIG_SIG, 0),
                    "killable": False,
                })
            for sig, crawler in HOT_POOL.items():
                browsers.append({
                    "type": "hot",
                    "sig": sig[:8],
                    "age_seconds": int(now - self.start_time),  # Approximation
                    "last_used_seconds": int(now - LAST_USED.get(sig, now)),
                    "memory_mb": 180,  # Estimate
                    "hits": USAGE_COUNT.get(sig, 0),
                    "killable": True,
                })
            for sig, crawler in COLD_POOL.items():
                browsers.append({
                    "type": "cold",
                    "sig": sig[:8],
                    "age_seconds": int(now - self.start_time),
                    "last_used_seconds": int(now - LAST_USED.get(sig, now)),
                    "memory_mb": 180,
                    "hits": USAGE_COUNT.get(sig, 0),
                    "killable": True,
                })
        return browsers

    def get_endpoint_stats_summary(self) -> Dict[str, Dict]:
        """Get aggregated endpoint statistics (latency, success/hit rates)."""
        summary = {}
        for endpoint, stats in self.endpoint_stats.items():
            count = stats["count"]
            avg_time = (stats["total_time"] / count) if count > 0 else 0
            success_rate = (stats["success"] / count * 100) if count > 0 else 0
            pool_hit_rate = (stats["pool_hits"] / count * 100) if count > 0 else 0
            summary[endpoint] = {
                "count": count,
                "avg_latency_ms": round(avg_time * 1000, 1),
                "success_rate_percent": round(success_rate, 1),
                "pool_hit_rate_percent": round(pool_hit_rate, 1),
                "errors": stats["errors"],
            }
        return summary

    def get_timeline_data(self, metric: str, window: str = "5m") -> Dict:
        """Get timeline data for charts.

        Note: `window` is accepted for API symmetry but only the 5m window is
        currently stored (deque maxlen=60 at 5s resolution).
        """
        if metric == "memory":
            data = list(self.memory_timeline)
        elif metric == "requests":
            data = list(self.requests_timeline)
        elif metric == "browsers":
            data = list(self.browser_timeline)
        else:
            return {"timestamps": [], "values": []}

        return {
            "timestamps": [int(d["time"]) for d in data],
            # memory/requests points carry "value"; browser points carry
            # a "browsers" dict instead.
            "values": [d.get("value", d.get("browsers")) for d in data],
        }

    def get_janitor_log(self, limit: int = 100) -> List[Dict]:
        """Get recent janitor events."""
        return list(self.janitor_events)[-limit:]

    def get_errors_log(self, limit: int = 100) -> List[Dict]:
        """Get recent errors."""
        return list(self.errors)[-limit:]


# Global instance (initialized in server.py)
monitor_stats: Optional[MonitorStats] = None


def get_monitor() -> MonitorStats:
    """Get global monitor instance."""
    if monitor_stats is None:
        raise RuntimeError("Monitor not initialized")
    return monitor_stats
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/job.py
deploy/docker/job.py
""" Job endpoints (enqueue + poll) for long-running LL​M extraction and raw crawl. Relies on the existing Redis task helpers in api.py """ from typing import Dict, Optional, Callable from fastapi import APIRouter, BackgroundTasks, Depends, Request from pydantic import BaseModel, HttpUrl from api import ( handle_llm_request, handle_crawl_job, handle_task_status, ) from schemas import WebhookConfig # ------------- dependency placeholders ------------- _redis = None # will be injected from server.py _config = None _token_dep: Callable = lambda: None # dummy until injected # public router router = APIRouter() # === init hook called by server.py ========================================= def init_job_router(redis, config, token_dep) -> APIRouter: """Inject shared singletons and return the router for mounting.""" global _redis, _config, _token_dep _redis, _config, _token_dep = redis, config, token_dep return router # ---------- payload models -------------------------------------------------- class LlmJobPayload(BaseModel): url: HttpUrl q: str schema: Optional[str] = None cache: bool = False provider: Optional[str] = None webhook_config: Optional[WebhookConfig] = None temperature: Optional[float] = None base_url: Optional[str] = None class CrawlJobPayload(BaseModel): urls: list[HttpUrl] browser_config: Dict = {} crawler_config: Dict = {} webhook_config: Optional[WebhookConfig] = None # ---------- LL​M job --------------------------------------------------------- @router.post("/llm/job", status_code=202) async def llm_job_enqueue( payload: LlmJobPayload, background_tasks: BackgroundTasks, request: Request, _td: Dict = Depends(lambda: _token_dep()), # late-bound dep ): webhook_config = None if payload.webhook_config: webhook_config = payload.webhook_config.model_dump(mode='json') return await handle_llm_request( _redis, background_tasks, request, str(payload.url), query=payload.q, schema=payload.schema, cache=payload.cache, config=_config, provider=payload.provider, 
webhook_config=webhook_config, temperature=payload.temperature, api_base_url=payload.base_url, ) @router.get("/llm/job/{task_id}") async def llm_job_status( request: Request, task_id: str, _td: Dict = Depends(lambda: _token_dep()) ): return await handle_task_status(_redis, task_id, base_url=str(request.base_url)) # ---------- CRAWL job ------------------------------------------------------- @router.post("/crawl/job", status_code=202) async def crawl_job_enqueue( payload: CrawlJobPayload, background_tasks: BackgroundTasks, _td: Dict = Depends(lambda: _token_dep()), ): webhook_config = None if payload.webhook_config: webhook_config = payload.webhook_config.model_dump(mode='json') return await handle_crawl_job( _redis, background_tasks, [str(u) for u in payload.urls], payload.browser_config, payload.crawler_config, config=_config, webhook_config=webhook_config, ) @router.get("/crawl/job/{task_id}") async def crawl_job_status( request: Request, task_id: str, _td: Dict = Depends(lambda: _token_dep()) ): return await handle_task_status(_redis, task_id, base_url=str(request.base_url))
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/auth.py
deploy/docker/auth.py
import os
from datetime import datetime, timedelta, timezone
from typing import Dict, Optional

from jwt import JWT, jwk_from_dict
from jwt.utils import get_int_from_datetime
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import EmailStr
from pydantic.main import BaseModel
import base64

instance = JWT()
security = HTTPBearer(auto_error=False)

# SECURITY: the fallback secret is only suitable for local development.
# Always set SECRET_KEY in the environment for any real deployment.
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
ACCESS_TOKEN_EXPIRE_MINUTES = 60


def get_jwk_from_secret(secret: str):
    """Convert a secret string into a JWK object (oct key, base64url, unpadded)."""
    secret_bytes = secret.encode('utf-8')
    b64_secret = base64.urlsafe_b64encode(secret_bytes).rstrip(b'=').decode('utf-8')
    return jwk_from_dict({"kty": "oct", "k": b64_secret})


def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create an HS256 JWT access token with an expiration.

    Args:
        data: Claims to embed in the token (copied, not mutated).
        expires_delta: Optional lifetime; defaults to ACCESS_TOKEN_EXPIRE_MINUTES.

    Returns:
        The encoded JWT string.
    """
    to_encode = data.copy()
    expire = datetime.now(timezone.utc) + (
        expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    )
    to_encode.update({"exp": get_int_from_datetime(expire)})
    signing_key = get_jwk_from_secret(SECRET_KEY)
    return instance.encode(to_encode, signing_key, alg='HS256')


def verify_token(credentials: HTTPAuthorizationCredentials) -> Dict:
    """Verify the JWT token from the Authorization header.

    Raises:
        HTTPException: 401 if no token is supplied or the token is
            invalid/expired.

    Returns:
        The decoded token payload.
    """
    if not credentials or not credentials.credentials:
        raise HTTPException(
            status_code=401,
            detail="No token provided",
            headers={"WWW-Authenticate": "Bearer"}
        )

    token = credentials.credentials
    verifying_key = get_jwk_from_secret(SECRET_KEY)
    try:
        # Fix: `algorithms` expects a *set* of algorithm names. Passing the
        # string 'HS256' made the allow-list a substring check (e.g. 'HS'
        # would match), which is laxer than intended.
        payload = instance.decode(
            token, verifying_key, do_time_check=True, algorithms={'HS256'}
        )
        return payload
    except Exception as e:
        raise HTTPException(
            status_code=401,
            detail=f"Invalid or expired token: {str(e)}",
            headers={"WWW-Authenticate": "Bearer"}
        )


def get_token_dependency(config: Dict):
    """Return the token dependency if JWT is enabled, else a function that returns None."""
    if config.get("security", {}).get("jwt_enabled", False):
        def jwt_required(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict:
            """Enforce JWT authentication when enabled."""
            if credentials is None:
                raise HTTPException(
                    status_code=401,
                    detail="Authentication required. Please provide a valid Bearer token.",
                    headers={"WWW-Authenticate": "Bearer"}
                )
            return verify_token(credentials)
        return jwt_required
    else:
        # Auth disabled: a no-op dependency that yields no token payload.
        return lambda: None


class TokenRequest(BaseModel):
    # Email used to request a token; validated by pydantic's EmailStr.
    email: EmailStr
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_6_multi_endpoint.py
deploy/docker/tests/test_6_multi_endpoint.py
#!/usr/bin/env python3
"""
Test 6: Multi-Endpoint Testing
- Tests multiple endpoints together: /html, /screenshot, /pdf, /crawl
- Validates each endpoint works correctly
- Monitors success rates per endpoint
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event

# Config
IMAGE = "crawl4ai-local:latest"
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
REQUESTS_PER_ENDPOINT = 10

# Stats
stats_history = []
stop_monitoring = Event()


def monitor_stats(container):
    """Background stats collector: samples container memory until stopped."""
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Fix: was a bare `except:` which also swallowed KeyboardInterrupt.
            pass
        time.sleep(0.5)


async def _post_n_times(client, url, payload, count):
    """POST `payload` to `url` `count` times, recording success + latency (ms).

    Shared driver for all endpoint tests — the per-endpoint functions below
    were previously four near-identical copies of this loop.
    """
    results = []
    for _ in range(count):
        start = time.time()
        try:
            resp = await client.post(url, json=payload, timeout=30.0)
            elapsed = (time.time() - start) * 1000
            results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
        except Exception as e:
            results.append({"success": False, "error": str(e)})
    return results


async def test_html(client, base_url, count):
    """Test /html endpoint."""
    return await _post_n_times(
        client, f"{base_url}/html", {"url": "https://httpbin.org/html"}, count)


async def test_screenshot(client, base_url, count):
    """Test /screenshot endpoint."""
    return await _post_n_times(
        client, f"{base_url}/screenshot", {"url": "https://httpbin.org/html"}, count)


async def test_pdf(client, base_url, count):
    """Test /pdf endpoint."""
    return await _post_n_times(
        client, f"{base_url}/pdf", {"url": "https://httpbin.org/html"}, count)


async def test_crawl(client, base_url, count):
    """Test /crawl endpoint."""
    payload = {
        "urls": ["https://httpbin.org/html"],
        "browser_config": {},
        "crawler_config": {},
    }
    return await _post_n_times(client, f"{base_url}/crawl", payload, count)


def start_container(client, image, name, port):
    """Start the test container, replacing any stale one; wait until healthy."""
    try:
        old = client.containers.get(name)
        print(f"🧹 Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass

    print(f"🚀 Starting container...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",
        mem_limit="4g",
    )

    print(f"⏳ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    print(f"✅ Container healthy!")
                    return container
            except Exception:
                # Fix: was a bare `except:`; health probe failures are expected
                # while the server boots, so keep polling.
                pass

    raise TimeoutError("Container failed to start")


async def main():
    """Run every endpoint test against a fresh container; return exit code."""
    print("="*60)
    print("TEST 6: Multi-Endpoint Testing")
    print("="*60)

    client = docker.from_env()
    container = None
    monitor_thread = None

    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\n⏳ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)

        # Start monitoring
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)

        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"📏 Baseline: {baseline_mem:.1f} MB\n")

        base_url = f"http://localhost:{PORT}"

        # Test each endpoint
        endpoints = {
            "/html": test_html,
            "/screenshot": test_screenshot,
            "/pdf": test_pdf,
            "/crawl": test_crawl,
        }

        all_endpoint_stats = {}

        async with httpx.AsyncClient() as http_client:
            for endpoint_name, test_func in endpoints.items():
                print(f"🔄 Testing {endpoint_name} ({REQUESTS_PER_ENDPOINT} requests)...")
                results = await test_func(http_client, base_url, REQUESTS_PER_ENDPOINT)

                successes = sum(1 for r in results if r.get("success"))
                success_rate = (successes / len(results)) * 100
                latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
                avg_lat = sum(latencies) / len(latencies) if latencies else 0

                all_endpoint_stats[endpoint_name] = {
                    'success_rate': success_rate,
                    'avg_latency': avg_lat,
                    'total': len(results),
                    'successes': successes
                }
                print(f"   ✓ Success: {success_rate:.1f}% ({successes}/{len(results)}), Avg: {avg_lat:.0f}ms")

        # Stop monitoring
        await asyncio.sleep(1)
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)

        # Final stats
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0

        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"{'='*60}")
        for endpoint, stats in all_endpoint_stats.items():
            print(f"  {endpoint:12} Success: {stats['success_rate']:5.1f}%  Avg: {stats['avg_latency']:6.0f}ms")
        print(f"\n  Memory:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak:     {peak_mem:.1f} MB")
        print(f"    Final:    {final_mem:.1f} MB")
        print(f"    Delta:    {final_mem - baseline_mem:+.1f} MB")
        print(f"{'='*60}")

        # Pass/Fail
        passed = True
        for endpoint, stats in all_endpoint_stats.items():
            if stats['success_rate'] < 100:
                print(f"❌ FAIL: {endpoint} success rate {stats['success_rate']:.1f}% < 100%")
                passed = False

        if passed:
            print(f"✅ TEST PASSED")
            return 0
        else:
            return 1

    except Exception as e:
        print(f"\n❌ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1

    finally:
        stop_monitoring.set()
        if container:
            print(f"🛑 Stopping container...")
            container.stop()
            container.remove()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_4_concurrent.py
deploy/docker/tests/test_4_concurrent.py
#!/usr/bin/env python3
"""
Test 4: Concurrent Load Testing
- Tests pool under concurrent load
- Escalates: 10 → 50 → 100 concurrent requests
- Validates latency distribution (P50, P95, P99)
- Monitors memory stability
"""
import asyncio
import time
import docker
import httpx
from threading import Thread, Event
from collections import defaultdict

# Config
IMAGE = "crawl4ai-local:latest"
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
LOAD_LEVELS = [
    {"name": "Light", "concurrent": 10, "requests": 20},
    {"name": "Medium", "concurrent": 50, "requests": 100},
    {"name": "Heavy", "concurrent": 100, "requests": 200},
]

# Stats
stats_history = []
stop_monitoring = Event()


def monitor_stats(container):
    """Background stats collector: samples container memory until stopped."""
    for stat in container.stats(decode=True, stream=True):
        if stop_monitoring.is_set():
            break
        try:
            mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
            stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
        except Exception:
            # Fix: was a bare `except:` which also swallowed KeyboardInterrupt.
            pass
        time.sleep(0.5)


def count_log_markers(container):
    """Extract pool-usage markers from container logs."""
    logs = container.logs().decode('utf-8')
    return {
        'permanent': logs.count("🔥 Using permanent browser"),
        'hot': logs.count("♨️ Using hot pool browser"),
        'cold': logs.count("❄️ Using cold pool browser"),
        'new': logs.count("🆕 Creating new browser"),
    }


async def hit_endpoint(client, url, payload, semaphore):
    """Single POST with concurrency control; returns success + latency (ms)."""
    async with semaphore:
        start = time.time()
        try:
            resp = await client.post(url, json=payload, timeout=60.0)
            elapsed = (time.time() - start) * 1000
            return {"success": resp.status_code == 200, "latency_ms": elapsed}
        except Exception as e:
            return {"success": False, "error": str(e)}


async def run_concurrent_test(url, payload, concurrent, total_requests):
    """Fire `total_requests` requests with at most `concurrent` in flight."""
    semaphore = asyncio.Semaphore(concurrent)
    async with httpx.AsyncClient() as client:
        tasks = [hit_endpoint(client, url, payload, semaphore)
                 for _ in range(total_requests)]
        results = await asyncio.gather(*tasks)
    return results


def calculate_percentiles(latencies):
    """Calculate (P50, P95, P99) using the nearest-rank-by-index method.

    Index int(n * p) is always <= n-1 since p < 1, so no bounds check is
    needed for non-empty input; empty input yields (0, 0, 0).
    """
    if not latencies:
        return 0, 0, 0
    sorted_lat = sorted(latencies)
    n = len(sorted_lat)
    return (
        sorted_lat[int(n * 0.50)],
        sorted_lat[int(n * 0.95)],
        sorted_lat[int(n * 0.99)],
    )


def start_container(client, image, name, port):
    """Start the test container, replacing any stale one; wait until healthy."""
    try:
        old = client.containers.get(name)
        print(f"🧹 Stopping existing container...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass

    print(f"🚀 Starting container...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",
        mem_limit="4g",
    )

    print(f"⏳ Waiting for health...")
    for _ in range(30):
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                import requests
                if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
                    print(f"✅ Container healthy!")
                    return container
            except Exception:
                # Fix: was a bare `except:`; probe failures are expected
                # while the server boots, so keep polling.
                pass

    raise TimeoutError("Container failed to start")


async def main():
    """Run escalating load levels against a fresh container; return exit code."""
    print("="*60)
    print("TEST 4: Concurrent Load Testing")
    print("="*60)

    client = docker.from_env()
    container = None
    monitor_thread = None

    try:
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
        print(f"\n⏳ Waiting for permanent browser init (3s)...")
        await asyncio.sleep(3)

        # Start monitoring
        stop_monitoring.clear()
        stats_history.clear()
        monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
        monitor_thread.start()
        await asyncio.sleep(1)

        baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
        print(f"📏 Baseline: {baseline_mem:.1f} MB\n")

        url = f"http://localhost:{PORT}/html"
        payload = {"url": "https://httpbin.org/html"}

        all_results = []
        level_stats = []

        # Run load levels
        for level in LOAD_LEVELS:
            print(f"{'='*60}")
            print(f"🔄 {level['name']} Load: {level['concurrent']} concurrent, {level['requests']} total")
            print(f"{'='*60}")

            start_time = time.time()
            results = await run_concurrent_test(url, payload, level['concurrent'], level['requests'])
            duration = time.time() - start_time

            successes = sum(1 for r in results if r.get("success"))
            success_rate = (successes / len(results)) * 100
            latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
            p50, p95, p99 = calculate_percentiles(latencies)
            avg_lat = sum(latencies) / len(latencies) if latencies else 0

            print(f"   Duration: {duration:.1f}s")
            print(f"   Success: {success_rate:.1f}% ({successes}/{len(results)})")
            print(f"   Avg Latency: {avg_lat:.0f}ms")
            print(f"   P50/P95/P99: {p50:.0f}ms / {p95:.0f}ms / {p99:.0f}ms")

            level_stats.append({
                'name': level['name'],
                'concurrent': level['concurrent'],
                'success_rate': success_rate,
                'avg_latency': avg_lat,
                'p50': p50, 'p95': p95, 'p99': p99,
            })
            all_results.extend(results)
            await asyncio.sleep(2)  # Cool down between levels

        # Stop monitoring
        await asyncio.sleep(1)
        stop_monitoring.set()
        if monitor_thread:
            monitor_thread.join(timeout=2)

        # Final stats
        pool_stats = count_log_markers(container)
        memory_samples = [s['memory_mb'] for s in stats_history]
        peak_mem = max(memory_samples) if memory_samples else 0
        final_mem = memory_samples[-1] if memory_samples else 0

        print(f"\n{'='*60}")
        print(f"FINAL RESULTS:")
        print(f"{'='*60}")
        print(f"  Total Requests: {len(all_results)}")
        print(f"\n  Pool Utilization:")
        print(f"    🔥 Permanent: {pool_stats['permanent']}")
        print(f"    ♨️ Hot: {pool_stats['hot']}")
        print(f"    ❄️ Cold: {pool_stats['cold']}")
        print(f"    🆕 New: {pool_stats['new']}")
        print(f"\n  Memory:")
        print(f"    Baseline: {baseline_mem:.1f} MB")
        print(f"    Peak:     {peak_mem:.1f} MB")
        print(f"    Final:    {final_mem:.1f} MB")
        print(f"    Delta:    {final_mem - baseline_mem:+.1f} MB")
        print(f"{'='*60}")

        # Pass/Fail
        passed = True
        for ls in level_stats:
            if ls['success_rate'] < 99:
                print(f"❌ FAIL: {ls['name']} success rate {ls['success_rate']:.1f}% < 99%")
                passed = False
            if ls['p99'] > 10000:  # 10s threshold
                print(f"⚠️ WARNING: {ls['name']} P99 latency {ls['p99']:.0f}ms very high")

        if final_mem - baseline_mem > 300:
            print(f"⚠️ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB")

        if passed:
            print(f"✅ TEST PASSED")
            return 0
        else:
            return 1

    except Exception as e:
        print(f"\n❌ TEST ERROR: {e}")
        import traceback
        traceback.print_exc()
        return 1

    finally:
        stop_monitoring.set()
        if container:
            print(f"🛑 Stopping container...")
            container.stop()
            container.remove()


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_7_cleanup.py
deploy/docker/tests/test_7_cleanup.py
#!/usr/bin/env python3 """ Test 7: Cleanup Verification (Janitor) - Creates load spike then goes idle - Verifies memory returns to near baseline - Tests janitor cleanup of idle browsers - Monitors memory recovery time """ import asyncio import time import docker import httpx from threading import Thread, Event # Config IMAGE = "crawl4ai-local:latest" CONTAINER_NAME = "crawl4ai-test" PORT = 11235 SPIKE_REQUESTS = 20 # Create some browsers IDLE_TIME = 90 # Wait 90s for janitor (runs every 60s) # Stats stats_history = [] stop_monitoring = Event() def monitor_stats(container): """Background stats collector.""" for stat in container.stats(decode=True, stream=True): if stop_monitoring.is_set(): break try: mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024) stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage}) except: pass time.sleep(1) # Sample every 1s for this test def start_container(client, image, name, port): """Start container.""" try: old = client.containers.get(name) print(f"🧹 Stopping existing container...") old.stop() old.remove() except docker.errors.NotFound: pass print(f"🚀 Starting container...") container = client.containers.run( image, name=name, ports={f"{port}/tcp": port}, detach=True, shm_size="1g", mem_limit="4g", ) print(f"⏳ Waiting for health...") for _ in range(30): time.sleep(1) container.reload() if container.status == "running": try: import requests if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200: print(f"✅ Container healthy!") return container except: pass raise TimeoutError("Container failed to start") async def main(): print("="*60) print("TEST 7: Cleanup Verification (Janitor)") print("="*60) client = docker.from_env() container = None monitor_thread = None try: container = start_container(client, IMAGE, CONTAINER_NAME, PORT) print(f"\n⏳ Waiting for permanent browser init (3s)...") await asyncio.sleep(3) # Start monitoring stop_monitoring.clear() stats_history.clear() 
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True) monitor_thread.start() await asyncio.sleep(2) baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0 print(f"📏 Baseline: {baseline_mem:.1f} MB\n") # Create load spike with different configs to populate pool print(f"🔥 Creating load spike ({SPIKE_REQUESTS} requests with varied configs)...") url = f"http://localhost:{PORT}/crawl" viewports = [ {"width": 1920, "height": 1080}, {"width": 1024, "height": 768}, {"width": 375, "height": 667}, ] async with httpx.AsyncClient(timeout=60.0) as http_client: tasks = [] for i in range(SPIKE_REQUESTS): vp = viewports[i % len(viewports)] payload = { "urls": ["https://httpbin.org/html"], "browser_config": { "type": "BrowserConfig", "params": { "viewport": {"type": "dict", "value": vp}, "headless": True, "text_mode": True, "extra_args": [ "--no-sandbox", "--disable-dev-shm-usage", "--disable-gpu", "--disable-software-rasterizer", "--disable-web-security", "--allow-insecure-localhost", "--ignore-certificate-errors" ] } }, "crawler_config": {} } tasks.append(http_client.post(url, json=payload)) results = await asyncio.gather(*tasks, return_exceptions=True) successes = sum(1 for r in results if hasattr(r, 'status_code') and r.status_code == 200) print(f" ✓ Spike completed: {successes}/{len(results)} successful") # Measure peak await asyncio.sleep(2) peak_mem = max([s['memory_mb'] for s in stats_history]) if stats_history else baseline_mem print(f" 📊 Peak memory: {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)") # Now go idle and wait for janitor print(f"\n⏸️ Going idle for {IDLE_TIME}s (janitor cleanup)...") print(f" (Janitor runs every 60s, checking for idle browsers)") for elapsed in range(0, IDLE_TIME, 10): await asyncio.sleep(10) current_mem = stats_history[-1]['memory_mb'] if stats_history else 0 print(f" [{elapsed+10:3d}s] Memory: {current_mem:.1f} MB") # Stop monitoring stop_monitoring.set() if monitor_thread: 
monitor_thread.join(timeout=2) # Analyze memory recovery final_mem = stats_history[-1]['memory_mb'] if stats_history else 0 recovery_mb = peak_mem - final_mem recovery_pct = (recovery_mb / (peak_mem - baseline_mem) * 100) if (peak_mem - baseline_mem) > 0 else 0 print(f"\n{'='*60}") print(f"RESULTS:") print(f"{'='*60}") print(f" Memory Journey:") print(f" Baseline: {baseline_mem:.1f} MB") print(f" Peak: {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)") print(f" Final: {final_mem:.1f} MB (+{final_mem - baseline_mem:.1f} MB)") print(f" Recovered: {recovery_mb:.1f} MB ({recovery_pct:.1f}%)") print(f"{'='*60}") # Pass/Fail passed = True # Should have created some memory pressure if peak_mem - baseline_mem < 100: print(f"⚠️ WARNING: Peak increase only {peak_mem - baseline_mem:.1f} MB (expected more browsers)") # Should recover most memory (within 100MB of baseline) if final_mem - baseline_mem > 100: print(f"⚠️ WARNING: Memory didn't recover well (still +{final_mem - baseline_mem:.1f} MB above baseline)") else: print(f"✅ Good memory recovery!") # Baseline + 50MB tolerance if final_mem - baseline_mem < 50: print(f"✅ Excellent cleanup (within 50MB of baseline)") print(f"✅ TEST PASSED") return 0 except Exception as e: print(f"\n❌ TEST ERROR: {e}") import traceback traceback.print_exc() return 1 finally: stop_monitoring.set() if container: print(f"🛑 Stopping container...") container.stop() container.remove() if __name__ == "__main__": exit_code = asyncio.run(main()) exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/demo_monitor_dashboard.py
deploy/docker/tests/demo_monitor_dashboard.py
#!/usr/bin/env python3 """ Monitor Dashboard Demo Script Generates varied activity to showcase all monitoring features for video recording. """ import httpx import asyncio import time from datetime import datetime BASE_URL = "http://localhost:11235" async def demo_dashboard(): print("🎬 Monitor Dashboard Demo - Starting...\n") print(f"📊 Dashboard: {BASE_URL}/dashboard") print("=" * 60) async with httpx.AsyncClient(timeout=60.0) as client: # Phase 1: Simple requests (permanent browser) print("\n🔷 Phase 1: Testing permanent browser pool") print("-" * 60) for i in range(5): print(f" {i+1}/5 Request to /crawl (default config)...") try: r = await client.post( f"{BASE_URL}/crawl", json={"urls": [f"https://httpbin.org/html?req={i}"], "crawler_config": {}} ) print(f" ✅ Status: {r.status_code}, Time: {r.elapsed.total_seconds():.2f}s") except Exception as e: print(f" ❌ Error: {e}") await asyncio.sleep(1) # Small delay between requests # Phase 2: Create variant browsers (different configs) print("\n🔶 Phase 2: Testing cold→hot pool promotion") print("-" * 60) viewports = [ {"width": 1920, "height": 1080}, {"width": 1280, "height": 720}, {"width": 800, "height": 600} ] for idx, viewport in enumerate(viewports): print(f" Viewport {viewport['width']}x{viewport['height']}:") for i in range(4): # 4 requests each to trigger promotion at 3 try: r = await client.post( f"{BASE_URL}/crawl", json={ "urls": [f"https://httpbin.org/json?v={idx}&r={i}"], "browser_config": {"viewport": viewport}, "crawler_config": {} } ) print(f" {i+1}/4 ✅ {r.status_code} - Should see cold→hot after 3 uses") except Exception as e: print(f" {i+1}/4 ❌ {e}") await asyncio.sleep(0.5) # Phase 3: Concurrent burst (stress pool) print("\n🔷 Phase 3: Concurrent burst (10 parallel)") print("-" * 60) tasks = [] for i in range(10): tasks.append( client.post( f"{BASE_URL}/crawl", json={"urls": [f"https://httpbin.org/delay/2?burst={i}"], "crawler_config": {}} ) ) print(" Sending 10 concurrent requests...") start = 
time.time() results = await asyncio.gather(*tasks, return_exceptions=True) elapsed = time.time() - start successes = sum(1 for r in results if not isinstance(r, Exception) and r.status_code == 200) print(f" ✅ {successes}/10 succeeded in {elapsed:.2f}s") # Phase 4: Multi-endpoint coverage print("\n🔶 Phase 4: Testing multiple endpoints") print("-" * 60) endpoints = [ ("/md", {"url": "https://httpbin.org/html", "f": "fit", "c": "0"}), ("/screenshot", {"url": "https://httpbin.org/html"}), ("/pdf", {"url": "https://httpbin.org/html"}), ] for endpoint, payload in endpoints: print(f" Testing {endpoint}...") try: if endpoint == "/md": r = await client.post(f"{BASE_URL}{endpoint}", json=payload) else: r = await client.post(f"{BASE_URL}{endpoint}", json=payload) print(f" ✅ {r.status_code}") except Exception as e: print(f" ❌ {e}") await asyncio.sleep(1) # Phase 5: Intentional error (to populate errors tab) print("\n🔷 Phase 5: Generating error examples") print("-" * 60) print(" Triggering invalid URL error...") try: r = await client.post( f"{BASE_URL}/crawl", json={"urls": ["invalid://bad-url"], "crawler_config": {}} ) print(f" Response: {r.status_code}") except Exception as e: print(f" ✅ Error captured: {type(e).__name__}") # Phase 6: Wait for janitor activity print("\n🔶 Phase 6: Waiting for janitor cleanup...") print("-" * 60) print(" Idle for 40s to allow janitor to clean cold pool browsers...") for i in range(40, 0, -10): print(f" {i}s remaining... 
(Check dashboard for cleanup events)") await asyncio.sleep(10) # Phase 7: Final stats check print("\n🔷 Phase 7: Final dashboard state") print("-" * 60) r = await client.get(f"{BASE_URL}/monitor/health") health = r.json() print(f" Memory: {health['container']['memory_percent']:.1f}%") print(f" Browsers: Perm={health['pool']['permanent']['active']}, " f"Hot={health['pool']['hot']['count']}, Cold={health['pool']['cold']['count']}") r = await client.get(f"{BASE_URL}/monitor/endpoints/stats") stats = r.json() print(f"\n Endpoint Stats:") for endpoint, data in stats.items(): print(f" {endpoint}: {data['count']} req, " f"{data['avg_latency_ms']:.0f}ms avg, " f"{data['success_rate_percent']:.1f}% success") r = await client.get(f"{BASE_URL}/monitor/browsers") browsers = r.json() print(f"\n Pool Efficiency:") print(f" Total browsers: {browsers['summary']['total_count']}") print(f" Memory usage: {browsers['summary']['total_memory_mb']} MB") print(f" Reuse rate: {browsers['summary']['reuse_rate_percent']:.1f}%") print("\n" + "=" * 60) print("✅ Demo complete! Dashboard is now populated with rich data.") print(f"\n📹 Recording tip: Refresh {BASE_URL}/dashboard") print(" You should see:") print(" • Active & completed requests") print(" • Browser pool (permanent + hot/cold)") print(" • Janitor cleanup events") print(" • Endpoint analytics") print(" • Memory timeline") if __name__ == "__main__": try: asyncio.run(demo_dashboard()) except KeyboardInterrupt: print("\n\n⚠️ Demo interrupted by user") except Exception as e: print(f"\n\n❌ Demo failed: {e}")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_1_basic.py
deploy/docker/tests/test_1_basic.py
#!/usr/bin/env python3 """ Test 1: Basic Container Health + Single Endpoint - Starts container - Hits /health endpoint 10 times - Reports success rate and basic latency """ import asyncio import time import docker import httpx # Config IMAGE = "crawl4ai-local:latest" CONTAINER_NAME = "crawl4ai-test" PORT = 11235 REQUESTS = 10 async def test_endpoint(url: str, count: int): """Hit endpoint multiple times, return stats.""" results = [] async with httpx.AsyncClient(timeout=30.0) as client: for i in range(count): start = time.time() try: resp = await client.get(url) elapsed = (time.time() - start) * 1000 # ms results.append({ "success": resp.status_code == 200, "latency_ms": elapsed, "status": resp.status_code }) print(f" [{i+1}/{count}] ✓ {resp.status_code} - {elapsed:.0f}ms") except Exception as e: results.append({ "success": False, "latency_ms": None, "error": str(e) }) print(f" [{i+1}/{count}] ✗ Error: {e}") return results def start_container(client, image: str, name: str, port: int): """Start container, return container object.""" # Clean up existing try: old = client.containers.get(name) print(f"🧹 Stopping existing container '{name}'...") old.stop() old.remove() except docker.errors.NotFound: pass print(f"🚀 Starting container '{name}' from image '{image}'...") container = client.containers.run( image, name=name, ports={f"{port}/tcp": port}, detach=True, shm_size="1g", environment={"PYTHON_ENV": "production"} ) # Wait for health print(f"⏳ Waiting for container to be healthy...") for _ in range(30): # 30s timeout time.sleep(1) container.reload() if container.status == "running": try: # Quick health check import requests resp = requests.get(f"http://localhost:{port}/health", timeout=2) if resp.status_code == 200: print(f"✅ Container healthy!") return container except: pass raise TimeoutError("Container failed to start") def stop_container(container): """Stop and remove container.""" print(f"🛑 Stopping container...") container.stop() container.remove() print(f"✅ 
Container removed") async def main(): print("="*60) print("TEST 1: Basic Container Health + Single Endpoint") print("="*60) client = docker.from_env() container = None try: # Start container container = start_container(client, IMAGE, CONTAINER_NAME, PORT) # Test /health endpoint print(f"\n📊 Testing /health endpoint ({REQUESTS} requests)...") url = f"http://localhost:{PORT}/health" results = await test_endpoint(url, REQUESTS) # Calculate stats successes = sum(1 for r in results if r["success"]) success_rate = (successes / len(results)) * 100 latencies = [r["latency_ms"] for r in results if r["latency_ms"] is not None] avg_latency = sum(latencies) / len(latencies) if latencies else 0 # Print results print(f"\n{'='*60}") print(f"RESULTS:") print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})") print(f" Avg Latency: {avg_latency:.0f}ms") if latencies: print(f" Min Latency: {min(latencies):.0f}ms") print(f" Max Latency: {max(latencies):.0f}ms") print(f"{'='*60}") # Pass/Fail if success_rate >= 100: print(f"✅ TEST PASSED") return 0 else: print(f"❌ TEST FAILED (expected 100% success rate)") return 1 except Exception as e: print(f"\n❌ TEST ERROR: {e}") return 1 finally: if container: stop_container(container) if __name__ == "__main__": exit_code = asyncio.run(main()) exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_3_pool.py
deploy/docker/tests/test_3_pool.py
#!/usr/bin/env python3 """ Test 3: Pool Validation - Permanent Browser Reuse - Tests /html endpoint (should use permanent browser) - Monitors container logs for pool hit markers - Validates browser reuse rate - Checks memory after browser creation """ import asyncio import time import docker import httpx from threading import Thread, Event # Config IMAGE = "crawl4ai-local:latest" CONTAINER_NAME = "crawl4ai-test" PORT = 11235 REQUESTS = 30 # Stats tracking stats_history = [] stop_monitoring = Event() def monitor_stats(container): """Background stats collector.""" for stat in container.stats(decode=True, stream=True): if stop_monitoring.is_set(): break try: mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024) stats_history.append({ 'timestamp': time.time(), 'memory_mb': mem_usage, }) except: pass time.sleep(0.5) def count_log_markers(container): """Extract pool usage markers from logs.""" logs = container.logs().decode('utf-8') permanent_hits = logs.count("🔥 Using permanent browser") hot_hits = logs.count("♨️ Using hot pool browser") cold_hits = logs.count("❄️ Using cold pool browser") new_created = logs.count("🆕 Creating new browser") return { 'permanent_hits': permanent_hits, 'hot_hits': hot_hits, 'cold_hits': cold_hits, 'new_created': new_created, 'total_hits': permanent_hits + hot_hits + cold_hits } async def test_endpoint(url: str, count: int): """Hit endpoint multiple times.""" results = [] async with httpx.AsyncClient(timeout=60.0) as client: for i in range(count): start = time.time() try: resp = await client.post(url, json={"url": "https://httpbin.org/html"}) elapsed = (time.time() - start) * 1000 results.append({ "success": resp.status_code == 200, "latency_ms": elapsed, }) if (i + 1) % 10 == 0: print(f" [{i+1}/{count}] ✓ {resp.status_code} - {elapsed:.0f}ms") except Exception as e: results.append({"success": False, "error": str(e)}) print(f" [{i+1}/{count}] ✗ Error: {e}") return results def start_container(client, image: str, name: str, port: 
int): """Start container.""" try: old = client.containers.get(name) print(f"🧹 Stopping existing container...") old.stop() old.remove() except docker.errors.NotFound: pass print(f"🚀 Starting container...") container = client.containers.run( image, name=name, ports={f"{port}/tcp": port}, detach=True, shm_size="1g", mem_limit="4g", ) print(f"⏳ Waiting for health...") for _ in range(30): time.sleep(1) container.reload() if container.status == "running": try: import requests resp = requests.get(f"http://localhost:{port}/health", timeout=2) if resp.status_code == 200: print(f"✅ Container healthy!") return container except: pass raise TimeoutError("Container failed to start") def stop_container(container): """Stop container.""" print(f"🛑 Stopping container...") container.stop() container.remove() async def main(): print("="*60) print("TEST 3: Pool Validation - Permanent Browser Reuse") print("="*60) client = docker.from_env() container = None monitor_thread = None try: # Start container container = start_container(client, IMAGE, CONTAINER_NAME, PORT) # Wait for permanent browser initialization print(f"\n⏳ Waiting for permanent browser init (3s)...") await asyncio.sleep(3) # Start stats monitoring print(f"📊 Starting stats monitor...") stop_monitoring.clear() stats_history.clear() monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True) monitor_thread.start() await asyncio.sleep(1) baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0 print(f"📏 Baseline (with permanent browser): {baseline_mem:.1f} MB") # Test /html endpoint (uses permanent browser for default config) print(f"\n🔄 Running {REQUESTS} requests to /html...") url = f"http://localhost:{PORT}/html" results = await test_endpoint(url, REQUESTS) # Wait a bit await asyncio.sleep(1) # Stop monitoring stop_monitoring.set() if monitor_thread: monitor_thread.join(timeout=2) # Analyze logs for pool markers print(f"\n📋 Analyzing pool usage...") pool_stats = count_log_markers(container) # 
Calculate request stats successes = sum(1 for r in results if r.get("success")) success_rate = (successes / len(results)) * 100 latencies = [r["latency_ms"] for r in results if "latency_ms" in r] avg_latency = sum(latencies) / len(latencies) if latencies else 0 # Memory stats memory_samples = [s['memory_mb'] for s in stats_history] peak_mem = max(memory_samples) if memory_samples else 0 final_mem = memory_samples[-1] if memory_samples else 0 mem_delta = final_mem - baseline_mem # Calculate reuse rate total_requests = len(results) total_pool_hits = pool_stats['total_hits'] reuse_rate = (total_pool_hits / total_requests * 100) if total_requests > 0 else 0 # Print results print(f"\n{'='*60}") print(f"RESULTS:") print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})") print(f" Avg Latency: {avg_latency:.0f}ms") print(f"\n Pool Stats:") print(f" 🔥 Permanent Hits: {pool_stats['permanent_hits']}") print(f" ♨️ Hot Pool Hits: {pool_stats['hot_hits']}") print(f" ❄️ Cold Pool Hits: {pool_stats['cold_hits']}") print(f" 🆕 New Created: {pool_stats['new_created']}") print(f" 📊 Reuse Rate: {reuse_rate:.1f}%") print(f"\n Memory Stats:") print(f" Baseline: {baseline_mem:.1f} MB") print(f" Peak: {peak_mem:.1f} MB") print(f" Final: {final_mem:.1f} MB") print(f" Delta: {mem_delta:+.1f} MB") print(f"{'='*60}") # Pass/Fail passed = True if success_rate < 100: print(f"❌ FAIL: Success rate {success_rate:.1f}% < 100%") passed = False if reuse_rate < 80: print(f"❌ FAIL: Reuse rate {reuse_rate:.1f}% < 80% (expected high permanent browser usage)") passed = False if pool_stats['permanent_hits'] < (total_requests * 0.8): print(f"⚠️ WARNING: Only {pool_stats['permanent_hits']} permanent hits out of {total_requests} requests") if mem_delta > 200: print(f"⚠️ WARNING: Memory grew by {mem_delta:.1f} MB (possible browser leak)") if passed: print(f"✅ TEST PASSED") return 0 else: return 1 except Exception as e: print(f"\n❌ TEST ERROR: {e}") import traceback traceback.print_exc() return 
1 finally: stop_monitoring.set() if container: stop_container(container) if __name__ == "__main__": exit_code = asyncio.run(main()) exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_5_pool_stress.py
deploy/docker/tests/test_5_pool_stress.py
#!/usr/bin/env python3 """ Test 5: Pool Stress - Mixed Configs - Tests hot/cold pool with different browser configs - Uses different viewports to create config variants - Validates cold → hot promotion after 3 uses - Monitors pool tier distribution """ import asyncio import time import docker import httpx from threading import Thread, Event import random # Config IMAGE = "crawl4ai-local:latest" CONTAINER_NAME = "crawl4ai-test" PORT = 11235 REQUESTS_PER_CONFIG = 5 # 5 requests per config variant # Different viewport configs to test pool tiers VIEWPORT_CONFIGS = [ None, # Default (permanent browser) {"width": 1920, "height": 1080}, # Desktop {"width": 1024, "height": 768}, # Tablet {"width": 375, "height": 667}, # Mobile ] # Stats stats_history = [] stop_monitoring = Event() def monitor_stats(container): """Background stats collector.""" for stat in container.stats(decode=True, stream=True): if stop_monitoring.is_set(): break try: mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024) stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage}) except: pass time.sleep(0.5) def analyze_pool_logs(container): """Extract detailed pool stats from logs.""" logs = container.logs().decode('utf-8') permanent = logs.count("🔥 Using permanent browser") hot = logs.count("♨️ Using hot pool browser") cold = logs.count("❄️ Using cold pool browser") new = logs.count("🆕 Creating new browser") promotions = logs.count("⬆️ Promoting to hot pool") return { 'permanent': permanent, 'hot': hot, 'cold': cold, 'new': new, 'promotions': promotions, 'total': permanent + hot + cold } async def crawl_with_viewport(client, url, viewport): """Single request with specific viewport.""" payload = { "urls": ["https://httpbin.org/html"], "browser_config": {}, "crawler_config": {} } # Add viewport if specified if viewport: payload["browser_config"] = { "type": "BrowserConfig", "params": { "viewport": {"type": "dict", "value": viewport}, "headless": True, "text_mode": True, 
"extra_args": [ "--no-sandbox", "--disable-dev-shm-usage", "--disable-gpu", "--disable-software-rasterizer", "--disable-web-security", "--allow-insecure-localhost", "--ignore-certificate-errors" ] } } start = time.time() try: resp = await client.post(url, json=payload, timeout=60.0) elapsed = (time.time() - start) * 1000 return {"success": resp.status_code == 200, "latency_ms": elapsed, "viewport": viewport} except Exception as e: return {"success": False, "error": str(e), "viewport": viewport} def start_container(client, image, name, port): """Start container.""" try: old = client.containers.get(name) print(f"🧹 Stopping existing container...") old.stop() old.remove() except docker.errors.NotFound: pass print(f"🚀 Starting container...") container = client.containers.run( image, name=name, ports={f"{port}/tcp": port}, detach=True, shm_size="1g", mem_limit="4g", ) print(f"⏳ Waiting for health...") for _ in range(30): time.sleep(1) container.reload() if container.status == "running": try: import requests if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200: print(f"✅ Container healthy!") return container except: pass raise TimeoutError("Container failed to start") async def main(): print("="*60) print("TEST 5: Pool Stress - Mixed Configs") print("="*60) client = docker.from_env() container = None monitor_thread = None try: container = start_container(client, IMAGE, CONTAINER_NAME, PORT) print(f"\n⏳ Waiting for permanent browser init (3s)...") await asyncio.sleep(3) # Start monitoring stop_monitoring.clear() stats_history.clear() monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True) monitor_thread.start() await asyncio.sleep(1) baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0 print(f"📏 Baseline: {baseline_mem:.1f} MB\n") url = f"http://localhost:{PORT}/crawl" print(f"Testing {len(VIEWPORT_CONFIGS)} different configs:") for i, vp in enumerate(VIEWPORT_CONFIGS): vp_str = "Default" if vp is None else 
f"{vp['width']}x{vp['height']}" print(f" {i+1}. {vp_str}") print() # Run requests: repeat each config REQUESTS_PER_CONFIG times all_results = [] config_sequence = [] for _ in range(REQUESTS_PER_CONFIG): for viewport in VIEWPORT_CONFIGS: config_sequence.append(viewport) # Shuffle to mix configs random.shuffle(config_sequence) print(f"🔄 Running {len(config_sequence)} requests with mixed configs...") async with httpx.AsyncClient() as http_client: for i, viewport in enumerate(config_sequence): result = await crawl_with_viewport(http_client, url, viewport) all_results.append(result) if (i + 1) % 5 == 0: vp_str = "default" if result['viewport'] is None else f"{result['viewport']['width']}x{result['viewport']['height']}" status = "✓" if result.get('success') else "✗" lat = f"{result.get('latency_ms', 0):.0f}ms" if 'latency_ms' in result else "error" print(f" [{i+1}/{len(config_sequence)}] {status} {vp_str} - {lat}") # Stop monitoring await asyncio.sleep(2) stop_monitoring.set() if monitor_thread: monitor_thread.join(timeout=2) # Analyze results pool_stats = analyze_pool_logs(container) successes = sum(1 for r in all_results if r.get("success")) success_rate = (successes / len(all_results)) * 100 latencies = [r["latency_ms"] for r in all_results if "latency_ms" in r] avg_lat = sum(latencies) / len(latencies) if latencies else 0 memory_samples = [s['memory_mb'] for s in stats_history] peak_mem = max(memory_samples) if memory_samples else 0 final_mem = memory_samples[-1] if memory_samples else 0 print(f"\n{'='*60}") print(f"RESULTS:") print(f"{'='*60}") print(f" Requests: {len(all_results)}") print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(all_results)})") print(f" Avg Latency: {avg_lat:.0f}ms") print(f"\n Pool Statistics:") print(f" 🔥 Permanent: {pool_stats['permanent']}") print(f" ♨️ Hot: {pool_stats['hot']}") print(f" ❄️ Cold: {pool_stats['cold']}") print(f" 🆕 New: {pool_stats['new']}") print(f" ⬆️ Promotions: {pool_stats['promotions']}") print(f" 📊 Reuse: 
{(pool_stats['total'] / len(all_results) * 100):.1f}%") print(f"\n Memory:") print(f" Baseline: {baseline_mem:.1f} MB") print(f" Peak: {peak_mem:.1f} MB") print(f" Final: {final_mem:.1f} MB") print(f" Delta: {final_mem - baseline_mem:+.1f} MB") print(f"{'='*60}") # Pass/Fail passed = True if success_rate < 99: print(f"❌ FAIL: Success rate {success_rate:.1f}% < 99%") passed = False # Should see promotions since we repeat each config 5 times if pool_stats['promotions'] < (len(VIEWPORT_CONFIGS) - 1): # -1 for default print(f"⚠️ WARNING: Only {pool_stats['promotions']} promotions (expected ~{len(VIEWPORT_CONFIGS)-1})") # Should have created some browsers for different configs if pool_stats['new'] == 0: print(f"⚠️ NOTE: No new browsers created (all used default?)") if pool_stats['permanent'] == len(all_results): print(f"⚠️ NOTE: All requests used permanent browser (configs not varying enough?)") if final_mem - baseline_mem > 500: print(f"⚠️ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB") if passed: print(f"✅ TEST PASSED") return 0 else: return 1 except Exception as e: print(f"\n❌ TEST ERROR: {e}") import traceback traceback.print_exc() return 1 finally: stop_monitoring.set() if container: print(f"🛑 Stopping container...") container.stop() container.remove() if __name__ == "__main__": exit_code = asyncio.run(main()) exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_2_memory.py
deploy/docker/tests/test_2_memory.py
#!/usr/bin/env python3 """ Test 2: Docker Stats Monitoring - Extends Test 1 with real-time container stats - Monitors memory % and CPU during requests - Reports baseline, peak, and final memory """ import asyncio import time import docker import httpx from threading import Thread, Event # Config IMAGE = "crawl4ai-local:latest" CONTAINER_NAME = "crawl4ai-test" PORT = 11235 REQUESTS = 20 # More requests to see memory usage # Stats tracking stats_history = [] stop_monitoring = Event() def monitor_stats(container): """Background thread to collect container stats.""" for stat in container.stats(decode=True, stream=True): if stop_monitoring.is_set(): break try: # Extract memory stats mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024) # MB mem_limit = stat['memory_stats'].get('limit', 1) / (1024 * 1024) mem_percent = (mem_usage / mem_limit * 100) if mem_limit > 0 else 0 # Extract CPU stats (handle missing fields on Mac) cpu_percent = 0 try: cpu_delta = stat['cpu_stats']['cpu_usage']['total_usage'] - \ stat['precpu_stats']['cpu_usage']['total_usage'] system_delta = stat['cpu_stats'].get('system_cpu_usage', 0) - \ stat['precpu_stats'].get('system_cpu_usage', 0) if system_delta > 0: num_cpus = stat['cpu_stats'].get('online_cpus', 1) cpu_percent = (cpu_delta / system_delta * num_cpus * 100.0) except (KeyError, ZeroDivisionError): pass stats_history.append({ 'timestamp': time.time(), 'memory_mb': mem_usage, 'memory_percent': mem_percent, 'cpu_percent': cpu_percent }) except Exception as e: # Skip malformed stats pass time.sleep(0.5) # Sample every 500ms async def test_endpoint(url: str, count: int): """Hit endpoint, return stats.""" results = [] async with httpx.AsyncClient(timeout=30.0) as client: for i in range(count): start = time.time() try: resp = await client.get(url) elapsed = (time.time() - start) * 1000 results.append({ "success": resp.status_code == 200, "latency_ms": elapsed, }) if (i + 1) % 5 == 0: # Print every 5 requests print(f" [{i+1}/{count}] ✓ 
{resp.status_code} - {elapsed:.0f}ms") except Exception as e: results.append({"success": False, "error": str(e)}) print(f" [{i+1}/{count}] ✗ Error: {e}") return results def start_container(client, image: str, name: str, port: int): """Start container.""" try: old = client.containers.get(name) print(f"🧹 Stopping existing container '{name}'...") old.stop() old.remove() except docker.errors.NotFound: pass print(f"🚀 Starting container '{name}'...") container = client.containers.run( image, name=name, ports={f"{port}/tcp": port}, detach=True, shm_size="1g", mem_limit="4g", # Set explicit memory limit ) print(f"⏳ Waiting for health...") for _ in range(30): time.sleep(1) container.reload() if container.status == "running": try: import requests resp = requests.get(f"http://localhost:{port}/health", timeout=2) if resp.status_code == 200: print(f"✅ Container healthy!") return container except: pass raise TimeoutError("Container failed to start") def stop_container(container): """Stop container.""" print(f"🛑 Stopping container...") container.stop() container.remove() async def main(): print("="*60) print("TEST 2: Docker Stats Monitoring") print("="*60) client = docker.from_env() container = None monitor_thread = None try: # Start container container = start_container(client, IMAGE, CONTAINER_NAME, PORT) # Start stats monitoring in background print(f"\n📊 Starting stats monitor...") stop_monitoring.clear() stats_history.clear() monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True) monitor_thread.start() # Wait a bit for baseline await asyncio.sleep(2) baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0 print(f"📏 Baseline memory: {baseline_mem:.1f} MB") # Test /health endpoint print(f"\n🔄 Running {REQUESTS} requests to /health...") url = f"http://localhost:{PORT}/health" results = await test_endpoint(url, REQUESTS) # Wait a bit to capture peak await asyncio.sleep(1) # Stop monitoring stop_monitoring.set() if monitor_thread: 
monitor_thread.join(timeout=2) # Calculate stats successes = sum(1 for r in results if r.get("success")) success_rate = (successes / len(results)) * 100 latencies = [r["latency_ms"] for r in results if "latency_ms" in r] avg_latency = sum(latencies) / len(latencies) if latencies else 0 # Memory stats memory_samples = [s['memory_mb'] for s in stats_history] peak_mem = max(memory_samples) if memory_samples else 0 final_mem = memory_samples[-1] if memory_samples else 0 mem_delta = final_mem - baseline_mem # Print results print(f"\n{'='*60}") print(f"RESULTS:") print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})") print(f" Avg Latency: {avg_latency:.0f}ms") print(f"\n Memory Stats:") print(f" Baseline: {baseline_mem:.1f} MB") print(f" Peak: {peak_mem:.1f} MB") print(f" Final: {final_mem:.1f} MB") print(f" Delta: {mem_delta:+.1f} MB") print(f"{'='*60}") # Pass/Fail if success_rate >= 100 and mem_delta < 100: # No significant memory growth print(f"✅ TEST PASSED") return 0 else: if success_rate < 100: print(f"❌ TEST FAILED (success rate < 100%)") if mem_delta >= 100: print(f"⚠️ WARNING: Memory grew by {mem_delta:.1f} MB") return 1 except Exception as e: print(f"\n❌ TEST ERROR: {e}") return 1 finally: stop_monitoring.set() if container: stop_container(container) if __name__ == "__main__": exit_code = asyncio.run(main()) exit(exit_code)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/deploy/docker/tests/test_monitor_demo.py
deploy/docker/tests/test_monitor_demo.py
#!/usr/bin/env python3 """Quick test to generate monitor dashboard activity""" import httpx import asyncio async def test_dashboard(): async with httpx.AsyncClient(timeout=30.0) as client: print("📊 Generating dashboard activity...") # Test 1: Simple crawl print("\n1️⃣ Running simple crawl...") r1 = await client.post( "http://localhost:11235/crawl", json={"urls": ["https://httpbin.org/html"], "crawler_config": {}} ) print(f" Status: {r1.status_code}") # Test 2: Multiple URLs print("\n2️⃣ Running multi-URL crawl...") r2 = await client.post( "http://localhost:11235/crawl", json={ "urls": [ "https://httpbin.org/html", "https://httpbin.org/json" ], "crawler_config": {} } ) print(f" Status: {r2.status_code}") # Test 3: Check monitor health print("\n3️⃣ Checking monitor health...") r3 = await client.get("http://localhost:11235/monitor/health") health = r3.json() print(f" Memory: {health['container']['memory_percent']}%") print(f" Browsers: {health['pool']['permanent']['active']}") # Test 4: Check requests print("\n4️⃣ Checking request log...") r4 = await client.get("http://localhost:11235/monitor/requests") reqs = r4.json() print(f" Active: {len(reqs['active'])}") print(f" Completed: {len(reqs['completed'])}") # Test 5: Check endpoint stats print("\n5️⃣ Checking endpoint stats...") r5 = await client.get("http://localhost:11235/monitor/endpoints/stats") stats = r5.json() for endpoint, data in stats.items(): print(f" {endpoint}: {data['count']} requests, {data['avg_latency_ms']}ms avg") print("\n✅ Dashboard should now show activity!") print(f"\n🌐 Open: http://localhost:11235/dashboard") if __name__ == "__main__": asyncio.run(test_dashboard())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_config_selection.py
tests/test_config_selection.py
""" Test config selection logic in dispatchers """ import asyncio import sys from pathlib import Path from unittest.mock import AsyncMock, MagicMock # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent)) from crawl4ai.async_configs import CrawlerRunConfig, MatchMode from crawl4ai.async_dispatcher import BaseDispatcher, MemoryAdaptiveDispatcher class TestDispatcher(BaseDispatcher): """Simple test dispatcher to verify config selection""" async def crawl_url(self, url, config, task_id, **kwargs): # Just return which config was selected selected = self.select_config(url, config) return {"url": url, "config_id": id(selected)} async def run_urls(self, urls, crawler, config): results = [] for url in urls: result = await self.crawl_url(url, config, "test") results.append(result) return results async def test_dispatcher_config_selection(): print("Testing dispatcher config selection") print("=" * 50) # Create test configs with different matchers pdf_config = CrawlerRunConfig(url_matcher="*.pdf") api_config = CrawlerRunConfig(url_matcher=lambda url: 'api' in url) default_config = CrawlerRunConfig() # No matcher configs = [pdf_config, api_config, default_config] # Create test dispatcher dispatcher = TestDispatcher() # Test single config print("\nTest 1: Single config") result = await dispatcher.crawl_url("https://example.com/file.pdf", pdf_config, "test1") assert result["config_id"] == id(pdf_config) print("✓ Single config works") # Test config list selection print("\nTest 2: Config list selection") test_cases = [ ("https://example.com/file.pdf", id(pdf_config)), ("https://api.example.com/data", id(api_config)), ("https://example.com/page", id(configs[0])), # No match, uses first ] for url, expected_id in test_cases: result = await dispatcher.crawl_url(url, configs, "test") assert result["config_id"] == expected_id, f"URL {url} got wrong config" print(f"✓ {url} -> correct config selected") # Test with MemoryAdaptiveDispatcher 
print("\nTest 3: MemoryAdaptiveDispatcher config selection") mem_dispatcher = MemoryAdaptiveDispatcher() # Test select_config method directly selected = mem_dispatcher.select_config("https://example.com/doc.pdf", configs) assert selected == pdf_config print("✓ MemoryAdaptiveDispatcher.select_config works") # Test empty config list print("\nTest 4: Edge cases") selected = mem_dispatcher.select_config("https://example.com", []) assert isinstance(selected, CrawlerRunConfig) # Should return default print("✓ Empty config list returns default config") # Test None config selected = mem_dispatcher.select_config("https://example.com", None) assert isinstance(selected, CrawlerRunConfig) # Should return default print("✓ None config returns default config") print("\n" + "=" * 50) print("All dispatcher tests passed! ✓") if __name__ == "__main__": asyncio.run(test_dispatcher_config_selection())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker_example.py
tests/docker_example.py
import requests import json import time import sys import base64 import os from typing import Dict, Any class Crawl4AiTester: def __init__(self, base_url: str = "http://localhost:11235"): self.base_url = base_url def submit_and_wait( self, request_data: Dict[str, Any], timeout: int = 300 ) -> Dict[str, Any]: # Submit crawl job using async endpoint response = requests.post( f"{self.base_url}/crawl/job", json=request_data ) response.raise_for_status() job_response = response.json() task_id = job_response["task_id"] print(f"Submitted job with task_id: {task_id}") # Poll for result start_time = time.time() while True: if time.time() - start_time > timeout: raise TimeoutError( f"Task {task_id} did not complete within {timeout} seconds" ) result = requests.get( f"{self.base_url}/crawl/job/{task_id}" ) result.raise_for_status() status = result.json() if status["status"] == "failed": print("Task failed:", status.get("error")) raise Exception(f"Task failed: {status.get('error')}") if status["status"] == "completed": return status time.sleep(2) def submit_sync(self, request_data: Dict[str, Any]) -> Dict[str, Any]: # Use synchronous crawl endpoint response = requests.post( f"{self.base_url}/crawl", json=request_data, timeout=60, ) if response.status_code == 408: raise TimeoutError("Task did not complete within server timeout") response.raise_for_status() return response.json() def test_docker_deployment(version="basic"): tester = Crawl4AiTester( base_url="http://localhost:11235", #base_url="https://crawl4ai-sby74.ondigitalocean.app", ) print(f"Testing Crawl4AI Docker {version} version") # Health check with timeout and retry max_retries = 5 for i in range(max_retries): try: health = requests.get(f"{tester.base_url}/health", timeout=10) print("Health check:", health.json()) break except requests.exceptions.RequestException: if i == max_retries - 1: print(f"Failed to connect after {max_retries} attempts") sys.exit(1) print(f"Waiting for service to start (attempt 
{i+1}/{max_retries})...") time.sleep(5) # Test cases based on version test_basic_crawl(tester) test_basic_crawl_sync(tester) if version in ["full", "transformer"]: test_cosine_extraction(tester) test_js_execution(tester) test_css_selector(tester) test_structured_extraction(tester) test_llm_extraction(tester) test_llm_with_ollama(tester) test_screenshot(tester) def test_basic_crawl(tester: Crawl4AiTester): print("\n=== Testing Basic Crawl (Async) ===") request = { "urls": ["https://www.nbcnews.com/business"], } result = tester.submit_and_wait(request) print(f"Basic crawl result count: {len(result['result']['results'])}") assert result["result"]["success"] assert len(result["result"]["results"]) > 0 assert len(result["result"]["results"][0]["markdown"]) > 0 def test_basic_crawl_sync(tester: Crawl4AiTester): print("\n=== Testing Basic Crawl (Sync) ===") request = { "urls": ["https://www.nbcnews.com/business"], } result = tester.submit_sync(request) print(f"Basic crawl result count: {len(result['results'])}") assert result["success"] assert len(result["results"]) > 0 assert len(result["results"][0]["markdown"]) > 0 def test_js_execution(tester: Crawl4AiTester): print("\n=== Testing JS Execution ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, "crawler_config": { "js_code": [ "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); if(loadMoreButton) loadMoreButton.click();" ], "wait_for": "wide-tease-item__wrapper df flex-column flex-row-m flex-nowrap-m enable-new-sports-feed-mobile-design(10)" } } result = tester.submit_and_wait(request) print(f"JS execution result count: {len(result['result']['results'])}") assert result["result"]["success"] def test_css_selector(tester: Crawl4AiTester): print("\n=== Testing CSS Selector ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, "crawler_config": { 
"css_selector": ".wide-tease-item__description", "word_count_threshold": 10 } } result = tester.submit_and_wait(request) print(f"CSS selector result count: {len(result['result']['results'])}") assert result["result"]["success"] def test_structured_extraction(tester: Crawl4AiTester): print("\n=== Testing Structured Extraction ===") schema = { "name": "Cryptocurrency Prices", "baseSelector": "table[data-testid=\"prices-table\"] tbody tr", "fields": [ { "name": "asset_name", "selector": "td:nth-child(2) p.cds-headline-h4steop", "type": "text" }, { "name": "asset_symbol", "selector": "td:nth-child(2) p.cds-label2-l1sm09ec", "type": "text" }, { "name": "asset_image_url", "selector": "td:nth-child(2) img[alt=\"Asset Symbol\"]", "type": "attribute", "attribute": "src" }, { "name": "asset_url", "selector": "td:nth-child(2) a[aria-label^=\"Asset page for\"]", "type": "attribute", "attribute": "href" }, { "name": "price", "selector": "td:nth-child(3) div.cds-typographyResets-t6muwls.cds-body-bwup3gq", "type": "text" }, { "name": "change", "selector": "td:nth-child(7) p.cds-body-bwup3gq", "type": "text" } ] } request = { "urls": ["https://www.coinbase.com/explore"], "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "JsonCssExtractionStrategy", "params": {"schema": schema} } } } } result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} items") if extracted: print("Sample item:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] assert len(extracted) > 0 def test_llm_extraction(tester: Crawl4AiTester): print("\n=== Testing LLM Extraction ===") schema = { "type": "object", "properties": { "asset_name": { "type": "string", "description": "Name of the asset.", }, "price": { "type": "string", "description": "Price of the asset.", }, "change": { "type": "string", "description": "Change in price of the asset.", }, }, 
"required": ["asset_name", "price", "change"], } request = { "urls": ["https://www.coinbase.com/en-in/explore"], "browser_config": {}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "LLMExtractionStrategy", "params": { "llm_config": { "type": "LLMConfig", "params": { "provider": "gemini/gemini-2.5-flash", "api_token": os.getenv("GEMINI_API_KEY") } }, "schema": schema, "extraction_type": "schema", "instruction": "From the crawled content tioned asset names along with their prices and change in price.", } }, "word_count_threshold": 1 } } } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} model pricing entries") if extracted: print("Sample entry:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] except Exception as e: print(f"LLM extraction test failed (might be due to missing API key): {str(e)}") def test_llm_with_ollama(tester: Crawl4AiTester): print("\n=== Testing LLM with Ollama ===") schema = { "type": "object", "properties": { "article_title": { "type": "string", "description": "The main title of the news article", }, "summary": { "type": "string", "description": "A brief summary of the article content", }, "main_topics": { "type": "array", "items": {"type": "string"}, "description": "Main topics or themes discussed in the article", }, }, } request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"verbose": True}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "LLMExtractionStrategy", "params": { "llm_config": { "type": "LLMConfig", "params": { "provider": "ollama/llama3.2:latest", } }, "schema": schema, "extraction_type": "schema", "instruction": "Extract the main article information including title, summary, and main topics.", } }, "word_count_threshold": 1 } } } try: result = tester.submit_and_wait(request) extracted = 
json.loads(result["result"]["results"][0]["extracted_content"]) print("Extracted content:", json.dumps(extracted, indent=2)) assert result["result"]["success"] except Exception as e: print(f"Ollama extraction test failed: {str(e)}") def test_cosine_extraction(tester: Crawl4AiTester): print("\n=== Testing Cosine Extraction ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "extraction_strategy": { "type": "CosineStrategy", "params": { "semantic_filter": "business finance economy", "word_count_threshold": 10, "max_dist": 0.2, "top_k": 3, } } } } } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["results"][0]["extracted_content"]) print(f"Extracted {len(extracted)} text clusters") if extracted: print("First cluster tags:", extracted[0]["tags"]) assert result["result"]["success"] except Exception as e: print(f"Cosine extraction test failed: {str(e)}") def test_screenshot(tester: Crawl4AiTester): print("\n=== Testing Screenshot ===") request = { "urls": ["https://www.nbcnews.com/business"], "browser_config": {"headless": True}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "screenshot": True } } } result = tester.submit_and_wait(request) screenshot_data = result["result"]["results"][0]["screenshot"] print("Screenshot captured:", bool(screenshot_data)) if screenshot_data: # Save screenshot screenshot_bytes = base64.b64decode(screenshot_data) with open("test_screenshot.jpg", "wb") as f: f.write(screenshot_bytes) print("Screenshot saved as test_screenshot.jpg") assert result["result"]["success"] if __name__ == "__main__": version = sys.argv[1] if len(sys.argv) > 1 else "basic" # version = "full" test_docker_deployment(version)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_cli_docs.py
tests/test_cli_docs.py
import asyncio from crawl4ai.docs_manager import DocsManager from click.testing import CliRunner from crawl4ai.cli import cli def test_cli(): """Test all CLI commands""" runner = CliRunner() print("\n1. Testing docs update...") # Use sync version for testing docs_manager = DocsManager() loop = asyncio.get_event_loop() loop.run_until_complete(docs_manager.fetch_docs()) # print("\n2. Testing listing...") # result = runner.invoke(cli, ['docs', 'list']) # print(f"Status: {'✅' if result.exit_code == 0 else '❌'}") # print(result.output) # print("\n2. Testing index building...") # result = runner.invoke(cli, ['docs', 'index']) # print(f"Status: {'✅' if result.exit_code == 0 else '❌'}") # print(f"Output: {result.output}") # print("\n3. Testing search...") # result = runner.invoke(cli, ['docs', 'search', 'how to use crawler', '--build-index']) # print(f"Status: {'✅' if result.exit_code == 0 else '❌'}") # print(f"First 200 chars: {result.output[:200]}...") # print("\n4. Testing combine with sections...") # result = runner.invoke(cli, ['docs', 'combine', 'chunking_strategies', 'extraction_strategies', '--mode', 'extended']) # print(f"Status: {'✅' if result.exit_code == 0 else '❌'}") # print(f"First 200 chars: {result.output[:200]}...") print("\n5. Testing combine all sections...") result = runner.invoke(cli, ["docs", "combine", "--mode", "condensed"]) print(f"Status: {'✅' if result.exit_code == 0 else '❌'}") print(f"First 200 chars: {result.output[:200]}...") if __name__ == "__main__": test_cli()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_multi_config.py
tests/test_multi_config.py
""" Test example for multiple crawler configs feature """ import asyncio import sys from pathlib import Path # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent)) from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, MatchMode, CacheMode async def test_multi_config(): # Create different configs for different URL patterns # Config for PDF files pdf_config = CrawlerRunConfig( url_matcher="*.pdf", ) # Config for articles (using multiple patterns with OR logic) article_config = CrawlerRunConfig( url_matcher=["*/news/*", "*blog*", "*/article/*"], match_mode=MatchMode.OR, screenshot=True, ) # Config using custom matcher function api_config = CrawlerRunConfig( url_matcher=lambda url: 'api' in url or 'json' in url, ) # Config combining patterns and functions with AND logic secure_docs_config = CrawlerRunConfig( url_matcher=[ "*.doc*", # Matches .doc, .docx lambda url: url.startswith('https://') # Must be HTTPS ], match_mode=MatchMode.AND, ) # Default config (no url_matcher means it won't match anything unless it's the fallback) default_config = CrawlerRunConfig( # cache_mode=CacheMode.BYPASS, ) # List of configs - order matters! 
First match wins configs = [ pdf_config, article_config, api_config, secure_docs_config, default_config # Fallback ] # Test URLs - using real URLs that exist test_urls = [ "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf", # Real PDF "https://www.bbc.com/news/articles/c5y3e3glnldo", # News article "https://blog.python.org/", # Blog URL "https://api.github.com/users/github", # GitHub API (returns JSON) "https://httpbin.org/json", # API endpoint that returns JSON "https://www.python.org/", # Generic HTTPS page "http://info.cern.ch/", # HTTP (not HTTPS) page "https://example.com/", # → Default config ] # Test the matching logic print("Config matching test:") print("-" * 50) for url in test_urls: for i, config in enumerate(configs): if config.is_match(url): print(f"{url} -> Config {i} matches") break else: print(f"{url} -> No match, will use fallback (first config)") print("\n" + "=" * 50 + "\n") # Now test with actual crawler async with AsyncWebCrawler() as crawler: # Single config - traditional usage still works print("Test 1: Single config (backwards compatible)") result = await crawler.arun_many( urls=["https://www.python.org/"], config=default_config ) print(f"Crawled {len(result)} URLs with single config\n") # Multiple configs - new feature print("Test 2: Multiple configs") # Just test with 2 URLs to avoid timeout results = await crawler.arun_many( urls=test_urls[:2], # Just test first 2 URLs config=configs # Pass list of configs ) print(f"Crawled {len(results)} URLs with multiple configs") # Using custom matcher inline print("\nTest 3: Inline custom matcher") custom_config = CrawlerRunConfig( url_matcher=lambda url: len(url) > 50 and 'python' in url.lower(), verbose=False ) results = await crawler.arun_many( urls=[ "https://docs.python.org/3/library/asyncio.html", # Long URL with 'python' "https://python.org/", # Short URL with 'python' - won't match "https://www.google.com/" # No 'python' - won't match ], config=[custom_config, 
default_config] ) print(f"Crawled {len(results)} URLs with custom matcher") if __name__ == "__main__": asyncio.run(test_multi_config())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_memory_macos.py
tests/test_memory_macos.py
#!/usr/bin/env python3 """Test script to verify macOS memory calculation accuracy.""" import psutil import platform import time from crawl4ai.utils import get_true_memory_usage_percent, get_memory_stats, get_true_available_memory_gb def test_memory_calculation(): """Test and compare memory calculations.""" print(f"Platform: {platform.system()}") print(f"Python version: {platform.python_version()}") print("-" * 60) # Get psutil's view vm = psutil.virtual_memory() psutil_percent = vm.percent psutil_available_gb = vm.available / (1024**3) total_gb = vm.total / (1024**3) # Get our corrected view true_percent = get_true_memory_usage_percent() true_available_gb = get_true_available_memory_gb() true_percent_calc, available_calc, total_calc = get_memory_stats() print("Memory Statistics Comparison:") print(f"Total Memory: {total_gb:.2f} GB") print() print("PSUtil (Standard) Calculation:") print(f" - Memory Used: {psutil_percent:.1f}%") print(f" - Available: {psutil_available_gb:.2f} GB") print() print("Platform-Aware Calculation:") print(f" - Memory Used: {true_percent:.1f}%") print(f" - Available: {true_available_gb:.2f} GB") print(f" - Difference: {true_available_gb - psutil_available_gb:.2f} GB of reclaimable memory") print() # Show the impact on dispatcher behavior print("Impact on MemoryAdaptiveDispatcher:") thresholds = { "Normal": 90.0, "Critical": 95.0, "Recovery": 85.0 } for name, threshold in thresholds.items(): psutil_triggered = psutil_percent >= threshold true_triggered = true_percent >= threshold print(f" - {name} Threshold ({threshold}%):") print(f" PSUtil: {'TRIGGERED' if psutil_triggered else 'OK'}") print(f" Platform-Aware: {'TRIGGERED' if true_triggered else 'OK'}") if psutil_triggered != true_triggered: print(f" → Difference: Platform-aware prevents false {'pressure' if psutil_triggered else 'recovery'}") print() # Monitor for a few seconds print("Monitoring memory for 10 seconds...") for i in range(10): vm = psutil.virtual_memory() true_pct = 
get_true_memory_usage_percent() print(f" {i+1}s - PSUtil: {vm.percent:.1f}% | Platform-Aware: {true_pct:.1f}%", end="\r") time.sleep(1) print("\n") if __name__ == "__main__": test_memory_calculation()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_docker.py
tests/test_docker.py
import requests import json import time import sys import base64 import os from typing import Dict, Any class Crawl4AiTester: def __init__(self, base_url: str = "http://localhost:11235"): self.base_url = base_url def submit_and_wait( self, request_data: Dict[str, Any], timeout: int = 300 ) -> Dict[str, Any]: # Submit crawl job response = requests.post(f"{self.base_url}/crawl", json=request_data) task_id = response.json()["task_id"] print(f"Task ID: {task_id}") # Poll for result start_time = time.time() while True: if time.time() - start_time > timeout: raise TimeoutError( f"Task {task_id} did not complete within {timeout} seconds" ) result = requests.get(f"{self.base_url}/task/{task_id}") status = result.json() if status["status"] == "failed": print("Task failed:", status.get("error")) raise Exception(f"Task failed: {status.get('error')}") if status["status"] == "completed": return status time.sleep(2) def test_docker_deployment(version="basic"): tester = Crawl4AiTester() print(f"Testing Crawl4AI Docker {version} version") # Health check with timeout and retry max_retries = 5 for i in range(max_retries): try: health = requests.get(f"{tester.base_url}/health", timeout=10) print("Health check:", health.json()) break except requests.exceptions.RequestException: if i == max_retries - 1: print(f"Failed to connect after {max_retries} attempts") sys.exit(1) print(f"Waiting for service to start (attempt {i+1}/{max_retries})...") time.sleep(5) # Test cases based on version test_basic_crawl(tester) # if version in ["full", "transformer"]: # test_cosine_extraction(tester) # test_js_execution(tester) # test_css_selector(tester) # test_structured_extraction(tester) # test_llm_extraction(tester) # test_llm_with_ollama(tester) # test_screenshot(tester) def test_basic_crawl(tester: Crawl4AiTester): print("\n=== Testing Basic Crawl ===") request = {"urls": ["https://www.nbcnews.com/business"], "priority": 10} result = tester.submit_and_wait(request) print(f"Basic crawl result 
length: {len(result['result']['markdown'])}") assert result["result"]["success"] assert len(result["result"]["markdown"]) > 0 def test_js_execution(tester: Crawl4AiTester): print("\n=== Testing JS Execution ===") request = { "urls": ["https://www.nbcnews.com/business"], "priority": 8, "js_code": [ "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();" ], "wait_for": "article.tease-card:nth-child(10)", "crawler_params": {"headless": True}, } result = tester.submit_and_wait(request) print(f"JS execution result length: {len(result['result']['markdown'])}") assert result["result"]["success"] def test_css_selector(tester: Crawl4AiTester): print("\n=== Testing CSS Selector ===") request = { "urls": ["https://www.nbcnews.com/business"], "priority": 7, "css_selector": ".wide-tease-item__description", "crawler_params": {"headless": True}, "extra": {"word_count_threshold": 10}, } result = tester.submit_and_wait(request) print(f"CSS selector result length: {len(result['result']['markdown'])}") assert result["result"]["success"] def test_structured_extraction(tester: Crawl4AiTester): print("\n=== Testing Structured Extraction ===") schema = { "name": "Coinbase Crypto Prices", "baseSelector": ".cds-tableRow-t45thuk", "fields": [ { "name": "crypto", "selector": "td:nth-child(1) h2", "type": "text", }, { "name": "symbol", "selector": "td:nth-child(1) p", "type": "text", }, { "name": "price", "selector": "td:nth-child(2)", "type": "text", }, ], } request = { "urls": ["https://www.coinbase.com/explore"], "priority": 9, "extraction_config": {"type": "json_css", "params": {"schema": schema}}, } result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["extracted_content"]) print(f"Extracted {len(extracted)} items") print("Sample item:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] assert len(extracted) > 0 def 
test_llm_extraction(tester: Crawl4AiTester): print("\n=== Testing LLM Extraction ===") schema = { "type": "object", "properties": { "model_name": { "type": "string", "description": "Name of the OpenAI model.", }, "input_fee": { "type": "string", "description": "Fee for input token for the OpenAI model.", }, "output_fee": { "type": "string", "description": "Fee for output token for the OpenAI model.", }, }, "required": ["model_name", "input_fee", "output_fee"], } request = { "urls": ["https://openai.com/api/pricing"], "priority": 8, "extraction_config": { "type": "llm", "params": { "provider": "openai/gpt-4o-mini", "api_token": os.getenv("OPENAI_API_KEY"), "schema": schema, "extraction_type": "schema", "instruction": """From the crawled content, extract all mentioned model names along with their fees for input and output tokens.""", }, }, "crawler_params": {"word_count_threshold": 1}, } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["extracted_content"]) print(f"Extracted {len(extracted)} model pricing entries") print("Sample entry:", json.dumps(extracted[0], indent=2)) assert result["result"]["success"] except Exception as e: print(f"LLM extraction test failed (might be due to missing API key): {str(e)}") def test_llm_with_ollama(tester: Crawl4AiTester): print("\n=== Testing LLM with Ollama ===") schema = { "type": "object", "properties": { "article_title": { "type": "string", "description": "The main title of the news article", }, "summary": { "type": "string", "description": "A brief summary of the article content", }, "main_topics": { "type": "array", "items": {"type": "string"}, "description": "Main topics or themes discussed in the article", }, }, } request = { "urls": ["https://www.nbcnews.com/business"], "priority": 8, "extraction_config": { "type": "llm", "params": { "provider": "ollama/llama2", "schema": schema, "extraction_type": "schema", "instruction": "Extract the main article information including title, summary, 
and main topics.", }, }, "extra": {"word_count_threshold": 1}, "crawler_params": {"verbose": True}, } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["extracted_content"]) print("Extracted content:", json.dumps(extracted, indent=2)) assert result["result"]["success"] except Exception as e: print(f"Ollama extraction test failed: {str(e)}") def test_cosine_extraction(tester: Crawl4AiTester): print("\n=== Testing Cosine Extraction ===") request = { "urls": ["https://www.nbcnews.com/business"], "priority": 8, "extraction_config": { "type": "cosine", "params": { "semantic_filter": "business finance economy", "word_count_threshold": 10, "max_dist": 0.2, "top_k": 3, }, }, } try: result = tester.submit_and_wait(request) extracted = json.loads(result["result"]["extracted_content"]) print(f"Extracted {len(extracted)} text clusters") print("First cluster tags:", extracted[0]["tags"]) assert result["result"]["success"] except Exception as e: print(f"Cosine extraction test failed: {str(e)}") def test_screenshot(tester: Crawl4AiTester): print("\n=== Testing Screenshot ===") request = { "urls": ["https://www.nbcnews.com/business"], "priority": 5, "screenshot": True, "crawler_params": {"headless": True}, } result = tester.submit_and_wait(request) print("Screenshot captured:", bool(result["result"]["screenshot"])) if result["result"]["screenshot"]: # Save screenshot screenshot_data = base64.b64decode(result["result"]["screenshot"]) with open("test_screenshot.jpg", "wb") as f: f.write(screenshot_data) print("Screenshot saved as test_screenshot.jpg") assert result["result"]["success"] if __name__ == "__main__": version = sys.argv[1] if len(sys.argv) > 1 else "basic" # version = "full" test_docker_deployment(version)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_docker_api_with_llm_provider.py
tests/test_docker_api_with_llm_provider.py
#!/usr/bin/env python3 """Test script to verify Docker API with LLM provider configuration.""" import requests import json import time BASE_URL = "http://localhost:11235" def test_health(): """Test health endpoint.""" print("1. Testing health endpoint...") response = requests.get(f"{BASE_URL}/health") print(f" Status: {response.status_code}") print(f" Response: {response.json()}") print() def test_schema(): """Test schema endpoint to see configuration.""" print("2. Testing schema endpoint...") response = requests.get(f"{BASE_URL}/schema") print(f" Status: {response.status_code}") # Print only browser config to keep output concise print(f" Browser config keys: {list(response.json().get('browser', {}).keys())[:5]}...") print() def test_markdown_with_llm_filter(): """Test markdown endpoint with LLM filter (should use configured provider).""" print("3. Testing markdown endpoint with LLM filter...") print(" This should use the Groq provider from LLM_PROVIDER env var") # Note: This will fail with dummy API keys, but we can see if it tries to use Groq payload = { "url": "https://httpbin.org/html", "f": "llm", "q": "Extract the main content" } response = requests.post(f"{BASE_URL}/md", json=payload) print(f" Status: {response.status_code}") if response.status_code != 200: print(f" Error: {response.text[:200]}...") else: print(f" Success! Markdown length: {len(response.json().get('markdown', ''))} chars") print() def test_markdown_with_provider_override(): """Test markdown endpoint with provider override in request.""" print("4. 
Testing markdown endpoint with provider override...") print(" This should use OpenAI provider from request parameter") payload = { "url": "https://httpbin.org/html", "f": "llm", "q": "Extract the main content", "provider": "openai/gpt-4" # Override to use OpenAI } response = requests.post(f"{BASE_URL}/md", json=payload) print(f" Status: {response.status_code}") if response.status_code != 200: print(f" Error: {response.text[:200]}...") else: print(f" Success! Markdown length: {len(response.json().get('markdown', ''))} chars") print() def test_simple_crawl(): """Test simple crawl without LLM.""" print("5. Testing simple crawl (no LLM required)...") payload = { "urls": ["https://httpbin.org/html"], "browser_config": { "type": "BrowserConfig", "params": {"headless": True} }, "crawler_config": { "type": "CrawlerRunConfig", "params": {"cache_mode": "bypass"} } } response = requests.post(f"{BASE_URL}/crawl", json=payload) print(f" Status: {response.status_code}") if response.status_code == 200: result = response.json() print(f" Success: {result.get('success')}") print(f" Results count: {len(result.get('results', []))}") if result.get('results'): print(f" First result success: {result['results'][0].get('success')}") else: print(f" Error: {response.text[:200]}...") print() def test_playground(): """Test if playground is accessible.""" print("6. Testing playground interface...") response = requests.get(f"{BASE_URL}/playground") print(f" Status: {response.status_code}") print(f" Content-Type: {response.headers.get('content-type')}") print() if __name__ == "__main__": print("=== Crawl4AI Docker API Tests ===\n") print(f"Testing API at {BASE_URL}\n") # Wait a bit for server to be fully ready time.sleep(2) test_health() test_schema() test_simple_crawl() test_playground() print("\nTesting LLM functionality (these may fail with dummy API keys):\n") test_markdown_with_llm_filter() test_markdown_with_provider_override() print("\nTests completed!")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_preserve_https_for_internal_links.py
tests/test_preserve_https_for_internal_links.py
#!/usr/bin/env python3 """ Final test and demo for HTTPS preservation feature (Issue #1410) This demonstrates how the preserve_https_for_internal_links flag prevents HTTPS downgrade when servers redirect to HTTP. """ import sys import os from urllib.parse import urljoin, urlparse def demonstrate_issue(): """Show the problem: HTTPS -> HTTP redirect causes HTTP links""" print("=" * 60) print("DEMONSTRATING THE ISSUE") print("=" * 60) # Simulate what happens during crawling original_url = "https://quotes.toscrape.com/tag/deep-thoughts" redirected_url = "http://quotes.toscrape.com/tag/deep-thoughts/" # Server redirects to HTTP # Extract a relative link relative_link = "/author/Albert-Einstein" # Standard URL joining uses the redirected (HTTP) base resolved_url = urljoin(redirected_url, relative_link) print(f"Original URL: {original_url}") print(f"Redirected to: {redirected_url}") print(f"Relative link: {relative_link}") print(f"Resolved link: {resolved_url}") print(f"\n❌ Problem: Link is now HTTP instead of HTTPS!") return resolved_url def demonstrate_solution(): """Show the solution: preserve HTTPS for internal links""" print("\n" + "=" * 60) print("DEMONSTRATING THE SOLUTION") print("=" * 60) # Our normalize_url with HTTPS preservation def normalize_url_with_preservation(href, base_url, preserve_https=False, original_scheme=None): """Normalize URL with optional HTTPS preservation""" # Standard resolution full_url = urljoin(base_url, href.strip()) # Preserve HTTPS if requested if preserve_https and original_scheme == 'https': parsed_full = urlparse(full_url) parsed_base = urlparse(base_url) # Only for same-domain links if parsed_full.scheme == 'http' and parsed_full.netloc == parsed_base.netloc: full_url = full_url.replace('http://', 'https://', 1) print(f" → Preserved HTTPS for {parsed_full.netloc}") return full_url # Same scenario as before original_url = "https://quotes.toscrape.com/tag/deep-thoughts" redirected_url = "http://quotes.toscrape.com/tag/deep-thoughts/" 
relative_link = "/author/Albert-Einstein" # Without preservation (current behavior) resolved_without = normalize_url_with_preservation( relative_link, redirected_url, preserve_https=False, original_scheme='https' ) print(f"\nWithout preservation:") print(f" Result: {resolved_without}") # With preservation (new feature) resolved_with = normalize_url_with_preservation( relative_link, redirected_url, preserve_https=True, original_scheme='https' ) print(f"\nWith preservation (preserve_https_for_internal_links=True):") print(f" Result: {resolved_with}") print(f"\n✅ Solution: Internal link stays HTTPS!") return resolved_with def test_edge_cases(): """Test important edge cases""" print("\n" + "=" * 60) print("EDGE CASES") print("=" * 60) from urllib.parse import urljoin, urlparse def preserve_https(href, base_url, original_scheme): """Helper to test preservation logic""" full_url = urljoin(base_url, href) if original_scheme == 'https': parsed_full = urlparse(full_url) parsed_base = urlparse(base_url) # Fixed: check for protocol-relative URLs if (parsed_full.scheme == 'http' and parsed_full.netloc == parsed_base.netloc and not href.strip().startswith('//')): full_url = full_url.replace('http://', 'https://', 1) return full_url test_cases = [ # (description, href, base_url, original_scheme, should_be_https) ("External link", "http://other.com/page", "http://example.com", "https", False), ("Already HTTPS", "/page", "https://example.com", "https", True), ("No original HTTPS", "/page", "http://example.com", "http", False), ("Subdomain", "/page", "http://sub.example.com", "https", True), ("Protocol-relative", "//example.com/page", "http://example.com", "https", False), ] for desc, href, base_url, orig_scheme, should_be_https in test_cases: result = preserve_https(href, base_url, orig_scheme) is_https = result.startswith('https://') status = "✅" if is_https == should_be_https else "❌" print(f"\n{status} {desc}:") print(f" Input: {href} + {base_url}") print(f" Result: {result}") 
print(f" Expected HTTPS: {should_be_https}, Got: {is_https}") def usage_example(): """Show how to use the feature in crawl4ai""" print("\n" + "=" * 60) print("USAGE IN CRAWL4AI") print("=" * 60) print(""" To enable HTTPS preservation in your crawl4ai code: ```python from crawl4ai import AsyncWebCrawler, CrawlerRunConfig async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig( preserve_https_for_internal_links=True # Enable HTTPS preservation ) result = await crawler.arun( url="https://example.com", config=config ) # All internal links will maintain HTTPS even if # the server redirects to HTTP ``` This is especially useful for: - Sites that redirect HTTPS to HTTP but still support HTTPS - Security-conscious crawling where you want to stay on HTTPS - Avoiding mixed content issues in downstream processing """) if __name__ == "__main__": # Run all demonstrations demonstrate_issue() demonstrate_solution() test_edge_cases() usage_example() print("\n" + "=" * 60) print("✅ All tests complete!") print("=" * 60)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_config_matching_only.py
tests/test_config_matching_only.py
""" Test only the config matching logic without running crawler """ import sys from pathlib import Path # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent)) from crawl4ai.async_configs import CrawlerRunConfig, MatchMode def test_all_matching_scenarios(): print("Testing CrawlerRunConfig.is_match() method") print("=" * 50) # Test 1: Single string pattern print("\n1. Single string pattern (glob style)") config = CrawlerRunConfig( url_matcher="*.pdf", # For example we can set this => scraping_strategy=PDFContentScrapingStrategy() ) test_urls = [ ("https://example.com/file.pdf", True), ("https://example.com/doc.PDF", False), # Case sensitive ("https://example.com/file.txt", False), ("file.pdf", True), ] for url, expected in test_urls: result = config.is_match(url) status = "✓" if result == expected else "✗" print(f" {status} {url} -> {result}") # Test 2: List of patterns with OR print("\n2. List of patterns with OR (default)") config = CrawlerRunConfig( url_matcher=["*/article/*", "*/blog/*", "*.html"], match_mode=MatchMode.OR ) test_urls = [ ("https://example.com/article/news", True), ("https://example.com/blog/post", True), ("https://example.com/page.html", True), ("https://example.com/page.php", False), ] for url, expected in test_urls: result = config.is_match(url) status = "✓" if result == expected else "✗" print(f" {status} {url} -> {result}") # Test 3: Custom function print("\n3. Custom function matcher") config = CrawlerRunConfig( url_matcher=lambda url: 'api' in url and (url.endswith('.json') or url.endswith('.xml')) ) test_urls = [ ("https://api.example.com/data.json", True), ("https://api.example.com/data.xml", True), ("https://api.example.com/data.html", False), ("https://example.com/data.json", False), # No 'api' ] for url, expected in test_urls: result = config.is_match(url) status = "✓" if result == expected else "✗" print(f" {status} {url} -> {result}") # Test 4: Mixed list with AND print("\n4. 
Mixed patterns and functions with AND") config = CrawlerRunConfig( url_matcher=[ "https://*", # Must be HTTPS lambda url: '.com' in url, # Must have .com lambda url: len(url) < 50 # Must be short ], match_mode=MatchMode.AND ) test_urls = [ ("https://example.com/page", True), ("http://example.com/page", False), # Not HTTPS ("https://example.org/page", False), # No .com ("https://example.com/" + "x" * 50, False), # Too long ] for url, expected in test_urls: result = config.is_match(url) status = "✓" if result == expected else "✗" print(f" {status} {url} -> {result}") # Test 5: Complex real-world scenario print("\n5. Complex pattern combinations") config = CrawlerRunConfig( url_matcher=[ "*/api/v[0-9]/*", # API versioned endpoints lambda url: 'graphql' in url, # GraphQL endpoints "*.json" # JSON files ], match_mode=MatchMode.OR ) test_urls = [ ("https://example.com/api/v1/users", True), ("https://example.com/api/v2/posts", True), ("https://example.com/graphql", True), ("https://example.com/data.json", True), ("https://example.com/api/users", False), # No version ] for url, expected in test_urls: result = config.is_match(url) status = "✓" if result == expected else "✗" print(f" {status} {url} -> {result}") # Test 6: Edge cases print("\n6. Edge cases") # No matcher config = CrawlerRunConfig() result = config.is_match("https://example.com") print(f" {'✓' if not result else '✗'} No matcher -> {result}") # Empty list config = CrawlerRunConfig(url_matcher=[]) result = config.is_match("https://example.com") print(f" {'✓' if not result else '✗'} Empty list -> {result}") # None in list (should be skipped) config = CrawlerRunConfig(url_matcher=["*.pdf", None, "*.doc"]) result = config.is_match("test.pdf") print(f" {'✓' if result else '✗'} List with None -> {result}") print("\n" + "=" * 50) print("All matching tests completed!") if __name__ == "__main__": test_all_matching_scenarios()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_arun_many.py
tests/test_arun_many.py
""" Test example for multiple crawler configs feature """ import asyncio import sys from pathlib import Path # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent)) from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode from crawl4ai.processors.pdf import PDFContentScrapingStrategy async def test_run_many(): default_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, # scraping_strategy=PDFContentScrapingStrategy() ) test_urls = [ # "https://blog.python.org/", # Blog URL "https://www.python.org/", # Generic HTTPS page "https://www.kidocode.com/", # Generic HTTPS page "https://www.example.com/", # Generic HTTPS page # "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf", ] async with AsyncWebCrawler() as crawler: # Single config - traditional usage still works print("Test 1: Single config (backwards compatible)") result = await crawler.arun_many( urls=test_urls[:2], config=default_config ) print(f"Crawled {len(result)} URLs with single config\n") for item in result: print(f" {item.url} -> {item.status_code}") if __name__ == "__main__": asyncio.run(test_run_many())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_pyopenssl_update.py
tests/test_pyopenssl_update.py
""" Test script to verify pyOpenSSL update doesn't break crawl4ai functionality. This test verifies: 1. pyOpenSSL and cryptography versions are correct and secure 2. Basic crawling functionality still works 3. HTTPS/SSL connections work properly 4. Stealth mode integration works (uses playwright-stealth internally) Issue: #1545 - Security vulnerability in cryptography package Fix: Updated pyOpenSSL from >=24.3.0 to >=25.3.0 Expected: cryptography package should be >=45.0.7 (above vulnerable range) """ import asyncio import sys from packaging import version def check_versions(): """Verify pyOpenSSL and cryptography versions meet security requirements.""" print("=" * 60) print("STEP 1: Checking Package Versions") print("=" * 60) try: import OpenSSL pyopenssl_version = OpenSSL.__version__ print(f"✓ pyOpenSSL version: {pyopenssl_version}") # Check pyOpenSSL >= 25.3.0 if version.parse(pyopenssl_version) >= version.parse("25.3.0"): print(f" ✓ Version check passed: {pyopenssl_version} >= 25.3.0") else: print(f" ✗ Version check FAILED: {pyopenssl_version} < 25.3.0") return False except ImportError as e: print(f"✗ Failed to import pyOpenSSL: {e}") return False try: import cryptography crypto_version = cryptography.__version__ print(f"✓ cryptography version: {crypto_version}") # Check cryptography >= 45.0.7 (above vulnerable range) if version.parse(crypto_version) >= version.parse("45.0.7"): print(f" ✓ Security check passed: {crypto_version} >= 45.0.7 (not vulnerable)") else: print(f" ✗ Security check FAILED: {crypto_version} < 45.0.7 (potentially vulnerable)") return False except ImportError as e: print(f"✗ Failed to import cryptography: {e}") return False print("\n✓ All version checks passed!\n") return True async def test_basic_crawl(): """Test basic crawling functionality with HTTPS site.""" print("=" * 60) print("STEP 2: Testing Basic HTTPS Crawling") print("=" * 60) try: from crawl4ai import AsyncWebCrawler async with AsyncWebCrawler(verbose=True) as crawler: # Test 
with a simple HTTPS site (requires SSL/TLS) print("Crawling example.com (HTTPS)...") result = await crawler.arun( url="https://www.example.com", bypass_cache=True ) if result.success: print(f"✓ Crawl successful!") print(f" - Status code: {result.status_code}") print(f" - Content length: {len(result.html)} bytes") print(f" - SSL/TLS connection: ✓ Working") return True else: print(f"✗ Crawl failed: {result.error_message}") return False except Exception as e: print(f"✗ Test failed with error: {e}") import traceback traceback.print_exc() return False async def test_stealth_mode(): """Test stealth mode functionality (depends on playwright-stealth).""" print("\n" + "=" * 60) print("STEP 3: Testing Stealth Mode Integration") print("=" * 60) try: from crawl4ai import AsyncWebCrawler, BrowserConfig # Create browser config with stealth mode browser_config = BrowserConfig( headless=True, verbose=False ) async with AsyncWebCrawler(config=browser_config, verbose=True) as crawler: print("Crawling with stealth mode enabled...") result = await crawler.arun( url="https://www.example.com", bypass_cache=True ) if result.success: print(f"✓ Stealth crawl successful!") print(f" - Stealth mode: ✓ Working") return True else: print(f"✗ Stealth crawl failed: {result.error_message}") return False except Exception as e: print(f"✗ Stealth test failed with error: {e}") import traceback traceback.print_exc() return False async def main(): """Run all tests.""" print("\n") print("╔" + "=" * 58 + "╗") print("║ pyOpenSSL Security Update Verification Test (Issue #1545) ║") print("╚" + "=" * 58 + "╝") print("\n") # Step 1: Check versions versions_ok = check_versions() if not versions_ok: print("\n✗ FAILED: Version requirements not met") return False # Step 2: Test basic crawling crawl_ok = await test_basic_crawl() if not crawl_ok: print("\n✗ FAILED: Basic crawling test failed") return False # Step 3: Test stealth mode stealth_ok = await test_stealth_mode() if not stealth_ok: print("\n✗ FAILED: Stealth 
mode test failed") return False # All tests passed print("\n" + "=" * 60) print("FINAL RESULT") print("=" * 60) print("✓ All tests passed successfully!") print("✓ pyOpenSSL update is working correctly") print("✓ No breaking changes detected") print("✓ Security vulnerability resolved") print("=" * 60) print("\n") return True if __name__ == "__main__": try: success = asyncio.run(main()) sys.exit(0 if success else 1) except KeyboardInterrupt: print("\n\nTest interrupted by user") sys.exit(1) except Exception as e: print(f"\n✗ Unexpected error: {e}") import traceback traceback.print_exc() sys.exit(1)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_pyopenssl_security_fix.py
tests/test_pyopenssl_security_fix.py
""" Lightweight test to verify pyOpenSSL security fix (Issue #1545). This test verifies the security requirements are met: 1. pyOpenSSL >= 25.3.0 is installed 2. cryptography >= 45.0.7 is installed (above vulnerable range) 3. SSL/TLS functionality works correctly This test can run without full crawl4ai dependencies installed. """ import sys from packaging import version def test_package_versions(): """Test that package versions meet security requirements.""" print("=" * 70) print("TEST: Package Version Security Requirements (Issue #1545)") print("=" * 70) all_passed = True # Test pyOpenSSL version try: import OpenSSL pyopenssl_version = OpenSSL.__version__ print(f"\n✓ pyOpenSSL is installed: {pyopenssl_version}") if version.parse(pyopenssl_version) >= version.parse("25.3.0"): print(f" ✓ PASS: pyOpenSSL {pyopenssl_version} >= 25.3.0 (required)") else: print(f" ✗ FAIL: pyOpenSSL {pyopenssl_version} < 25.3.0 (required)") all_passed = False except ImportError as e: print(f"\n✗ FAIL: pyOpenSSL not installed - {e}") all_passed = False # Test cryptography version try: import cryptography crypto_version = cryptography.__version__ print(f"\n✓ cryptography is installed: {crypto_version}") # The vulnerable range is >=37.0.0 & <43.0.1 # We need >= 45.0.7 to be safe if version.parse(crypto_version) >= version.parse("45.0.7"): print(f" ✓ PASS: cryptography {crypto_version} >= 45.0.7 (secure)") print(f" ✓ NOT in vulnerable range (37.0.0 to 43.0.0)") elif version.parse(crypto_version) >= version.parse("37.0.0") and version.parse(crypto_version) < version.parse("43.0.1"): print(f" ✗ FAIL: cryptography {crypto_version} is VULNERABLE") print(f" ✗ Version is in vulnerable range (>=37.0.0 & <43.0.1)") all_passed = False else: print(f" ⚠ WARNING: cryptography {crypto_version} < 45.0.7") print(f" ⚠ May not meet security requirements") except ImportError as e: print(f"\n✗ FAIL: cryptography not installed - {e}") all_passed = False return all_passed def test_ssl_basic_functionality(): 
"""Test that SSL/TLS basic functionality works.""" print("\n" + "=" * 70) print("TEST: SSL/TLS Basic Functionality") print("=" * 70) try: import OpenSSL.SSL # Create a basic SSL context to verify functionality context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD) print("\n✓ SSL Context created successfully") print(" ✓ PASS: SSL/TLS functionality is working") return True except Exception as e: print(f"\n✗ FAIL: SSL functionality test failed - {e}") return False def test_pyopenssl_crypto_integration(): """Test that pyOpenSSL and cryptography integration works.""" print("\n" + "=" * 70) print("TEST: pyOpenSSL <-> cryptography Integration") print("=" * 70) try: from OpenSSL import crypto # Generate a simple key pair to test integration key = crypto.PKey() key.generate_key(crypto.TYPE_RSA, 2048) print("\n✓ Generated RSA key pair successfully") print(" ✓ PASS: pyOpenSSL and cryptography are properly integrated") return True except Exception as e: print(f"\n✗ FAIL: Integration test failed - {e}") import traceback traceback.print_exc() return False def main(): """Run all security tests.""" print("\n") print("╔" + "=" * 68 + "╗") print("║ pyOpenSSL Security Fix Verification - Issue #1545 ║") print("╚" + "=" * 68 + "╝") print("\nVerifying that the pyOpenSSL update resolves the security vulnerability") print("in the cryptography package (CVE: versions >=37.0.0 & <43.0.1)\n") results = [] # Test 1: Package versions results.append(("Package Versions", test_package_versions())) # Test 2: SSL functionality results.append(("SSL Functionality", test_ssl_basic_functionality())) # Test 3: Integration results.append(("pyOpenSSL-crypto Integration", test_pyopenssl_crypto_integration())) # Summary print("\n" + "=" * 70) print("TEST SUMMARY") print("=" * 70) all_passed = True for test_name, passed in results: status = "✓ PASS" if passed else "✗ FAIL" print(f"{status}: {test_name}") all_passed = all_passed and passed print("=" * 70) if all_passed: print("\n✓✓✓ ALL TESTS PASSED ✓✓✓") 
print("✓ Security vulnerability is resolved") print("✓ pyOpenSSL >= 25.3.0 is working correctly") print("✓ cryptography >= 45.0.7 (not vulnerable)") print("\nThe dependency update is safe to merge.\n") return True else: print("\n✗✗✗ SOME TESTS FAILED ✗✗✗") print("✗ Security requirements not met") print("\nDo NOT merge until all tests pass.\n") return False if __name__ == "__main__": try: success = main() sys.exit(0 if success else 1) except KeyboardInterrupt: print("\n\nTest interrupted by user") sys.exit(1) except Exception as e: print(f"\n✗ Unexpected error: {e}") import traceback traceback.print_exc() sys.exit(1)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_main.py
tests/test_main.py
import asyncio import aiohttp import json import time import os from typing import Dict, Any class NBCNewsAPITest: def __init__(self, base_url: str = "http://localhost:8000"): self.base_url = base_url self.session = None async def __aenter__(self): self.session = aiohttp.ClientSession() return self async def __aexit__(self, exc_type, exc_val, exc_tb): if self.session: await self.session.close() async def submit_crawl(self, request_data: Dict[str, Any]) -> str: async with self.session.post( f"{self.base_url}/crawl", json=request_data ) as response: result = await response.json() return result["task_id"] async def get_task_status(self, task_id: str) -> Dict[str, Any]: async with self.session.get(f"{self.base_url}/task/{task_id}") as response: return await response.json() async def wait_for_task( self, task_id: str, timeout: int = 300, poll_interval: int = 2 ) -> Dict[str, Any]: start_time = time.time() while True: if time.time() - start_time > timeout: raise TimeoutError( f"Task {task_id} did not complete within {timeout} seconds" ) status = await self.get_task_status(task_id) if status["status"] in ["completed", "failed"]: return status await asyncio.sleep(poll_interval) async def check_health(self) -> Dict[str, Any]: async with self.session.get(f"{self.base_url}/health") as response: return await response.json() async def test_basic_crawl(): print("\n=== Testing Basic Crawl ===") async with NBCNewsAPITest() as api: request = {"urls": ["https://www.nbcnews.com/business"], "priority": 10} task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) print(f"Basic crawl result length: {len(result['result']['markdown'])}") assert result["status"] == "completed" assert "result" in result assert result["result"]["success"] async def test_js_execution(): print("\n=== Testing JS Execution ===") async with NBCNewsAPITest() as api: request = { "urls": ["https://www.nbcnews.com/business"], "priority": 8, "js_code": [ "const loadMoreButton = 
Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();" ], "wait_for": "article.tease-card:nth-child(10)", "crawler_params": {"headless": True}, } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) print(f"JS execution result length: {len(result['result']['markdown'])}") assert result["status"] == "completed" assert result["result"]["success"] async def test_css_selector(): print("\n=== Testing CSS Selector ===") async with NBCNewsAPITest() as api: request = { "urls": ["https://www.nbcnews.com/business"], "priority": 7, "css_selector": ".wide-tease-item__description", } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) print(f"CSS selector result length: {len(result['result']['markdown'])}") assert result["status"] == "completed" assert result["result"]["success"] async def test_structured_extraction(): print("\n=== Testing Structured Extraction ===") async with NBCNewsAPITest() as api: schema = { "name": "NBC News Articles", "baseSelector": "article.tease-card", "fields": [ {"name": "title", "selector": "h2", "type": "text"}, { "name": "description", "selector": ".tease-card__description", "type": "text", }, { "name": "link", "selector": "a", "type": "attribute", "attribute": "href", }, ], } request = { "urls": ["https://www.nbcnews.com/business"], "priority": 9, "extraction_config": {"type": "json_css", "params": {"schema": schema}}, } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) extracted = json.loads(result["result"]["extracted_content"]) print(f"Extracted {len(extracted)} articles") assert result["status"] == "completed" assert result["result"]["success"] assert len(extracted) > 0 async def test_batch_crawl(): print("\n=== Testing Batch Crawl ===") async with NBCNewsAPITest() as api: request = { "urls": [ "https://www.nbcnews.com/business", 
"https://www.nbcnews.com/business/consumer", "https://www.nbcnews.com/business/economy", ], "priority": 6, "crawler_params": {"headless": True}, } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) print(f"Batch crawl completed, got {len(result['results'])} results") assert result["status"] == "completed" assert "results" in result assert len(result["results"]) == 3 async def test_llm_extraction(): print("\n=== Testing LLM Extraction with Ollama ===") async with NBCNewsAPITest() as api: schema = { "type": "object", "properties": { "article_title": { "type": "string", "description": "The main title of the news article", }, "summary": { "type": "string", "description": "A brief summary of the article content", }, "main_topics": { "type": "array", "items": {"type": "string"}, "description": "Main topics or themes discussed in the article", }, }, "required": ["article_title", "summary", "main_topics"], } request = { "urls": ["https://www.nbcnews.com/business"], "priority": 8, "extraction_config": { "type": "llm", "params": { "provider": "openai/gpt-4o-mini", "api_key": os.getenv("OLLAMA_API_KEY"), "schema": schema, "extraction_type": "schema", "instruction": """Extract the main article information including title, a brief summary, and main topics discussed. 
Focus on the primary business news article on the page.""", }, }, "crawler_params": {"headless": True, "word_count_threshold": 1}, } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) if result["status"] == "completed": extracted = json.loads(result["result"]["extracted_content"]) print("Extracted article analysis:") print(json.dumps(extracted, indent=2)) assert result["status"] == "completed" assert result["result"]["success"] async def test_screenshot(): print("\n=== Testing Screenshot ===") async with NBCNewsAPITest() as api: request = { "urls": ["https://www.nbcnews.com/business"], "priority": 5, "screenshot": True, "crawler_params": {"headless": True}, } task_id = await api.submit_crawl(request) result = await api.wait_for_task(task_id) print("Screenshot captured:", bool(result["result"]["screenshot"])) assert result["status"] == "completed" assert result["result"]["success"] assert result["result"]["screenshot"] is not None async def test_priority_handling(): print("\n=== Testing Priority Handling ===") async with NBCNewsAPITest() as api: # Submit low priority task first low_priority = { "urls": ["https://www.nbcnews.com/business"], "priority": 1, "crawler_params": {"headless": True}, } low_task_id = await api.submit_crawl(low_priority) # Submit high priority task high_priority = { "urls": ["https://www.nbcnews.com/business/consumer"], "priority": 10, "crawler_params": {"headless": True}, } high_task_id = await api.submit_crawl(high_priority) # Get both results high_result = await api.wait_for_task(high_task_id) low_result = await api.wait_for_task(low_task_id) print("Both tasks completed") assert high_result["status"] == "completed" assert low_result["status"] == "completed" async def main(): try: # Start with health check async with NBCNewsAPITest() as api: health = await api.check_health() print("Server health:", health) # Run all tests # await test_basic_crawl() # await test_js_execution() # await test_css_selector() # 
await test_structured_extraction() await test_llm_extraction() # await test_batch_crawl() # await test_screenshot() # await test_priority_handling() except Exception as e: print(f"Test failed: {str(e)}") raise if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_virtual_scroll.py
tests/test_virtual_scroll.py
""" Test virtual scroll implementation according to the design: - Create a page with virtual scroll that replaces content - Verify all 1000 items are captured """ import asyncio import os from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, VirtualScrollConfig, CacheMode, BrowserConfig async def test_virtual_scroll(): """Test virtual scroll with content replacement (true virtual scroll)""" # Create test HTML with true virtual scroll that replaces content test_html = ''' <html> <head> <style> #container { height: 500px; overflow-y: auto; border: 1px solid #ccc; } .item { height: 50px; padding: 10px; border-bottom: 1px solid #eee; } </style> </head> <body> <h1>Virtual Scroll Test - 1000 Items</h1> <div id="container"></div> <script> // True virtual scroll that REPLACES content const container = document.getElementById('container'); const totalItems = 1000; const itemsPerPage = 10; // Only show 10 items at a time let currentStartIndex = 0; // All our data const allData = []; for (let i = 0; i < totalItems; i++) { allData.push({ id: i, text: `Item ${i + 1} of ${totalItems} - Unique ID: ${i}` }); } // Function to render current page function renderPage(startIndex) { const items = []; const endIndex = Math.min(startIndex + itemsPerPage, totalItems); for (let i = startIndex; i < endIndex; i++) { const item = allData[i]; items.push(`<div class="item" data-index="${item.id}">${item.text}</div>`); } // REPLACE container content (virtual scroll) container.innerHTML = items.join(''); currentStartIndex = startIndex; } // Initial render renderPage(0); // Handle scroll container.addEventListener('scroll', () => { const scrollTop = container.scrollTop; const scrollHeight = container.scrollHeight; const clientHeight = container.clientHeight; // Calculate which page we should show based on scroll position // This creates a virtual scroll effect if (scrollTop + clientHeight >= scrollHeight - 50) { // Load next page const nextIndex = currentStartIndex + itemsPerPage; if (nextIndex 
< totalItems) { renderPage(nextIndex); // Reset scroll to top to continue scrolling container.scrollTop = 10; } } }); </script> </body> </html> ''' # Save test HTML to a file import tempfile with tempfile.NamedTemporaryFile(mode='w', suffix='.html', delete=False) as f: f.write(test_html) test_file_path = f.name httpd = None old_cwd = os.getcwd() try: # Start a simple HTTP server import http.server import socketserver import threading import random # Find available port for _ in range(10): PORT = random.randint(8000, 9999) try: Handler = http.server.SimpleHTTPRequestHandler os.chdir(os.path.dirname(test_file_path)) httpd = socketserver.TCPServer(("", PORT), Handler) break except OSError: continue if httpd is None: raise RuntimeError("Could not find available port") server_thread = threading.Thread(target=httpd.serve_forever) server_thread.daemon = True server_thread.start() # Give server time to start await asyncio.sleep(0.5) # Configure virtual scroll # With 10 items per page and 1000 total, we need 100 pages # Let's do 120 scrolls to ensure we get everything virtual_config = VirtualScrollConfig( container_selector="#container", scroll_count=120, scroll_by="container_height", # Scroll by container height wait_after_scroll=0.1 # Quick wait for test ) config = CrawlerRunConfig( virtual_scroll_config=virtual_config, cache_mode=CacheMode.BYPASS, verbose=True ) browserConfig = BrowserConfig( headless= False ) async with AsyncWebCrawler(verbose=True, config=browserConfig) as crawler: result = await crawler.arun( url=f"http://localhost:{PORT}/{os.path.basename(test_file_path)}", config=config ) # Count all items in the result import re items = re.findall(r'data-index="(\d+)"', result.html) unique_indices = sorted(set(int(idx) for idx in items)) print(f"\n{'='*60}") print(f"TEST RESULTS:") print(f"HTML Length: {len(result.html)}") print(f"Total items found: {len(items)}") print(f"Unique items: {len(unique_indices)}") if unique_indices: print(f"Item indices: 
{min(unique_indices)} to {max(unique_indices)}") print(f"Expected: 0 to 999") # Check for gaps expected = set(range(1000)) actual = set(unique_indices) missing = expected - actual if missing: print(f"\n❌ FAILED! Missing {len(missing)} items") print(f"Missing indices: {sorted(missing)[:10]}{'...' if len(missing) > 10 else ''}") else: print(f"\n✅ SUCCESS! All 1000 items captured!") # Show some sample items print(f"\nSample items from result:") sample_items = re.findall(r'<div class="item"[^>]*>([^<]+)</div>', result.html)[:5] for item in sample_items: print(f" - {item}") print(f"{'='*60}\n") finally: # Clean up if httpd: httpd.shutdown() os.chdir(old_cwd) os.unlink(test_file_path) if __name__ == "__main__": asyncio.run(test_virtual_scroll())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_llm_simple_url.py
tests/test_llm_simple_url.py
#!/usr/bin/env python3 """ Test LLMTableExtraction with controlled HTML """ import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) import asyncio from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, LLMConfig, LLMTableExtraction, DefaultTableExtraction, CacheMode ) async def test_controlled_html(): """Test with controlled HTML content.""" print("\n" + "=" * 60) print("LLM TABLE EXTRACTION TEST") print("=" * 60) url = "https://en.wikipedia.org/wiki/List_of_chemical_elements" # url = "https://en.wikipedia.org/wiki/List_of_prime_ministers_of_India" # Configure LLM llm_config = LLMConfig( # provider="openai/gpt-4.1-mini", # api_token=os.getenv("OPENAI_API_KEY"), provider="groq/llama-3.3-70b-versatile", api_token="GROQ_API_TOKEN", temperature=0.1, max_tokens=32000 ) print("\n1. Testing LLMTableExtraction:") # Create LLM extraction strategy llm_strategy = LLMTableExtraction( llm_config=llm_config, verbose=True, # css_selector="div.w3-example" css_selector="div.mw-content-ltr", # css_selector="table.wikitable", max_tries=2, enable_chunking=True, chunk_token_threshold=5000, # Lower threshold to force chunking min_rows_per_chunk=10, max_parallel_chunks=3 ) config_llm = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, table_extraction=llm_strategy ) async with AsyncWebCrawler() as crawler: # Test with LLM extraction result_llm = await crawler.arun( # url=f"raw:{test_html}", url=url, config=config_llm ) if result_llm.success: print(f"\n ✓ LLM Extraction: Found {len(result_llm.tables)} table(s)") for i, table in enumerate(result_llm.tables, 1): print(f"\n Table {i}:") print(f" - Caption: {table.get('caption', 'No caption')}") print(f" - Headers: {table['headers']}") print(f" - Rows: {len(table['rows'])}") # Show how colspan/rowspan were handled print(f" - Sample rows:") for j, row in enumerate(table['rows'][:2], 1): print(f" Row {j}: {row}") metadata = table.get('metadata', {}) print(f" - Metadata:") print(f" • Has merged cells: 
{metadata.get('has_merged_cells', False)}") print(f" • Table type: {metadata.get('table_type', 'unknown')}") # # Compare with default extraction # print("\n2. Comparing with DefaultTableExtraction:") # default_strategy = DefaultTableExtraction( # table_score_threshold=3, # verbose=False # ) # config_default = CrawlerRunConfig( # cache_mode=CacheMode.BYPASS, # table_extraction=default_strategy # ) # result_default = await crawler.arun( # # url=f"raw:{test_html}", # url=url, # config=config_default # ) # if result_default.success: # print(f" ✓ Default Extraction: Found {len(result_default.tables)} table(s)") # # Compare handling of complex structures # print("\n3. Comparison Summary:") # print(f" LLM found: {len(result_llm.tables)} tables") # print(f" Default found: {len(result_default.tables)} tables") # if result_llm.tables and result_default.tables: # llm_first = result_llm.tables[0] # default_first = result_default.tables[0] # print(f"\n First table comparison:") # print(f" LLM headers: {len(llm_first['headers'])} columns") # print(f" Default headers: {len(default_first['headers'])} columns") # # Check if LLM better handled the complex structure # if llm_first.get('metadata', {}).get('has_merged_cells'): # print(" ✓ LLM correctly identified merged cells") # # Test pandas compatibility # try: # import pandas as pd # print("\n4. Testing Pandas compatibility:") # # Create DataFrame from LLM extraction # df_llm = pd.DataFrame( # llm_first['rows'], # columns=llm_first['headers'] # ) # print(f" ✓ LLM table -> DataFrame: Shape {df_llm.shape}") # # Create DataFrame from default extraction # df_default = pd.DataFrame( # default_first['rows'], # columns=default_first['headers'] # ) # print(f" ✓ Default table -> DataFrame: Shape {df_default.shape}") # print("\n LLM DataFrame preview:") # print(df_llm.head(2).to_string()) # except ImportError: # print("\n4. 
Pandas not installed, skipping DataFrame test") print("\n✅ Test completed successfully!") async def main(): """Run the test.""" # Check for API key if not os.getenv("OPENAI_API_KEY"): print("⚠️ OPENAI_API_KEY not set. Please set it to test LLM extraction.") print(" You can set it with: export OPENAI_API_KEY='your-key-here'") return await test_controlled_html() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/check_dependencies.py
tests/check_dependencies.py
#!/usr/bin/env python3 """ Dependency checker for Crawl4AI Analyzes imports in the codebase and shows which files use them """ import ast import os import sys from pathlib import Path from typing import Set, Dict, List, Tuple from collections import defaultdict import re import toml # Standard library modules to ignore STDLIB_MODULES = { 'abc', 'argparse', 'asyncio', 'base64', 'collections', 'concurrent', 'contextlib', 'copy', 'datetime', 'decimal', 'email', 'enum', 'functools', 'glob', 'hashlib', 'http', 'importlib', 'io', 'itertools', 'json', 'logging', 'math', 'mimetypes', 'multiprocessing', 'os', 'pathlib', 'pickle', 'platform', 'pprint', 'random', 're', 'shutil', 'signal', 'socket', 'sqlite3', 'string', 'subprocess', 'sys', 'tempfile', 'threading', 'time', 'traceback', 'typing', 'unittest', 'urllib', 'uuid', 'warnings', 'weakref', 'xml', 'zipfile', 'dataclasses', 'secrets', 'statistics', 'textwrap', 'queue', 'csv', 'gzip', 'tarfile', 'configparser', 'inspect', 'operator', 'struct', 'binascii', 'codecs', 'locale', 'gc', 'atexit', 'builtins', 'html', 'errno', 'fcntl', 'pwd', 'grp', 'resource', 'termios', 'tty', 'pty', 'select', 'selectors', 'ssl', 'zlib', 'bz2', 'lzma', 'types', 'copy', 'pydoc', 'profile', 'cProfile', 'timeit', 'trace', 'doctest', 'pdb', 'contextvars', 'dataclasses', 'graphlib', 'zoneinfo', 'tomllib', 'cgi', 'wsgiref', 'fileinput', 'linecache', 'tokenize', 'tabnanny', 'compileall', 'dis', 'pickletools', 'formatter', '__future__', 'array', 'ctypes', 'heapq', 'bisect', 'array', 'weakref', 'types', 'copy', 'pprint', 'repr', 'numbers', 'cmath', 'fractions', 'statistics', 'itertools', 'functools', 'operator', 'pathlib', 'fileinput', 'stat', 'filecmp', 'tempfile', 'glob', 'fnmatch', 'linecache', 'shutil', 'pickle', 'copyreg', 'shelve', 'marshal', 'dbm', 'sqlite3', 'zlib', 'gzip', 'bz2', 'lzma', 'zipfile', 'tarfile', 'configparser', 'netrc', 'xdrlib', 'plistlib', 'hashlib', 'hmac', 'secrets', 'os', 'io', 'time', 'argparse', 'getopt', 'logging', 
'getpass', 'curses', 'platform', 'errno', 'ctypes', 'threading', 'multiprocessing', 'concurrent', 'subprocess', 'sched', 'queue', 'contextvars', 'asyncio', 'socket', 'ssl', 'email', 'json', 'mailcap', 'mailbox', 'mimetypes', 'base64', 'binhex', 'binascii', 'quopri', 'uu', 'html', 'xml', 'webbrowser', 'cgi', 'cgitb', 'wsgiref', 'urllib', 'http', 'ftplib', 'poplib', 'imaplib', 'nntplib', 'smtplib', 'smtpd', 'telnetlib', 'uuid', 'socketserver', 'xmlrpc', 'ipaddress', 'audioop', 'aifc', 'sunau', 'wave', 'chunk', 'colorsys', 'imghdr', 'sndhdr', 'ossaudiodev', 'gettext', 'locale', 'turtle', 'cmd', 'shlex', 'tkinter', 'typing', 'pydoc', 'doctest', 'unittest', 'test', '2to3', 'distutils', 'venv', 'ensurepip', 'zipapp', 'py_compile', 'compileall', 'dis', 'pickletools', 'pdb', 'timeit', 'trace', 'tracemalloc', 'warnings', 'faulthandler', 'pdb', 'dataclasses', 'cgi', 'cgitb', 'chunk', 'crypt', 'imghdr', 'mailcap', 'nis', 'nntplib', 'optparse', 'ossaudiodev', 'pipes', 'smtpd', 'sndhdr', 'spwd', 'sunau', 'telnetlib', 'uu', 'xdrlib', 'msilib', 'pstats', 'rlcompleter', 'tkinter', 'ast' } # Known package name mappings (import name -> package name) PACKAGE_MAPPINGS = { 'bs4': 'beautifulsoup4', 'PIL': 'pillow', 'cv2': 'opencv-python', 'sklearn': 'scikit-learn', 'yaml': 'PyYAML', 'OpenSSL': 'pyOpenSSL', 'sqlalchemy': 'SQLAlchemy', 'playwright': 'playwright', 'patchright': 'patchright', 'dotenv': 'python-dotenv', 'fake_useragent': 'fake-useragent', 'playwright_stealth': 'tf-playwright-stealth', 'sentence_transformers': 'sentence-transformers', 'rank_bm25': 'rank-bm25', 'snowballstemmer': 'snowballstemmer', 'pypdf': 'pypdf', 'pdf2image': 'pdf2image', } class ImportVisitor(ast.NodeVisitor): """AST visitor to extract imports from Python files""" def __init__(self): self.imports = {} # Changed to dict to store line numbers self.from_imports = {} def visit_Import(self, node): for alias in node.names: module_name = alias.name.split('.')[0] if module_name not in self.imports: 
self.imports[module_name] = [] self.imports[module_name].append(node.lineno) def visit_ImportFrom(self, node): if node.module and node.level == 0: # absolute imports only module_name = node.module.split('.')[0] if module_name not in self.from_imports: self.from_imports[module_name] = [] self.from_imports[module_name].append(node.lineno) def extract_imports_from_file(filepath: Path) -> Dict[str, List[int]]: """Extract all imports from a Python file with line numbers""" all_imports = {} try: with open(filepath, 'r', encoding='utf-8') as f: content = f.read() tree = ast.parse(content) visitor = ImportVisitor() visitor.visit(tree) # Merge imports and from_imports for module, lines in visitor.imports.items(): if module not in all_imports: all_imports[module] = [] all_imports[module].extend(lines) for module, lines in visitor.from_imports.items(): if module not in all_imports: all_imports[module] = [] all_imports[module].extend(lines) except Exception as e: # Silently skip files that can't be parsed pass return all_imports def get_codebase_imports_with_files(root_dir: Path) -> Dict[str, List[Tuple[str, List[int]]]]: """Get all imports from the crawl4ai library and docs folders with file locations and line numbers""" import_to_files = defaultdict(list) # Only scan crawl4ai library folder and docs folder target_dirs = [ root_dir / 'crawl4ai', root_dir / 'docs' ] for target_dir in target_dirs: if not target_dir.exists(): continue for py_file in target_dir.rglob('*.py'): # Skip __pycache__ directories if '__pycache__' in py_file.parts: continue # Skip setup.py and similar files if py_file.name in ['setup.py', 'setup.cfg', 'conf.py']: continue imports = extract_imports_from_file(py_file) # Map each import to the file and line numbers for imp, line_numbers in imports.items(): relative_path = py_file.relative_to(root_dir) import_to_files[imp].append((str(relative_path), sorted(line_numbers))) return dict(import_to_files) def get_declared_dependencies() -> Set[str]: """Get 
declared dependencies from pyproject.toml and requirements.txt""" declared = set() # Read from pyproject.toml if Path('pyproject.toml').exists(): with open('pyproject.toml', 'r') as f: data = toml.load(f) # Get main dependencies deps = data.get('project', {}).get('dependencies', []) for dep in deps: # Parse dependency string (e.g., "numpy>=1.26.0,<3") match = re.match(r'^([a-zA-Z0-9_-]+)', dep) if match: pkg_name = match.group(1).lower() declared.add(pkg_name) # Get optional dependencies optional = data.get('project', {}).get('optional-dependencies', {}) for group, deps in optional.items(): for dep in deps: match = re.match(r'^([a-zA-Z0-9_-]+)', dep) if match: pkg_name = match.group(1).lower() declared.add(pkg_name) # Also check requirements.txt as backup if Path('requirements.txt').exists(): with open('requirements.txt', 'r') as f: for line in f: line = line.strip() if line and not line.startswith('#'): match = re.match(r'^([a-zA-Z0-9_-]+)', line) if match: pkg_name = match.group(1).lower() declared.add(pkg_name) return declared def normalize_package_name(name: str) -> str: """Normalize package name for comparison""" # Handle known mappings first if name in PACKAGE_MAPPINGS: return PACKAGE_MAPPINGS[name].lower() # Basic normalization return name.lower().replace('_', '-') def check_missing_dependencies(): """Main function to check for missing dependencies""" print("🔍 Analyzing crawl4ai library and docs folders...\n") # Get all imports with their file locations root_dir = Path('.') import_to_files = get_codebase_imports_with_files(root_dir) # Get declared dependencies declared_deps = get_declared_dependencies() # Normalize declared dependencies normalized_declared = {normalize_package_name(dep) for dep in declared_deps} # Categorize imports external_imports = {} local_imports = {} # Known local packages local_packages = {'crawl4ai'} for imp, file_info in import_to_files.items(): # Skip standard library if imp in STDLIB_MODULES: continue # Check if it's a local 
import if any(imp.startswith(local) for local in local_packages): local_imports[imp] = file_info else: external_imports[imp] = file_info # Check which external imports are not declared not_declared = {} declared_imports = {} for imp, file_info in external_imports.items(): normalized_imp = normalize_package_name(imp) # Check if import is covered by declared dependencies found = False for declared in normalized_declared: if normalized_imp == declared or normalized_imp.startswith(declared + '.') or declared.startswith(normalized_imp): found = True break if found: declared_imports[imp] = file_info else: not_declared[imp] = file_info # Print results print(f"📊 Summary:") print(f" - Total unique imports: {len(import_to_files)}") print(f" - External imports: {len(external_imports)}") print(f" - Declared dependencies: {len(declared_deps)}") print(f" - External imports NOT in dependencies: {len(not_declared)}\n") if not_declared: print("❌ External imports NOT declared in pyproject.toml or requirements.txt:\n") # Sort by import name for imp in sorted(not_declared.keys()): file_info = not_declared[imp] print(f" 📦 {imp}") if imp in PACKAGE_MAPPINGS: print(f" → Package name: {PACKAGE_MAPPINGS[imp]}") # Show up to 3 files that use this import for i, (file_path, line_numbers) in enumerate(file_info[:3]): # Format line numbers for clickable output if len(line_numbers) == 1: print(f" - {file_path}:{line_numbers[0]}") else: # Show first few line numbers line_str = ','.join(str(ln) for ln in line_numbers[:3]) if len(line_numbers) > 3: line_str += f"... ({len(line_numbers)} imports)" print(f" - {file_path}: lines {line_str}") if len(file_info) > 3: print(f" ... 
and {len(file_info) - 3} more files") print() # Check for potentially unused dependencies print("\n🔎 Checking declared dependencies usage...\n") # Get all used external packages used_packages = set() for imp in external_imports.keys(): normalized = normalize_package_name(imp) used_packages.add(normalized) # Find unused unused = [] for dep in declared_deps: normalized_dep = normalize_package_name(dep) # Check if any import uses this dependency found_usage = False for used in used_packages: if used == normalized_dep or used.startswith(normalized_dep) or normalized_dep.startswith(used): found_usage = True break if not found_usage: # Some packages are commonly unused directly indirect_deps = {'wheel', 'setuptools', 'pip', 'colorama', 'certifi', 'packaging', 'urllib3'} if normalized_dep not in indirect_deps: unused.append(dep) if unused: print("⚠️ Declared dependencies with NO imports found:") for dep in sorted(unused): print(f" - {dep}") print("\n Note: These might be used indirectly or by other dependencies") else: print("✅ All declared dependencies have corresponding imports") print("\n" + "="*60) print("💡 How to use this report:") print(" 1. Check each ❌ import to see if it's legitimate") print(" 2. If legitimate, add the package to pyproject.toml") print(" 3. If it's an internal module or typo, fix the import") print(" 4. Review unused dependencies - remove if truly not needed") print("="*60) if __name__ == '__main__': check_missing_dependencies()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_web_crawler.py
tests/test_web_crawler.py
import unittest, os from crawl4ai import LLMConfig from crawl4ai.web_crawler import WebCrawler from crawl4ai.chunking_strategy import ( RegexChunking, FixedLengthWordChunking, SlidingWindowChunking, ) from crawl4ai import ( CosineStrategy, LLMExtractionStrategy, TopicExtractionStrategy, NoExtractionStrategy, ) class TestWebCrawler(unittest.TestCase): def setUp(self): self.crawler = WebCrawler() def test_warmup(self): self.crawler.warmup() self.assertTrue(self.crawler.ready, "WebCrawler failed to warm up") def test_run_default_strategies(self): result = self.crawler.run( url="https://www.nbcnews.com/business", word_count_threshold=5, chunking_strategy=RegexChunking(), extraction_strategy=CosineStrategy(), bypass_cache=True, ) self.assertTrue( result.success, "Failed to crawl and extract using default strategies" ) def test_run_different_strategies(self): url = "https://www.nbcnews.com/business" # Test with FixedLengthWordChunking and LLMExtractionStrategy result = self.crawler.run( url=url, word_count_threshold=5, chunking_strategy=FixedLengthWordChunking(chunk_size=100), extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider="openai/gpt-3.5-turbo", api_token=os.getenv("OPENAI_API_KEY")) ), bypass_cache=True, ) self.assertTrue( result.success, "Failed to crawl and extract with FixedLengthWordChunking and LLMExtractionStrategy", ) # Test with SlidingWindowChunking and TopicExtractionStrategy result = self.crawler.run( url=url, word_count_threshold=5, chunking_strategy=SlidingWindowChunking(window_size=100, step=50), extraction_strategy=TopicExtractionStrategy(num_keywords=5), bypass_cache=True, ) self.assertTrue( result.success, "Failed to crawl and extract with SlidingWindowChunking and TopicExtractionStrategy", ) def test_invalid_url(self): with self.assertRaises(Exception) as context: self.crawler.run(url="invalid_url", bypass_cache=True) self.assertIn("Invalid URL", str(context.exception)) def test_unsupported_extraction_strategy(self): with 
self.assertRaises(Exception) as context: self.crawler.run( url="https://www.nbcnews.com/business", extraction_strategy="UnsupportedStrategy", bypass_cache=True, ) self.assertIn("Unsupported extraction strategy", str(context.exception)) def test_invalid_css_selector(self): with self.assertRaises(ValueError) as context: self.crawler.run( url="https://www.nbcnews.com/business", css_selector="invalid_selector", bypass_cache=True, ) self.assertIn("Invalid CSS selector", str(context.exception)) def test_crawl_with_cache_and_bypass_cache(self): url = "https://www.nbcnews.com/business" # First crawl with cache enabled result = self.crawler.run(url=url, bypass_cache=False) self.assertTrue(result.success, "Failed to crawl and cache the result") # Second crawl with bypass_cache=True result = self.crawler.run(url=url, bypass_cache=True) self.assertTrue(result.success, "Failed to bypass cache and fetch fresh data") def test_fetch_multiple_pages(self): urls = ["https://www.nbcnews.com/business", "https://www.bbc.com/news"] results = [] for url in urls: result = self.crawler.run( url=url, word_count_threshold=5, chunking_strategy=RegexChunking(), extraction_strategy=CosineStrategy(), bypass_cache=True, ) results.append(result) self.assertEqual(len(results), 2, "Failed to crawl and extract multiple pages") for result in results: self.assertTrue( result.success, "Failed to crawl and extract a page in the list" ) def test_run_fixed_length_word_chunking_and_no_extraction(self): result = self.crawler.run( url="https://www.nbcnews.com/business", word_count_threshold=5, chunking_strategy=FixedLengthWordChunking(chunk_size=100), extraction_strategy=NoExtractionStrategy(), bypass_cache=True, ) self.assertTrue( result.success, "Failed to crawl and extract with FixedLengthWordChunking and NoExtractionStrategy", ) def test_run_sliding_window_and_no_extraction(self): result = self.crawler.run( url="https://www.nbcnews.com/business", word_count_threshold=5, 
chunking_strategy=SlidingWindowChunking(window_size=100, step=50), extraction_strategy=NoExtractionStrategy(), bypass_cache=True, ) self.assertTrue( result.success, "Failed to crawl and extract with SlidingWindowChunking and NoExtractionStrategy", ) if __name__ == "__main__": unittest.main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/__init__.py
tests/__init__.py
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_llm_extraction_parallel_issue_1055.py
tests/test_llm_extraction_parallel_issue_1055.py
""" Final verification test for Issue #1055 fix This test demonstrates that LLM extraction now runs in parallel when using arun_many with multiple URLs. """ import os import sys import time import asyncio grandparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(grandparent_dir) from crawl4ai import ( AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMExtractionStrategy, LLMConfig, ) from pydantic import BaseModel class SimpleData(BaseModel): title: str summary: str def print_section(title): print("\n" + "=" * 80) print(title) print("=" * 80 + "\n") async def test_without_llm(): """Baseline: Test crawling without LLM extraction""" print_section("TEST 1: Crawling WITHOUT LLM Extraction") config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, ) browser_config = BrowserConfig(headless=True, verbose=False) urls = [ "https://www.example.com", "https://www.iana.org", "https://www.wikipedia.org", ] print(f"Crawling {len(urls)} URLs without LLM extraction...") print("Expected: Fast and parallel\n") start_time = time.time() async with AsyncWebCrawler(config=browser_config) as crawler: results = await crawler.arun_many(urls=urls, config=config) duration = time.time() - start_time print(f"\n✅ Completed in {duration:.2f}s") print(f" Successful: {sum(1 for r in results if r.success)}/{len(urls)}") print(f" Average: {duration/len(urls):.2f}s per URL") return duration async def test_with_llm_before_fix(): """Demonstrate the problem: Sequential execution with LLM""" print_section("TEST 2: What Issue #1055 Reported (LLM Sequential Behavior)") print("The issue reported that with LLM extraction, URLs would crawl") print("one after another instead of in parallel.") print("\nWithout our fix, this would show:") print(" - URL 1 fetches → extracts → completes") print(" - URL 2 fetches → extracts → completes") print(" - URL 3 fetches → extracts → completes") print("\nTotal time would be approximately sum of all individual times.") async def 
test_with_llm_after_fix(): """Demonstrate the fix: Parallel execution with LLM""" print_section("TEST 3: After Fix - LLM Extraction in Parallel") config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider="openai/gpt-4o-mini"), schema=SimpleData.model_json_schema(), extraction_type="schema", instruction="Extract title and summary", ) ) browser_config = BrowserConfig(headless=True, verbose=False) urls = [ "https://www.example.com", "https://www.iana.org", "https://www.wikipedia.org", ] print(f"Crawling {len(urls)} URLs WITH LLM extraction...") print("Expected: Parallel execution with our fix\n") completion_times = {} start_time = time.time() async with AsyncWebCrawler(config=browser_config) as crawler: results = await crawler.arun_many(urls=urls, config=config) for result in results: elapsed = time.time() - start_time completion_times[result.url] = elapsed print(f" [{elapsed:5.2f}s] ✓ {result.url[:50]}") duration = time.time() - start_time print(f"\n✅ Total time: {duration:.2f}s") print(f" Successful: {sum(1 for url in urls if url in completion_times)}/{len(urls)}") # Analyze parallelism times = list(completion_times.values()) if len(times) >= 2: # If parallel, completion times should be staggered, not evenly spaced time_diffs = [times[i+1] - times[i] for i in range(len(times)-1)] avg_diff = sum(time_diffs) / len(time_diffs) print(f"\nParallelism Analysis:") print(f" Completion time differences: {[f'{d:.2f}s' for d in time_diffs]}") print(f" Average difference: {avg_diff:.2f}s") # In parallel mode, some tasks complete close together # In sequential mode, they're evenly spaced (avg ~2-3s apart) if avg_diff < duration / len(urls): print(f" ✅ PARALLEL: Tasks completed with overlapping execution") else: print(f" ⚠️ SEQUENTIAL: Tasks completed one after another") return duration async def test_multiple_arun_calls(): """Test multiple individual arun() calls in parallel""" print_section("TEST 4: Multiple 
arun() Calls with asyncio.gather") config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=LLMExtractionStrategy( llm_config=LLMConfig(provider="openai/gpt-4o-mini"), schema=SimpleData.model_json_schema(), extraction_type="schema", instruction="Extract title and summary", ) ) browser_config = BrowserConfig(headless=True, verbose=False) urls = [ "https://www.example.com", "https://www.iana.org", "https://www.wikipedia.org", ] print(f"Running {len(urls)} arun() calls with asyncio.gather()...") print("Expected: True parallel execution\n") start_time = time.time() async with AsyncWebCrawler(config=browser_config) as crawler: tasks = [crawler.arun(url, config=config) for url in urls] results = await asyncio.gather(*tasks) duration = time.time() - start_time print(f"\n✅ Completed in {duration:.2f}s") print(f" Successful: {sum(1 for r in results if r.success)}/{len(urls)}") print(f" This proves the async LLM extraction works correctly") return duration async def main(): print("\n" + "🚀" * 40) print("ISSUE #1055 FIX VERIFICATION") print("Testing: Sequential → Parallel LLM Extraction") print("🚀" * 40) # Run tests await test_without_llm() await test_with_llm_before_fix() time_with_llm = await test_with_llm_after_fix() time_gather = await test_multiple_arun_calls() # Final summary print_section("FINAL VERDICT") print("✅ Fix Verified!") print("\nWhat changed:") print(" • Created aperform_completion_with_backoff() using litellm.acompletion") print(" • Added arun() method to ExtractionStrategy base class") print(" • Implemented parallel arun() in LLMExtractionStrategy") print(" • Updated AsyncWebCrawler to use arun() when available") print("\nResult:") print(" • LLM extraction now runs in parallel across multiple URLs") print(" • Backward compatible - existing strategies still work") print(" • No breaking changes to the API") print("\n✨ Issue #1055 is RESOLVED!") print("\n" + "=" * 80 + "\n") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_link_extractor.py
tests/test_link_extractor.py
#!/usr/bin/env python3 """ Test script for Link Extractor functionality """ from crawl4ai.models import Link from crawl4ai import AsyncWebCrawler, CrawlerRunConfig from crawl4ai import LinkPreviewConfig import asyncio import sys import os # Add the crawl4ai directory to the path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'crawl4ai')) async def test_link_extractor(): """Test the link extractor functionality""" print("🔗 Testing Link Extractor Functionality") print("=" * 50) # Test configuration with link extraction AND scoring enabled config = CrawlerRunConfig( link_preview_config=LinkPreviewConfig( include_internal=True, include_external=False, # Only internal links for this test # No include/exclude patterns for first test - let's see what we get query="API documentation reference guide", score_threshold=0.3, concurrency=5, timeout=10, max_links=5, # Just test with 5 links first verbose=True # Show detailed progress ), score_links=True, # Enable intrinsic link scoring only_text=True, verbose=True ) # Test URLs test_urls = [ "https://docs.python.org/3/", # Python docs - should have many internal links "https://httpbin.org/", # Simple site for testing ] async with AsyncWebCrawler() as crawler: for url in test_urls: print(f"\n🌐 Testing URL: {url}") print("-" * 40) try: result = await crawler.arun(url, config=config) # Debug: Check if link extraction config is being passed print(f"🔍 Debug - Link extraction config: {config.link_preview_config.to_dict() if config.link_preview_config else None}") print(f"🔍 Debug - Score links: {config.score_links}") if result.success: print(f"✅ Crawl successful!") print( f"📄 Page title: {result.metadata.get('title', 'No title')}") # Check links - handle both dict and Links object structure if isinstance(result.links, dict): internal_links = [ Link(**link) for link in result.links.get('internal', [])] external_links = [ Link(**link) for link in result.links.get('external', [])] else: internal_links = 
result.links.internal external_links = result.links.external print(f"🔗 Found {len(internal_links)} internal links") print(f"🌍 Found {len(external_links)} external links") # Show links with head data links_with_head = [link for link in internal_links + external_links if hasattr(link, 'head_data') and link.head_data] print( f"🧠 Links with head data extracted: {len(links_with_head)}") # Show all score types for all links (first 3) all_links = internal_links + external_links if all_links: print(f"\n🔢 Sample link scores (first 3 links):") for i, link in enumerate(all_links[:3]): print(f"\n {i+1}. {link.href}") # Show intrinsic score if hasattr(link, 'intrinsic_score') and link.intrinsic_score is not None: if link.intrinsic_score == float('inf'): print(f" Intrinsic Score: ∞ (scoring disabled)") else: print(f" Intrinsic Score: {link.intrinsic_score:.2f}/10.0") else: print(f" Intrinsic Score: Not available") # Show contextual score (BM25) if hasattr(link, 'contextual_score') and link.contextual_score is not None: print(f" Contextual Score: {link.contextual_score:.3f}") else: print(f" Contextual Score: Not available") # Show total score if hasattr(link, 'total_score') and link.total_score is not None: print(f" Total Score: {link.total_score:.3f}") else: print(f" Total Score: Not available") print(f" Text: '{link.text[:50]}...' " if link.text else " Text: (no text)") if links_with_head: print("\n📊 Sample links with head data:") # Show top 3 for i, link in enumerate(links_with_head[:3]): print(f"\n {i+1}. 
{link.href}") print( f" Status: {link.head_extraction_status}") # Show all three score types print(f" 📊 Scoring Summary:") if hasattr(link, 'intrinsic_score') and link.intrinsic_score is not None: if link.intrinsic_score == float('inf'): print(f" • Intrinsic Score: ∞ (scoring disabled)") else: print(f" • Intrinsic Score: {link.intrinsic_score:.2f}/10.0") else: print(f" • Intrinsic Score: Not available") if hasattr(link, 'contextual_score') and link.contextual_score is not None: print(f" • Contextual Score: {link.contextual_score:.3f}") else: print(f" • Contextual Score: Not available") if hasattr(link, 'total_score') and link.total_score is not None: print(f" • Total Score: {link.total_score:.3f}") else: print(f" • Total Score: Not available") if link.head_data: title = link.head_data.get('title', 'No title') if title: print(f" Title: {title[:60]}...") meta = link.head_data.get('meta', {}) if 'description' in meta and meta['description']: desc = meta['description'] print(f" Description: {desc[:80]}...") # Show link metadata keys (should now be properly formatted) link_data = link.head_data.get('link', {}) if link_data: keys = list(link_data.keys())[:3] print(f" Link types: {keys}") # Show failed extractions failed_links = [link for link in internal_links + external_links if hasattr(link, 'head_extraction_status') and link.head_extraction_status == 'failed'] if failed_links: print( f"\n❌ Failed head extractions: {len(failed_links)}") for link in failed_links[:2]: # Show first 2 failures print(f" - {link.href}") if hasattr(link, 'head_extraction_error') and link.head_extraction_error: print( f" Error: {link.head_extraction_error}") else: print(f"❌ Crawl failed: {result.error_message}") except Exception as e: print(f"💥 Error testing {url}: {str(e)}") import traceback traceback.print_exc() def test_config_examples(): """Show example configurations""" print("\n📚 Example Configurations") print("=" * 50) examples = [ { "name": "BM25 Scored Documentation Links", "config": 
LinkPreviewConfig( include_internal=True, include_external=False, include_patterns=["*/docs/*", "*/api/*", "*/reference/*"], query="API documentation reference guide", score_threshold=0.3, max_links=30, verbose=True ) }, { "name": "Internal Links Only", "config": LinkPreviewConfig( include_internal=True, include_external=False, max_links=50, verbose=True ) }, { "name": "External Links with Patterns", "config": LinkPreviewConfig( include_internal=False, include_external=True, include_patterns=["*github.com*", "*stackoverflow.com*"], max_links=20, concurrency=10 ) }, { "name": "High-Performance Mode", "config": LinkPreviewConfig( include_internal=True, include_external=False, concurrency=20, timeout=3, max_links=100, verbose=False ) } ] for example in examples: print(f"\n📝 {example['name']}:") print(" Configuration:") config_dict = example['config'].to_dict() for key, value in config_dict.items(): print(f" {key}: {value}") print(" Usage:") print(" from crawl4ai import LinkPreviewConfig") print(" config = CrawlerRunConfig(") print(" link_preview_config=LinkPreviewConfig(") for key, value in config_dict.items(): if isinstance(value, str): print(f" {key}='{value}',") elif isinstance(value, list) and value: print(f" {key}={value},") elif value is not None: print(f" {key}={value},") print(" )") print(" )") if __name__ == "__main__": # Show configuration examples first test_config_examples() # Run the actual test print("\n🚀 Running Link Extractor Tests...") asyncio.run(test_link_extractor()) print("\n✨ Test completed!")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_llmtxt.py
tests/test_llmtxt.py
from crawl4ai.llmtxt import AsyncLLMTextManager # Changed to AsyncLLMTextManager from crawl4ai.async_logger import AsyncLogger from pathlib import Path import asyncio async def main(): current_file = Path(__file__).resolve() # base_dir = current_file.parent.parent / "local/_docs/llm.txt/test_docs" base_dir = current_file.parent.parent / "local/_docs/llm.txt" docs_dir = base_dir # Create directory if it doesn't exist docs_dir.mkdir(parents=True, exist_ok=True) # Initialize logger logger = AsyncLogger() # Updated initialization with default batching params # manager = AsyncLLMTextManager(docs_dir, logger, max_concurrent_calls=3, batch_size=2) manager = AsyncLLMTextManager(docs_dir, logger, batch_size=2) # Let's first check what files we have print("\nAvailable files:") for f in docs_dir.glob("*.md"): print(f"- {f.name}") # Generate index files print("\nGenerating index files...") await manager.generate_index_files( force_generate_facts=False, clear_bm25_cache=False ) # Test some relevant queries about Crawl4AI test_queries = [ "How is using the `arun_many` method?", ] print("\nTesting search functionality:") for query in test_queries: print(f"\nQuery: {query}") results = manager.search(query, top_k=2) print(f"Results length: {len(results)} characters") if results: print( "First 200 chars of results:", results[:200].replace("\n", " "), "..." ) else: print("No results found") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_scraping_strategy.py
tests/test_scraping_strategy.py
import nest_asyncio nest_asyncio.apply() import asyncio from crawl4ai import ( AsyncWebCrawler, CrawlerRunConfig, LXMLWebScrapingStrategy, CacheMode, ) async def main(): config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, scraping_strategy=LXMLWebScrapingStrategy(), # Faster alternative to default BeautifulSoup ) async with AsyncWebCrawler() as crawler: result = await crawler.arun(url="https://example.com", config=config) print(f"Success: {result.success}") print(f"Markdown length: {len(result.markdown.raw_markdown)}") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/test_normalize_url.py
tests/test_normalize_url.py
import unittest from crawl4ai.utils import normalize_url class TestNormalizeUrl(unittest.TestCase): def test_basic_relative_path(self): self.assertEqual(normalize_url("path/to/page.html", "http://example.com/base/"), "http://example.com/base/path/to/page.html") def test_base_url_with_trailing_slash(self): self.assertEqual(normalize_url("page.html", "http://example.com/base/"), "http://example.com/base/page.html") def test_base_url_without_trailing_slash(self): # If normalize_url correctly uses urljoin, "base" is treated as a file. self.assertEqual(normalize_url("page.html", "http://example.com/base"), "http://example.com/page.html") def test_absolute_url_as_href(self): self.assertEqual(normalize_url("http://another.com/page.html", "http://example.com/"), "http://another.com/page.html") def test_href_with_leading_trailing_spaces(self): self.assertEqual(normalize_url(" page.html ", "http://example.com/"), "http://example.com/page.html") def test_empty_href(self): # urljoin with an empty href and base ending in '/' returns the base. self.assertEqual(normalize_url("", "http://example.com/base/"), "http://example.com/base/") # urljoin with an empty href and base not ending in '/' also returns base. 
self.assertEqual(normalize_url("", "http://example.com/base"), "http://example.com/base") def test_href_with_query_parameters(self): self.assertEqual(normalize_url("page.html?query=test", "http://example.com/"), "http://example.com/page.html?query=test") def test_href_with_fragment(self): self.assertEqual(normalize_url("page.html#section", "http://example.com/"), "http://example.com/page.html#section") def test_different_scheme_in_href(self): self.assertEqual(normalize_url("https://secure.example.com/page.html", "http://example.com/"), "https://secure.example.com/page.html") def test_parent_directory_in_href(self): self.assertEqual(normalize_url("../otherpage.html", "http://example.com/base/current/"), "http://example.com/base/otherpage.html") def test_root_relative_href(self): self.assertEqual(normalize_url("/otherpage.html", "http://example.com/base/current/"), "http://example.com/otherpage.html") def test_base_url_with_path_and_no_trailing_slash(self): # If normalize_url correctly uses urljoin, "path" is treated as a file. self.assertEqual(normalize_url("file.html", "http://example.com/path"), "http://example.com/file.html") def test_base_url_is_just_domain(self): self.assertEqual(normalize_url("page.html", "http://example.com"), "http://example.com/page.html") def test_href_is_only_query(self): self.assertEqual(normalize_url("?query=true", "http://example.com/page.html"), "http://example.com/page.html?query=true") def test_href_is_only_fragment(self): self.assertEqual(normalize_url("#fragment", "http://example.com/page.html"), "http://example.com/page.html#fragment") def test_relative_link_from_base_file_url(self): """ Tests the specific bug report: relative links from a base URL that is a file. 
Example: Page URL: http://example.com/path/to/document.html Link on page: <a href="./file.xlsx"> Expected: http://example.com/path/to/file.xlsx """ base_url_file = "http://example.com/zwgk/fdzdgk/zdxx/spaq/t19360680.shtml" href_relative_current_dir = "./P020241203375994691134.xlsx" expected_url1 = "http://example.com/zwgk/fdzdgk/zdxx/spaq/P020241203375994691134.xlsx" self.assertEqual(normalize_url(href_relative_current_dir, base_url_file), expected_url1) # Test with a relative link that doesn't start with "./" href_relative_no_dot_slash = "another.doc" expected_url2 = "http://example.com/zwgk/fdzdgk/zdxx/spaq/another.doc" self.assertEqual(normalize_url(href_relative_no_dot_slash, base_url_file), expected_url2) def test_invalid_base_url_scheme(self): with self.assertRaises(ValueError) as context: normalize_url("page.html", "ftp://example.com/") self.assertIn("Invalid base URL format", str(context.exception)) def test_invalid_base_url_netloc(self): with self.assertRaises(ValueError) as context: normalize_url("page.html", "http:///path/") self.assertIn("Invalid base URL format", str(context.exception)) def test_base_url_with_port(self): self.assertEqual(normalize_url("path/file.html", "http://example.com:8080/base/"), "http://example.com:8080/base/path/file.html") def test_href_with_special_characters(self): self.assertEqual(normalize_url("path%20with%20spaces/file.html", "http://example.com/"), "http://example.com/path%20with%20spaces/file.html") if __name__ == '__main__': unittest.main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/profiler/test_keyboard_handle.py
tests/profiler/test_keyboard_handle.py
import sys import pytest import asyncio from unittest.mock import patch, MagicMock from crawl4ai.browser_profiler import BrowserProfiler @pytest.mark.asyncio @pytest.mark.skipif(sys.platform != "win32", reason="Windows-specific msvcrt test") async def test_keyboard_input_handling(): # Mock sequence of keystrokes: arrow key followed by 'q' mock_keys = [b'\x00K', b'q'] mock_kbhit = MagicMock(side_effect=[True, True, False]) mock_getch = MagicMock(side_effect=mock_keys) with patch('msvcrt.kbhit', mock_kbhit), patch('msvcrt.getch', mock_getch): # profiler = BrowserProfiler() user_done_event = asyncio.Event() # Create a local async function to simulate the keyboard input handling async def test_listen_for_quit_command(): if sys.platform == "win32": while True: try: if mock_kbhit(): raw = mock_getch() try: key = raw.decode("utf-8") except UnicodeDecodeError: continue if len(key) != 1 or not key.isprintable(): continue if key.lower() == "q": user_done_event.set() return await asyncio.sleep(0.1) except Exception as e: continue # Run the listener listener_task = asyncio.create_task(test_listen_for_quit_command()) # Wait for the event to be set try: await asyncio.wait_for(user_done_event.wait(), timeout=1.0) assert user_done_event.is_set() finally: if not listener_task.done(): listener_task.cancel() try: await listener_task except asyncio.CancelledError: pass
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/profiler/test_create_profile.py
tests/profiler/test_create_profile.py
from crawl4ai import BrowserProfiler import asyncio if __name__ == "__main__": # Example usage profiler = BrowserProfiler() # Create a new profile import os from pathlib import Path home_dir = Path.home() profile_path = asyncio.run(profiler.create_profile( str(home_dir / ".crawl4ai/profiles/test-profile"))) print(f"Profile created at: {profile_path}") # # Launch a standalone browser # asyncio.run(profiler.launch_standalone_browser()) # # List profiles # profiles = profiler.list_profiles() # for profile in profiles: # print(f"Profile: {profile['name']}, Path: {profile['path']}") # # Delete a profile # success = profiler.delete_profile("my-profile") # if success: # print("Profile deleted successfully") # else: # print("Failed to delete profile")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/proxy/test_proxy_deprecation.py
tests/proxy/test_proxy_deprecation.py
import warnings import pytest from crawl4ai.async_configs import BrowserConfig, ProxyConfig def test_browser_config_proxy_string_emits_deprecation_and_autoconverts(): warnings.simplefilter("always", DeprecationWarning) proxy_str = "23.95.150.145:6114:username:password" with warnings.catch_warnings(record=True) as caught: cfg = BrowserConfig(proxy=proxy_str, headless=True) dep_warnings = [w for w in caught if issubclass(w.category, DeprecationWarning)] assert dep_warnings, "Expected DeprecationWarning when using BrowserConfig(proxy=...)" assert cfg.proxy is None, "cfg.proxy should be None after auto-conversion" assert isinstance(cfg.proxy_config, ProxyConfig), "cfg.proxy_config should be ProxyConfig instance" assert cfg.proxy_config.username == "username" assert cfg.proxy_config.password == "password" assert cfg.proxy_config.server.startswith("http://") assert cfg.proxy_config.server.endswith(":6114") def test_browser_config_with_proxy_config_emits_no_deprecation(): warnings.simplefilter("always", DeprecationWarning) with warnings.catch_warnings(record=True) as caught: cfg = BrowserConfig( headless=True, proxy_config={ "server": "http://127.0.0.1:8080", "username": "u", "password": "p", }, ) dep_warnings = [w for w in caught if issubclass(w.category, DeprecationWarning)] assert not dep_warnings, "Did not expect DeprecationWarning when using proxy_config" assert cfg.proxy is None assert isinstance(cfg.proxy_config, ProxyConfig)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/proxy/test_proxy_config.py
tests/proxy/test_proxy_config.py
""" Comprehensive test suite for ProxyConfig in different forms: 1. String form (ip:port:username:password) 2. Dict form (dictionary with keys) 3. Object form (ProxyConfig instance) 4. Environment variable form (from env vars) Tests cover all possible scenarios and edge cases using pytest. """ import asyncio import os import pytest import tempfile from unittest.mock import patch from crawl4ai import AsyncWebCrawler, BrowserConfig from crawl4ai.async_configs import CrawlerRunConfig, ProxyConfig from crawl4ai.cache_context import CacheMode class TestProxyConfig: """Comprehensive test suite for ProxyConfig functionality.""" # Test data for different scenarios # get free proxy server from from webshare.io https://www.webshare.io/?referral_code=3sqog0y1fvsl TEST_PROXY_DATA = { "server": "", "username": "", "password": "", "ip": "" } def setup_method(self): """Setup for each test method.""" self.test_url = "https://httpbin.org/ip" # Use httpbin for testing # ==================== OBJECT FORM TESTS ==================== def test_proxy_config_object_creation_basic(self): """Test basic ProxyConfig object creation.""" proxy = ProxyConfig(server="127.0.0.1:8080") assert proxy.server == "127.0.0.1:8080" assert proxy.username is None assert proxy.password is None assert proxy.ip == "127.0.0.1" # Should auto-extract IP def test_proxy_config_object_creation_full(self): """Test ProxyConfig object creation with all parameters.""" proxy = ProxyConfig( server=f"http://{self.TEST_PROXY_DATA['server']}", username=self.TEST_PROXY_DATA['username'], password=self.TEST_PROXY_DATA['password'], ip=self.TEST_PROXY_DATA['ip'] ) assert proxy.server == f"http://{self.TEST_PROXY_DATA['server']}" assert proxy.username == self.TEST_PROXY_DATA['username'] assert proxy.password == self.TEST_PROXY_DATA['password'] assert proxy.ip == self.TEST_PROXY_DATA['ip'] def test_proxy_config_object_ip_extraction(self): """Test automatic IP extraction from server URL.""" test_cases = [ ("http://192.168.1.1:8080", 
"192.168.1.1"), ("https://10.0.0.1:3128", "10.0.0.1"), ("192.168.1.100:8080", "192.168.1.100"), ("proxy.example.com:8080", "proxy.example.com"), ] for server, expected_ip in test_cases: proxy = ProxyConfig(server=server) assert proxy.ip == expected_ip, f"Failed for server: {server}" def test_proxy_config_object_invalid_server(self): """Test ProxyConfig with invalid server formats.""" # Should not raise exception but may not extract IP properly proxy = ProxyConfig(server="invalid-format") assert proxy.server == "invalid-format" # IP extraction might fail but object should still be created # ==================== DICT FORM TESTS ==================== def test_proxy_config_from_dict_basic(self): """Test creating ProxyConfig from basic dictionary.""" proxy_dict = {"server": "127.0.0.1:8080"} proxy = ProxyConfig.from_dict(proxy_dict) assert proxy.server == "127.0.0.1:8080" assert proxy.username is None assert proxy.password is None def test_proxy_config_from_dict_full(self): """Test creating ProxyConfig from complete dictionary.""" proxy_dict = { "server": f"http://{self.TEST_PROXY_DATA['server']}", "username": self.TEST_PROXY_DATA['username'], "password": self.TEST_PROXY_DATA['password'], "ip": self.TEST_PROXY_DATA['ip'] } proxy = ProxyConfig.from_dict(proxy_dict) assert proxy.server == proxy_dict["server"] assert proxy.username == proxy_dict["username"] assert proxy.password == proxy_dict["password"] assert proxy.ip == proxy_dict["ip"] def test_proxy_config_from_dict_missing_keys(self): """Test creating ProxyConfig from dictionary with missing keys.""" proxy_dict = {"server": "127.0.0.1:8080", "username": "user"} proxy = ProxyConfig.from_dict(proxy_dict) assert proxy.server == "127.0.0.1:8080" assert proxy.username == "user" assert proxy.password is None assert proxy.ip == "127.0.0.1" # Should auto-extract def test_proxy_config_from_dict_empty(self): """Test creating ProxyConfig from empty dictionary.""" proxy_dict = {} proxy = ProxyConfig.from_dict(proxy_dict) assert 
proxy.server is None assert proxy.username is None assert proxy.password is None assert proxy.ip is None def test_proxy_config_from_dict_none_values(self): """Test creating ProxyConfig from dictionary with None values.""" proxy_dict = { "server": "127.0.0.1:8080", "username": None, "password": None, "ip": None } proxy = ProxyConfig.from_dict(proxy_dict) assert proxy.server == "127.0.0.1:8080" assert proxy.username is None assert proxy.password is None assert proxy.ip == "127.0.0.1" # Should auto-extract despite None # ==================== STRING FORM TESTS ==================== def test_proxy_config_from_string_full_format(self): """Test creating ProxyConfig from full string format (ip:port:username:password).""" proxy_str = f"{self.TEST_PROXY_DATA['ip']}:6114:{self.TEST_PROXY_DATA['username']}:{self.TEST_PROXY_DATA['password']}" proxy = ProxyConfig.from_string(proxy_str) assert proxy.server == f"http://{self.TEST_PROXY_DATA['ip']}:6114" assert proxy.username == self.TEST_PROXY_DATA['username'] assert proxy.password == self.TEST_PROXY_DATA['password'] assert proxy.ip == self.TEST_PROXY_DATA['ip'] def test_proxy_config_from_string_ip_port_only(self): """Test creating ProxyConfig from string with only ip:port.""" proxy_str = "192.168.1.1:8080" proxy = ProxyConfig.from_string(proxy_str) assert proxy.server == "http://192.168.1.1:8080" assert proxy.username is None assert proxy.password is None assert proxy.ip == "192.168.1.1" def test_proxy_config_from_string_invalid_format(self): """Test creating ProxyConfig from invalid string formats.""" invalid_formats = [ "invalid", "ip:port:user", # Missing password (3 parts) "ip:port:user:pass:extra", # Too many parts (5 parts) "", "::", # Empty parts but 3 total (invalid) "::::", # Empty parts but 5 total (invalid) ] for proxy_str in invalid_formats: with pytest.raises(ValueError, match="Invalid proxy string format"): ProxyConfig.from_string(proxy_str) def test_proxy_config_from_string_edge_cases_that_work(self): """Test string 
formats that should work but might be edge cases.""" # These cases actually work as valid formats edge_cases = [ (":", "http://:", ""), # ip:port format with empty values (":::", "http://:", ""), # ip:port:user:pass format with empty values ] for proxy_str, expected_server, expected_ip in edge_cases: proxy = ProxyConfig.from_string(proxy_str) assert proxy.server == expected_server assert proxy.ip == expected_ip def test_proxy_config_from_string_edge_cases(self): """Test string parsing edge cases.""" # Test with different port numbers proxy_str = "10.0.0.1:3128:user:pass" proxy = ProxyConfig.from_string(proxy_str) assert proxy.server == "http://10.0.0.1:3128" # Test with special characters in credentials proxy_str = "10.0.0.1:8080:user@domain:pass:word" with pytest.raises(ValueError): # Should fail due to extra colon in password ProxyConfig.from_string(proxy_str) # ==================== ENVIRONMENT VARIABLE TESTS ==================== def test_proxy_config_from_env_single_proxy(self): """Test loading single proxy from environment variable.""" proxy_str = f"{self.TEST_PROXY_DATA['ip']}:6114:{self.TEST_PROXY_DATA['username']}:{self.TEST_PROXY_DATA['password']}" with patch.dict(os.environ, {'TEST_PROXIES': proxy_str}): proxies = ProxyConfig.from_env('TEST_PROXIES') assert len(proxies) == 1 proxy = proxies[0] assert proxy.ip == self.TEST_PROXY_DATA['ip'] assert proxy.username == self.TEST_PROXY_DATA['username'] assert proxy.password == self.TEST_PROXY_DATA['password'] def test_proxy_config_from_env_multiple_proxies(self): """Test loading multiple proxies from environment variable.""" proxy_list = [ "192.168.1.1:8080:user1:pass1", "192.168.1.2:8080:user2:pass2", "10.0.0.1:3128" # No auth ] proxy_str = ",".join(proxy_list) with patch.dict(os.environ, {'TEST_PROXIES': proxy_str}): proxies = ProxyConfig.from_env('TEST_PROXIES') assert len(proxies) == 3 # Check first proxy assert proxies[0].ip == "192.168.1.1" assert proxies[0].username == "user1" assert proxies[0].password == 
"pass1" # Check second proxy assert proxies[1].ip == "192.168.1.2" assert proxies[1].username == "user2" assert proxies[1].password == "pass2" # Check third proxy (no auth) assert proxies[2].ip == "10.0.0.1" assert proxies[2].username is None assert proxies[2].password is None def test_proxy_config_from_env_empty_var(self): """Test loading from empty environment variable.""" with patch.dict(os.environ, {'TEST_PROXIES': ''}): proxies = ProxyConfig.from_env('TEST_PROXIES') assert len(proxies) == 0 def test_proxy_config_from_env_missing_var(self): """Test loading from missing environment variable.""" # Ensure the env var doesn't exist with patch.dict(os.environ, {}, clear=True): proxies = ProxyConfig.from_env('NON_EXISTENT_VAR') assert len(proxies) == 0 def test_proxy_config_from_env_with_empty_entries(self): """Test loading proxies with empty entries in the list.""" proxy_str = "192.168.1.1:8080:user:pass,,10.0.0.1:3128," with patch.dict(os.environ, {'TEST_PROXIES': proxy_str}): proxies = ProxyConfig.from_env('TEST_PROXIES') assert len(proxies) == 2 # Empty entries should be skipped assert proxies[0].ip == "192.168.1.1" assert proxies[1].ip == "10.0.0.1" def test_proxy_config_from_env_with_invalid_entries(self): """Test loading proxies with some invalid entries.""" proxy_str = "192.168.1.1:8080:user:pass,invalid_proxy,10.0.0.1:3128" with patch.dict(os.environ, {'TEST_PROXIES': proxy_str}): # Should handle errors gracefully and return valid proxies proxies = ProxyConfig.from_env('TEST_PROXIES') # Depending on implementation, might return partial list or empty # This tests error handling assert isinstance(proxies, list) # ==================== SERIALIZATION TESTS ==================== def test_proxy_config_to_dict(self): """Test converting ProxyConfig to dictionary.""" proxy = ProxyConfig( server=f"http://{self.TEST_PROXY_DATA['server']}", username=self.TEST_PROXY_DATA['username'], password=self.TEST_PROXY_DATA['password'], ip=self.TEST_PROXY_DATA['ip'] ) result_dict = 
proxy.to_dict() expected = { "server": f"http://{self.TEST_PROXY_DATA['server']}", "username": self.TEST_PROXY_DATA['username'], "password": self.TEST_PROXY_DATA['password'], "ip": self.TEST_PROXY_DATA['ip'] } assert result_dict == expected def test_proxy_config_clone(self): """Test cloning ProxyConfig with modifications.""" original = ProxyConfig( server="http://127.0.0.1:8080", username="user", password="pass" ) # Clone with modifications cloned = original.clone(username="new_user", password="new_pass") # Original should be unchanged assert original.username == "user" assert original.password == "pass" # Clone should have new values assert cloned.username == "new_user" assert cloned.password == "new_pass" assert cloned.server == original.server # Unchanged value def test_proxy_config_roundtrip_serialization(self): """Test that ProxyConfig can be serialized and deserialized without loss.""" original = ProxyConfig( server=f"http://{self.TEST_PROXY_DATA['server']}", username=self.TEST_PROXY_DATA['username'], password=self.TEST_PROXY_DATA['password'], ip=self.TEST_PROXY_DATA['ip'] ) # Serialize to dict and back serialized = original.to_dict() deserialized = ProxyConfig.from_dict(serialized) assert deserialized.server == original.server assert deserialized.username == original.username assert deserialized.password == original.password assert deserialized.ip == original.ip # ==================== INTEGRATION TESTS ==================== @pytest.mark.asyncio async def test_crawler_with_proxy_config_object(self): """Test AsyncWebCrawler with ProxyConfig object.""" proxy_config = ProxyConfig( server=f"http://{self.TEST_PROXY_DATA['server']}", username=self.TEST_PROXY_DATA['username'], password=self.TEST_PROXY_DATA['password'] ) browser_config = BrowserConfig(headless=True) # Test that the crawler accepts the ProxyConfig object without errors async with AsyncWebCrawler(config=browser_config) as crawler: try: # Note: This might fail due to actual proxy connection, but should 
not fail due to config issues result = await crawler.arun( url=self.test_url, config=CrawlerRunConfig( cache_mode=CacheMode.BYPASS, proxy_config=proxy_config, page_timeout=10000 # Short timeout for testing ) ) # If we get here, proxy config was accepted assert result is not None except Exception as e: # We expect connection errors with test proxies, but not config errors error_msg = str(e).lower() assert "attribute" not in error_msg, f"Config error: {e}" assert "proxy_config" not in error_msg, f"Proxy config error: {e}" @pytest.mark.asyncio async def test_crawler_with_proxy_config_dict(self): """Test AsyncWebCrawler with ProxyConfig from dictionary.""" proxy_dict = { "server": f"http://{self.TEST_PROXY_DATA['server']}", "username": self.TEST_PROXY_DATA['username'], "password": self.TEST_PROXY_DATA['password'] } proxy_config = ProxyConfig.from_dict(proxy_dict) browser_config = BrowserConfig(headless=True) async with AsyncWebCrawler(config=browser_config) as crawler: try: result = await crawler.arun( url=self.test_url, config=CrawlerRunConfig( cache_mode=CacheMode.BYPASS, proxy_config=proxy_config, page_timeout=10000 ) ) assert result is not None except Exception as e: error_msg = str(e).lower() assert "attribute" not in error_msg, f"Config error: {e}" @pytest.mark.asyncio async def test_crawler_with_proxy_config_from_string(self): """Test AsyncWebCrawler with ProxyConfig from string.""" proxy_str = f"{self.TEST_PROXY_DATA['ip']}:6114:{self.TEST_PROXY_DATA['username']}:{self.TEST_PROXY_DATA['password']}" proxy_config = ProxyConfig.from_string(proxy_str) browser_config = BrowserConfig(headless=True) async with AsyncWebCrawler(config=browser_config) as crawler: try: result = await crawler.arun( url=self.test_url, config=CrawlerRunConfig( cache_mode=CacheMode.BYPASS, proxy_config=proxy_config, page_timeout=10000 ) ) assert result is not None except Exception as e: error_msg = str(e).lower() assert "attribute" not in error_msg, f"Config error: {e}" # ==================== 
EDGE CASES AND ERROR HANDLING ==================== def test_proxy_config_with_none_server(self): """Test ProxyConfig behavior with None server.""" proxy = ProxyConfig(server=None) assert proxy.server is None assert proxy.ip is None # Should not crash def test_proxy_config_with_empty_string_server(self): """Test ProxyConfig behavior with empty string server.""" proxy = ProxyConfig(server="") assert proxy.server == "" assert proxy.ip is None or proxy.ip == "" def test_proxy_config_special_characters_in_credentials(self): """Test ProxyConfig with special characters in username/password.""" special_chars_tests = [ ("user@domain.com", "pass!@#$%"), ("user_123", "p@ssw0rd"), ("user-test", "pass-word"), ] for username, password in special_chars_tests: proxy = ProxyConfig( server="http://127.0.0.1:8080", username=username, password=password ) assert proxy.username == username assert proxy.password == password def test_proxy_config_unicode_handling(self): """Test ProxyConfig with unicode characters.""" proxy = ProxyConfig( server="http://127.0.0.1:8080", username="ユーザー", # Japanese characters password="пароль" # Cyrillic characters ) assert proxy.username == "ユーザー" assert proxy.password == "пароль" # ==================== PERFORMANCE TESTS ==================== def test_proxy_config_creation_performance(self): """Test that ProxyConfig creation is reasonably fast.""" import time start_time = time.time() for i in range(1000): proxy = ProxyConfig( server=f"http://192.168.1.{i % 255}:8080", username=f"user{i}", password=f"pass{i}" ) end_time = time.time() # Should be able to create 1000 configs in less than 1 second assert (end_time - start_time) < 1.0 def test_proxy_config_from_env_performance(self): """Test that loading many proxies from env is reasonably fast.""" import time # Create a large list of proxy strings proxy_list = [f"192.168.1.{i}:8080:user{i}:pass{i}" for i in range(100)] proxy_str = ",".join(proxy_list) with patch.dict(os.environ, {'PERF_TEST_PROXIES': 
proxy_str}): start_time = time.time() proxies = ProxyConfig.from_env('PERF_TEST_PROXIES') end_time = time.time() assert len(proxies) == 100 # Should be able to parse 100 proxies in less than 1 second assert (end_time - start_time) < 1.0 # ==================== STANDALONE TEST FUNCTIONS ==================== @pytest.mark.asyncio async def test_dict_proxy(): """Original test function for dict proxy - kept for backward compatibility.""" proxy_config = { "server": "23.95.150.145:6114", "username": "cfyswbwn", "password": "1gs266hoqysi" } proxy_config_obj = ProxyConfig.from_dict(proxy_config) browser_config = BrowserConfig(headless=True) async with AsyncWebCrawler(config=browser_config) as crawler: try: result = await crawler.arun(url="https://httpbin.org/ip", config=CrawlerRunConfig( stream=False, cache_mode=CacheMode.BYPASS, proxy_config=proxy_config_obj, page_timeout=10000 )) print("Dict proxy test passed!") print(result.markdown[:200] if result and result.markdown else "No result") except Exception as e: print(f"Dict proxy test error (expected): {e}") @pytest.mark.asyncio async def test_string_proxy(): """Test function for string proxy format.""" proxy_str = "23.95.150.145:6114:cfyswbwn:1gs266hoqysi" proxy_config_obj = ProxyConfig.from_string(proxy_str) browser_config = BrowserConfig(headless=True) async with AsyncWebCrawler(config=browser_config) as crawler: try: result = await crawler.arun(url="https://httpbin.org/ip", config=CrawlerRunConfig( stream=False, cache_mode=CacheMode.BYPASS, proxy_config=proxy_config_obj, page_timeout=10000 )) print("String proxy test passed!") print(result.markdown[:200] if result and result.markdown else "No result") except Exception as e: print(f"String proxy test error (expected): {e}") @pytest.mark.asyncio async def test_env_proxy(): """Test function for environment variable proxy.""" # Set environment variable os.environ['TEST_PROXIES'] = "23.95.150.145:6114:cfyswbwn:1gs266hoqysi" proxies = ProxyConfig.from_env('TEST_PROXIES') if 
proxies: proxy_config_obj = proxies[0] # Use first proxy browser_config = BrowserConfig(headless=True) async with AsyncWebCrawler(config=browser_config) as crawler: try: result = await crawler.arun(url="https://httpbin.org/ip", config=CrawlerRunConfig( stream=False, cache_mode=CacheMode.BYPASS, proxy_config=proxy_config_obj, page_timeout=10000 )) print("Environment proxy test passed!") print(result.markdown[:200] if result and result.markdown else "No result") except Exception as e: print(f"Environment proxy test error (expected): {e}") else: print("No proxies loaded from environment") if __name__ == "__main__": print("Running comprehensive ProxyConfig tests...") print("=" * 50) # Run the standalone test functions print("\n1. Testing dict proxy format...") asyncio.run(test_dict_proxy()) print("\n2. Testing string proxy format...") asyncio.run(test_string_proxy()) print("\n3. Testing environment variable proxy format...") asyncio.run(test_env_proxy()) print("\n" + "=" * 50) print("To run the full pytest suite, use: pytest " + __file__) print("=" * 50)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_docker.py
tests/docker/test_docker.py
import requests import time import httpx import asyncio from typing import Dict, Any from crawl4ai import ( BrowserConfig, CrawlerRunConfig, DefaultMarkdownGenerator, PruningContentFilter, JsonCssExtractionStrategy, LLMContentFilter, CacheMode ) from crawl4ai import LLMConfig from crawl4ai.docker_client import Crawl4aiDockerClient class Crawl4AiTester: def __init__(self, base_url: str = "http://localhost:11235"): self.base_url = base_url def submit_and_wait( self, request_data: Dict[str, Any], timeout: int = 300 ) -> Dict[str, Any]: # Submit crawl job response = requests.post(f"{self.base_url}/crawl", json=request_data) task_id = response.json()["task_id"] print(f"Task ID: {task_id}") # Poll for result start_time = time.time() while True: if time.time() - start_time > timeout: raise TimeoutError( f"Task {task_id} did not complete within {timeout} seconds" ) result = requests.get(f"{self.base_url}/task/{task_id}") status = result.json() if status["status"] == "failed": print("Task failed:", status.get("error")) raise Exception(f"Task failed: {status.get('error')}") if status["status"] == "completed": return status time.sleep(2) async def test_direct_api(): """Test direct API endpoints without using the client SDK""" print("\n=== Testing Direct API Calls ===") # Test 1: Basic crawl with content filtering browser_config = BrowserConfig( headless=True, viewport_width=1200, viewport_height=800 ) crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter( threshold=0.48, threshold_type="fixed", min_word_threshold=0 ), options={"ignore_links": True} ) ) request_data = { "urls": ["https://example.com"], "browser_config": browser_config.dump(), "crawler_config": crawler_config.dump() } # Make direct API call async with httpx.AsyncClient() as client: response = await client.post( "http://localhost:11235/crawl", json=request_data, timeout=300 ) assert response.status_code == 200 result = 
response.json() print("Basic crawl result:", result["success"]) # Test 2: Structured extraction with JSON CSS schema = { "baseSelector": "article.post", "fields": [ {"name": "title", "selector": "h1", "type": "text"}, {"name": "content", "selector": ".content", "type": "html"} ] } crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, extraction_strategy=JsonCssExtractionStrategy(schema=schema) ) request_data["crawler_config"] = crawler_config.dump() async with httpx.AsyncClient() as client: response = await client.post( "http://localhost:11235/crawl", json=request_data ) assert response.status_code == 200 result = response.json() print("Structured extraction result:", result["success"]) # Test 3: Raw HTML request_data["urls"] = ["raw://<html><body><h1>Hello, World!</h1><a href='https://example.com'>Example</a></body></html>"] async with httpx.AsyncClient() as client: response = await client.post( "http://localhost:11235/crawl", json=request_data ) assert response.status_code == 200 result = response.json() print("Raw HTML result:", result["success"]) # Test 3: Get schema # async with httpx.AsyncClient() as client: # response = await client.get("http://localhost:8000/schema") # assert response.status_code == 200 # schemas = response.json() # print("Retrieved schemas for:", list(schemas.keys())) async def test_with_client(): """Test using the Crawl4AI Docker client SDK""" print("\n=== Testing Client SDK ===") async with Crawl4aiDockerClient(base_url="http://localhost:11235", verbose=True) as client: # Test 1: Basic crawl browser_config = BrowserConfig(headless=True) crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter( threshold=0.48, threshold_type="fixed" ) ) ) result = await client.crawl( urls=["https://example.com"], browser_config=browser_config, crawler_config=crawler_config ) print("Client SDK basic crawl:", result.success) # Test 2: LLM extraction with 
streaming crawler_config = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, markdown_generator=DefaultMarkdownGenerator( content_filter=LLMContentFilter( llm_config=LLMConfig(provider="openai/gpt-40"), instruction="Extract key technical concepts" ) ), stream=True ) async for result in await client.crawl( urls=["https://example.com"], browser_config=browser_config, crawler_config=crawler_config ): print(f"Streaming result for: {result.url}") # # Test 3: Get schema # schemas = await client.get_schema() # print("Retrieved client schemas for:", list(schemas.keys())) async def main(): """Run all tests""" # Test direct API print("Testing direct API calls...") await test_direct_api() # Test client SDK print("\nTesting client SDK...") await test_with_client() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_hooks_client.py
tests/docker/test_hooks_client.py
#!/usr/bin/env python3 """ Test client for demonstrating user-provided hooks in Crawl4AI Docker API """ import requests import json from typing import Dict, Any API_BASE_URL = "http://localhost:11234" # Adjust if needed def test_hooks_info(): """Get information about available hooks""" print("=" * 70) print("Testing: GET /hooks/info") print("=" * 70) response = requests.get(f"{API_BASE_URL}/hooks/info") if response.status_code == 200: data = response.json() print("Available Hook Points:") for hook, info in data['available_hooks'].items(): print(f"\n{hook}:") print(f" Parameters: {', '.join(info['parameters'])}") print(f" Description: {info['description']}") else: print(f"Error: {response.status_code}") print(response.text) def test_basic_crawl_with_hooks(): """Test basic crawling with user-provided hooks""" print("\n" + "=" * 70) print("Testing: POST /crawl with hooks") print("=" * 70) # Define hooks as Python code strings hooks_code = { "on_page_context_created": """ async def hook(page, context, **kwargs): print("Hook: Setting up page context") # Block images to speed up crawling await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort()) print("Hook: Images blocked") return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): print("Hook: Before retrieving HTML") # Scroll to bottom to load lazy content await page.evaluate("window.scrollTo(0, document.body.scrollHeight)") await page.wait_for_timeout(1000) print("Hook: Scrolled to bottom") return page """, "before_goto": """ async def hook(page, context, url, **kwargs): print(f"Hook: About to navigate to {url}") # Add custom headers await page.set_extra_http_headers({ 'X-Test-Header': 'crawl4ai-hooks-test' }) return page """ } # Create request payload payload = { "urls": ["https://httpbin.org/html"], "hooks": { "code": hooks_code, "timeout": 30 } } print("Sending request with hooks...") response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if 
response.status_code == 200: data = response.json() print("\n✅ Crawl successful!") # Check hooks status if 'hooks' in data: hooks_info = data['hooks'] print("\nHooks Execution Summary:") print(f" Status: {hooks_info['status']['status']}") print(f" Attached hooks: {', '.join(hooks_info['status']['attached_hooks'])}") if hooks_info['status']['validation_errors']: print("\n⚠️ Validation Errors:") for error in hooks_info['status']['validation_errors']: print(f" - {error['hook_point']}: {error['error']}") if 'summary' in hooks_info: summary = hooks_info['summary'] print(f"\nExecution Statistics:") print(f" Total executions: {summary['total_executions']}") print(f" Successful: {summary['successful']}") print(f" Failed: {summary['failed']}") print(f" Timed out: {summary['timed_out']}") print(f" Success rate: {summary['success_rate']:.1f}%") if hooks_info['execution_log']: print("\nExecution Log:") for log_entry in hooks_info['execution_log']: status_icon = "✅" if log_entry['status'] == 'success' else "❌" print(f" {status_icon} {log_entry['hook_point']}: {log_entry['status']} ({log_entry.get('execution_time', 0):.2f}s)") if hooks_info['errors']: print("\n❌ Hook Errors:") for error in hooks_info['errors']: print(f" - {error['hook_point']}: {error['error']}") # Show crawl results if 'results' in data: print(f"\nCrawled {len(data['results'])} URL(s)") for result in data['results']: print(f" - {result['url']}: {'✅' if result['success'] else '❌'}") else: print(f"❌ Error: {response.status_code}") print(response.text) def test_invalid_hook(): """Test with an invalid hook to see error handling""" print("\n" + "=" * 70) print("Testing: Invalid hook handling") print("=" * 70) # Intentionally broken hook hooks_code = { "on_page_context_created": """ def hook(page, context): # Missing async! 
return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): # This will cause an error await page.non_existent_method() return page """ } payload = { "urls": ["https://httpbin.org/html"], "hooks": { "code": hooks_code, "timeout": 5 } } print("Sending request with invalid hooks...") response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if response.status_code == 200: data = response.json() if 'hooks' in data: hooks_info = data['hooks'] print(f"\nHooks Status: {hooks_info['status']['status']}") if hooks_info['status']['validation_errors']: print("\n✅ Validation caught errors (as expected):") for error in hooks_info['status']['validation_errors']: print(f" - {error['hook_point']}: {error['error']}") if hooks_info['errors']: print("\n✅ Runtime errors handled gracefully:") for error in hooks_info['errors']: print(f" - {error['hook_point']}: {error['error']}") # The crawl should still succeed despite hook errors if data.get('success'): print("\n✅ Crawl succeeded despite hook errors (error isolation working!)") else: print(f"Error: {response.status_code}") print(response.text) def test_authentication_hook(): """Test authentication using hooks""" print("\n" + "=" * 70) print("Testing: Authentication with hooks") print("=" * 70) hooks_code = { "before_goto": """ async def hook(page, context, url, **kwargs): # For httpbin.org basic auth test, set Authorization header import base64 # httpbin.org/basic-auth/user/passwd expects username="user" and password="passwd" credentials = base64.b64encode(b"user:passwd").decode('ascii') await page.set_extra_http_headers({ 'Authorization': f'Basic {credentials}' }) print(f"Hook: Set Authorization header for {url}") return page """, "on_page_context_created": """ async def hook(page, context, **kwargs): # Example: Add cookies for session tracking await context.add_cookies([ { 'name': 'session_id', 'value': 'test_session_123', 'domain': '.httpbin.org', 'path': '/', 'httpOnly': True, 'secure': True } ]) 
print("Hook: Added session cookie") return page """ } payload = { "urls": ["https://httpbin.org/basic-auth/user/passwd"], "hooks": { "code": hooks_code, "timeout": 30 } } print("Sending request with authentication hook...") response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if response.status_code == 200: data = response.json() if data.get('success'): print("✅ Crawl with authentication hook successful") # Check if hooks executed if 'hooks' in data: hooks_info = data['hooks'] if hooks_info.get('summary', {}).get('successful', 0) > 0: print(f"✅ Authentication hooks executed: {hooks_info['summary']['successful']} successful") # Check for any hook errors if hooks_info.get('errors'): print("⚠️ Hook errors:") for error in hooks_info['errors']: print(f" - {error}") # Check if authentication worked by looking at the result if 'results' in data and len(data['results']) > 0: result = data['results'][0] if result.get('success'): print("✅ Page crawled successfully (authentication worked!)") # httpbin.org/basic-auth returns JSON with authenticated=true when successful if 'authenticated' in str(result.get('html', '')): print("✅ Authentication confirmed in response content") else: print(f"❌ Crawl failed: {result.get('error_message', 'Unknown error')}") else: print("❌ Request failed") print(f"Response: {json.dumps(data, indent=2)}") else: print(f"❌ Error: {response.status_code}") try: error_data = response.json() print(f"Error details: {json.dumps(error_data, indent=2)}") except: print(f"Error text: {response.text[:500]}") def test_streaming_with_hooks(): """Test streaming endpoint with hooks""" print("\n" + "=" * 70) print("Testing: POST /crawl/stream with hooks") print("=" * 70) hooks_code = { "before_retrieve_html": """ async def hook(page, context, **kwargs): await page.evaluate("document.querySelectorAll('img').forEach(img => img.remove())") return page """ } payload = { "urls": ["https://httpbin.org/html", "https://httpbin.org/json"], "hooks": { "code": 
hooks_code, "timeout": 10 } } print("Sending streaming request with hooks...") with requests.post(f"{API_BASE_URL}/crawl/stream", json=payload, stream=True) as response: if response.status_code == 200: # Check headers for hooks status hooks_status = response.headers.get('X-Hooks-Status') if hooks_status: print(f"Hooks Status (from header): {hooks_status}") print("\nStreaming results:") for line in response.iter_lines(): if line: try: result = json.loads(line) if 'url' in result: print(f" Received: {result['url']}") elif 'status' in result: print(f" Stream status: {result['status']}") except json.JSONDecodeError: print(f" Raw: {line.decode()}") else: print(f"Error: {response.status_code}") def test_basic_without_hooks(): """Test basic crawl without hooks""" print("\n" + "=" * 70) print("Testing: POST /crawl with no hooks") print("=" * 70) payload = { "urls": ["https://httpbin.org/html", "https://httpbin.org/json"] } response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if response.status_code == 200: data = response.json() print(f"Response: {json.dumps(data, indent=2)}") else: print(f"Error: {response.status_code}") def main(): """Run all tests""" print("🔧 Crawl4AI Docker API - Hooks Testing") print("=" * 70) # Test 1: Get hooks information # test_hooks_info() # Test 2: Basic crawl with hooks # test_basic_crawl_with_hooks() # Test 3: Invalid hooks (error handling) test_invalid_hook() # # Test 4: Authentication hook # test_authentication_hook() # # Test 5: Streaming with hooks # test_streaming_with_hooks() # # Test 6: Basic crawl without hooks # test_basic_without_hooks() print("\n" + "=" * 70) print("✅ All tests completed!") print("=" * 70) if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_server_requests.py
tests/docker/test_server_requests.py
import pytest import pytest_asyncio import httpx import json import asyncio import os from typing import List, Dict, Any, AsyncGenerator from dotenv import load_dotenv load_dotenv() # Optional: Import crawl4ai classes directly for reference/easier payload creation aid # You don't strictly NEED these imports for the tests to run against the server, # but they help in understanding the structure you are mimicking in JSON. from crawl4ai import ( BrowserConfig, CrawlerRunConfig, CacheMode, DefaultMarkdownGenerator, PruningContentFilter, BM25ContentFilter, BFSDeepCrawlStrategy, FilterChain, ContentTypeFilter, DomainFilter, CompositeScorer, KeywordRelevanceScorer, PathDepthScorer, JsonCssExtractionStrategy, LLMExtractionStrategy, LLMConfig ) # --- Test Configuration --- # BASE_URL = os.getenv("CRAWL4AI_TEST_URL", "http://localhost:8020") # Make base URL configurable BASE_URL = os.getenv("CRAWL4AI_TEST_URL", "http://localhost:11235") # Make base URL configurable # Use a known simple HTML page for basic tests SIMPLE_HTML_URL = "https://httpbin.org/html" # Use a site suitable for scraping tests SCRAPE_TARGET_URL = "http://books.toscrape.com/" # Use a site with internal links for deep crawl tests DEEP_CRAWL_URL = "https://python.org" # --- Pytest Fixtures --- # Use the built-in event_loop fixture from pytest_asyncio # The custom implementation was causing issues with closing the loop @pytest_asyncio.fixture(scope="function") # Changed to function scope to avoid event loop issues async def async_client() -> AsyncGenerator[httpx.AsyncClient, None]: """Provides an async HTTP client""" client = httpx.AsyncClient(base_url=BASE_URL, timeout=120.0) yield client await client.aclose() # --- Helper Functions --- async def check_server_health(client: httpx.AsyncClient): """Check if the server is healthy before running tests.""" try: response = await client.get("/health") response.raise_for_status() print(f"\nServer healthy: {response.json()}") return True except (httpx.RequestError, 
httpx.HTTPStatusError) as e: pytest.fail(f"Server health check failed: {e}. Is the server running at {BASE_URL}?", pytrace=False) async def assert_crawl_result_structure(result: Dict[str, Any]): """Asserts the basic structure of a single crawl result.""" assert isinstance(result, dict) assert "url" in result assert "success" in result assert "html" in result # Add more common checks if needed async def process_streaming_response(response: httpx.Response) -> List[Dict[str, Any]]: """Processes an NDJSON streaming response.""" results = [] completed = False async for line in response.aiter_lines(): if line: try: data = json.loads(line) if data.get("status") == "completed": completed = True break # Stop processing after completion marker else: results.append(data) except json.JSONDecodeError: pytest.fail(f"Failed to decode JSON line: {line}") assert completed, "Streaming response did not end with a completion marker." return results # --- Test Class --- @pytest.mark.asyncio class TestCrawlEndpoints: @pytest_asyncio.fixture(autouse=True) async def check_health_before_tests(self, async_client: httpx.AsyncClient): """Fixture to ensure server is healthy before each test in the class.""" await check_server_health(async_client) # 1. 
Simple Requests (Primitives) async def test_simple_crawl_single_url(self, async_client: httpx.AsyncClient): """Test /crawl with a single URL and simple config values.""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": { "type": "BrowserConfig", "params": { "headless": True, } }, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, # Explicitly false for /crawl "screenshot": False, "cache_mode": CacheMode.BYPASS.value # Use enum value } } } try: response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: print(f"Server error: {e}") print(f"Response content: {e.response.text}") raise assert data["success"] is True assert isinstance(data["results"], list) assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result) assert result["success"] is True assert result["url"] == SIMPLE_HTML_URL assert "<h1>Herman Melville - Moby-Dick</h1>" in result["html"] # We don't specify a markdown generator in this test, so don't make assumptions about markdown field # It might be null, missing, or populated depending on the server's default behavior async def test_crawl_with_stream_direct(self, async_client: httpx.AsyncClient): """Test that /crawl endpoint handles stream=True directly without redirect.""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": { "type": "BrowserConfig", "params": { "headless": True, } }, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": True, # Set stream to True for direct streaming "screenshot": False, "cache_mode": CacheMode.BYPASS.value } } } # Send a request to the /crawl endpoint - should handle streaming directly async with async_client.stream("POST", "/crawl", json=payload) as response: assert response.status_code == 200 assert response.headers["content-type"] == "application/x-ndjson" assert 
response.headers.get("x-stream-status") == "active" results = await process_streaming_response(response) assert len(results) == 1 result = results[0] await assert_crawl_result_structure(result) assert result["success"] is True assert result["url"] == SIMPLE_HTML_URL assert "<h1>Herman Melville - Moby-Dick</h1>" in result["html"] async def test_simple_crawl_single_url_streaming(self, async_client: httpx.AsyncClient): """Test /crawl/stream with a single URL and simple config values.""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": { "type": "BrowserConfig", "params": { "headless": True, } }, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": True, # Must be true for /crawl/stream "screenshot": False, "cache_mode": CacheMode.BYPASS.value } } } async with async_client.stream("POST", "/crawl/stream", json=payload) as response: response.raise_for_status() results = await process_streaming_response(response) assert len(results) == 1 result = results[0] await assert_crawl_result_structure(result) assert result["success"] is True assert result["url"] == SIMPLE_HTML_URL assert "<h1>Herman Melville - Moby-Dick</h1>" in result["html"] # 2. 
Multi-URL and Dispatcher async def test_multi_url_crawl(self, async_client: httpx.AsyncClient): """Test /crawl with multiple URLs, implicitly testing dispatcher.""" urls = [SIMPLE_HTML_URL, "https://httpbin.org/links/10/0"] payload = { "urls": urls, "browser_config": { "type": "BrowserConfig", "params": {"headless": True} }, "crawler_config": { "type": "CrawlerRunConfig", "params": {"stream": False, "cache_mode": CacheMode.BYPASS.value} } } try: print(f"Sending deep crawl request to server...") response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") if response.status_code >= 400: error_detail = response.json().get('detail', 'No detail provided') print(f"Error detail: {error_detail}") print(f"Full response: {response.text}") response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: print(f"Server error status: {e.response.status_code}") print(f"Server error response: {e.response.text}") try: error_json = e.response.json() print(f"Parsed error: {error_json}") except: print("Could not parse error response as JSON") raise assert data["success"] is True assert isinstance(data["results"], list) assert len(data["results"]) == len(urls) for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True assert result["url"] in urls async def test_multi_url_crawl_streaming(self, async_client: httpx.AsyncClient): """Test /crawl/stream with multiple URLs.""" urls = [SIMPLE_HTML_URL, "https://httpbin.org/links/10/0"] payload = { "urls": urls, "browser_config": { "type": "BrowserConfig", "params": {"headless": True} }, "crawler_config": { "type": "CrawlerRunConfig", "params": {"stream": True, "cache_mode": CacheMode.BYPASS.value} } } async with async_client.stream("POST", "/crawl/stream", json=payload) as response: response.raise_for_status() results = await process_streaming_response(response) assert len(results) == len(urls) processed_urls = set() for 
result in results: await assert_crawl_result_structure(result) assert result["success"] is True assert result["url"] in urls processed_urls.add(result["url"]) assert processed_urls == set(urls) # Ensure all URLs were processed # 3. Class Values and Nested Classes (Markdown Generator) async def test_crawl_with_markdown_pruning_filter(self, async_client: httpx.AsyncClient): """Test /crawl with MarkdownGenerator using PruningContentFilter.""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "cache_mode": CacheMode.ENABLED.value, # Test different cache mode "markdown_generator": { "type": "DefaultMarkdownGenerator", "params": { "content_filter": { "type": "PruningContentFilter", "params": { "threshold": 0.5, # Example param "threshold_type": "relative" } } } } } } } try: print(f"Sending deep crawl request to server...") response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") if response.status_code >= 400: error_detail = response.json().get('detail', 'No detail provided') print(f"Error detail: {error_detail}") print(f"Full response: {response.text}") response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: print(f"Server error status: {e.response.status_code}") print(f"Server error response: {e.response.text}") try: error_json = e.response.json() print(f"Parsed error: {error_json}") except: print("Could not parse error response as JSON") raise assert data["success"] is True assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result) assert result["success"] is True assert "markdown" in result assert isinstance(result["markdown"], dict) assert "raw_markdown" in result["markdown"] assert "fit_markdown" in result["markdown"] # Pruning creates fit_markdown assert "Moby-Dick" in result["markdown"]["raw_markdown"] # Fit 
markdown content might be different/shorter due to pruning assert len(result["markdown"]["fit_markdown"]) <= len(result["markdown"]["raw_markdown"]) async def test_crawl_with_markdown_bm25_filter(self, async_client: httpx.AsyncClient): """Test /crawl with MarkdownGenerator using BM25ContentFilter.""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "markdown_generator": { "type": "DefaultMarkdownGenerator", "params": { "content_filter": { "type": "BM25ContentFilter", "params": { "user_query": "Herman Melville", # Query for BM25 "bm25_threshold": 0.1, # Lower threshold to increase matches "language": "english" # Valid parameters } } } } } } } try: print(f"Payload for BM25 test: {json.dumps(payload)}") response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") if response.status_code >= 400: error_detail = response.json().get('detail', 'No detail provided') print(f"Error detail: {error_detail}") print(f"Full response: {response.text}") response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: print(f"Server error status: {e.response.status_code}") print(f"Server error response: {e.response.text}") try: error_json = e.response.json() print(f"Parsed error: {error_json}") except: print("Could not parse error response as JSON") raise assert data["success"] is True assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result) assert result["success"] is True assert "markdown" in result assert isinstance(result["markdown"], dict) assert "raw_markdown" in result["markdown"] assert "fit_markdown" in result["markdown"] # BM25 creates fit_markdown # Print values for debug print(f"Raw markdown length: {len(result['markdown']['raw_markdown'])}") print(f"Fit markdown length: {len(result['markdown']['fit_markdown'])}") # Either 
fit_markdown has content (possibly including our query terms) # or it might be empty if no good BM25 matches were found # Don't assert specific content since it can be environment-dependent # 4. Deep Crawling async def test_deep_crawl(self, async_client: httpx.AsyncClient): """Test /crawl with a deep crawl strategy.""" payload = { "urls": [DEEP_CRAWL_URL], # Start URL "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": CacheMode.BYPASS.value, "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": 1, # Limit depth for testing speed "max_pages": 5, # Limit pages to crawl "filter_chain": { "type": "FilterChain", "params": { "filters": [ { "type": "ContentTypeFilter", "params": {"allowed_types": ["text/html"]} }, { "type": "DomainFilter", "params": {"allowed_domains": ["python.org", "docs.python.org"]} # Include important subdomains } ] } }, "url_scorer": { "type": "CompositeScorer", "params": { "scorers": [ { "type": "KeywordRelevanceScorer", "params": {"keywords": ["documentation", "tutorial"]} }, { "type": "PathDepthScorer", "params": {"weight": 0.5, "optimal_depth": 2} } ] } } } } } } } try: print(f"Sending deep crawl request to server...") response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") if response.status_code >= 400: error_detail = response.json().get('detail', 'No detail provided') print(f"Error detail: {error_detail}") print(f"Full response: {response.text}") response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: print(f"Server error status: {e.response.status_code}") print(f"Server error response: {e.response.text}") try: error_json = e.response.json() print(f"Parsed error: {error_json}") except: print("Could not parse error response as JSON") raise assert data["success"] is True assert isinstance(data["results"], list) # Expect 
more than 1 result due to deep crawl (start URL + crawled links) assert len(data["results"]) > 1 assert len(data["results"]) <= 6 # Start URL + max_links=5 start_url_found = False crawled_urls_found = False for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True # Print URL for debugging print(f"Crawled URL: {result['url']}") # Allow URLs that contain python.org (including subdomains like docs.python.org) assert "python.org" in result["url"] if result["url"] == DEEP_CRAWL_URL: start_url_found = True else: crawled_urls_found = True assert start_url_found assert crawled_urls_found # 5. Extraction without LLM (JSON/CSS) async def test_json_css_extraction(self, async_client: httpx.AsyncClient): """Test /crawl with JsonCssExtractionStrategy.""" payload = { "urls": [SCRAPE_TARGET_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "cache_mode": CacheMode.BYPASS.value, "extraction_strategy": { "type": "JsonCssExtractionStrategy", "params": { "schema": { "type": "dict", # IMPORTANT: Wrap schema dict with type/value structure "value": { "name": "BookList", "baseSelector": "ol.row li.col-xs-6", # Select each book item "fields": [ {"name": "title", "selector": "article.product_pod h3 a", "type": "attribute", "attribute": "title"}, {"name": "price", "selector": "article.product_pod .price_color", "type": "text"}, {"name": "rating", "selector": "article.product_pod p.star-rating", "type": "attribute", "attribute": "class"} ] } } } } } } } try: print(f"Sending deep crawl request to server...") response = await async_client.post("/crawl", json=payload) print(f"Response status: {response.status_code}") if response.status_code >= 400: error_detail = response.json().get('detail', 'No detail provided') print(f"Error detail: {error_detail}") print(f"Full response: {response.text}") response.raise_for_status() data = response.json() except 
httpx.HTTPStatusError as e: print(f"Server error status: {e.response.status_code}") print(f"Server error response: {e.response.text}") try: error_json = e.response.json() print(f"Parsed error: {error_json}") except: print("Could not parse error response as JSON") raise assert data["success"] is True assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result) assert result["success"] is True assert "extracted_content" in result assert result["extracted_content"] is not None # Extracted content should be a JSON string representing a list of dicts try: extracted_data = json.loads(result["extracted_content"]) assert isinstance(extracted_data, list) assert len(extracted_data) > 0 # Should find some books # Check structure of the first extracted item first_item = extracted_data[0] assert "title" in first_item assert "price" in first_item assert "rating" in first_item assert "star-rating" in first_item["rating"] # e.g., "star-rating Three" except (json.JSONDecodeError, AssertionError) as e: pytest.fail(f"Extracted content parsing or validation failed: {e}\nContent: {result['extracted_content']}") # 6. Extraction with LLM async def test_llm_extraction(self, async_client: httpx.AsyncClient): """ Test /crawl with LLMExtractionStrategy. NOTE: Requires the server to have appropriate LLM API keys (e.g., OPENAI_API_KEY) configured via .llm.env or environment variables. This test uses the default provider configured in the server's config.yml. 
""" payload = { "urls": [SIMPLE_HTML_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "cache_mode": CacheMode.BYPASS.value, "extraction_strategy": { "type": "LLMExtractionStrategy", "params": { "instruction": "Extract the main title and the author mentioned in the text into JSON.", # LLMConfig is implicitly defined by server's config.yml and .llm.env # If you needed to override provider/token PER REQUEST: "llm_config": { "type": "LLMConfig", "params": { "provider": "openai/gpt-4o", # Example override "api_token": os.getenv("OPENAI_API_KEY") # Example override } }, "schema": { # Optional: Provide a schema for structured output "type": "dict", # IMPORTANT: Wrap schema dict "value": { "title": "Book Info", "type": "object", "properties": { "title": {"type": "string", "description": "The main title of the work"}, "author": {"type": "string", "description": "The author of the work"} }, "required": ["title", "author"] } } } } } } } try: response = await async_client.post("/crawl", json=payload) response.raise_for_status() # Will raise if server returns 500 (e.g., bad API key) data = response.json() except httpx.HTTPStatusError as e: # Catch potential server errors (like 500 due to missing/invalid API keys) pytest.fail(f"LLM extraction request failed: {e}. Response: {e.response.text}. 
Check server logs and ensure API keys are correctly configured for the server.") except httpx.RequestError as e: pytest.fail(f"LLM extraction request failed: {e}.") assert data["success"] is True assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result) assert result["success"] is True assert "extracted_content" in result assert result["extracted_content"] is not None # Extracted content should be JSON (because we provided a schema) try: extracted_data = json.loads(result["extracted_content"]) print(f"\nLLM Extracted Data: {extracted_data}") # Print for verification # Handle both dict and list formats (server returns a list) if isinstance(extracted_data, list): assert len(extracted_data) > 0 extracted_item = extracted_data[0] # Take first item assert isinstance(extracted_item, dict) assert "title" in extracted_item assert "author" in extracted_item assert "Moby-Dick" in extracted_item.get("title", "") assert "Herman Melville" in extracted_item.get("author", "") else: assert isinstance(extracted_data, dict) assert "title" in extracted_data assert "author" in extracted_data assert "Moby-Dick" in extracted_data.get("title", "") assert "Herman Melville" in extracted_data.get("author", "") except (json.JSONDecodeError, AssertionError) as e: pytest.fail(f"LLM extracted content parsing or validation failed: {e}\nContent: {result['extracted_content']}") except Exception as e: # Catch any other unexpected error pytest.fail(f"An unexpected error occurred during LLM result processing: {e}\nContent: {result['extracted_content']}") # 7. 
Error Handling Tests async def test_invalid_url_handling(self, async_client: httpx.AsyncClient): """Test error handling for invalid URLs.""" payload = { "urls": ["invalid-url", "https://nonexistent-domain-12345.com"], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": {"type": "CrawlerRunConfig", "params": {"cache_mode": CacheMode.BYPASS.value}} } response = await async_client.post("/crawl", json=payload) # Should return 200 with failed results, not 500 print(f"Status code: {response.status_code}") print(f"Response: {response.text}") assert response.status_code == 500 data = response.json() assert data["detail"].startswith("Crawl request failed:") async def test_mixed_success_failure_urls(self, async_client: httpx.AsyncClient): """Test handling of mixed success/failure URLs.""" payload = { "urls": [ SIMPLE_HTML_URL, # Should succeed "https://nonexistent-domain-12345.com", # Should fail "https://invalid-url-with-special-chars-!@#$%^&*()", # Should fail ], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "cache_mode": CacheMode.BYPASS.value, "markdown_generator": { "type": "DefaultMarkdownGenerator", "params": { "content_filter": { "type": "PruningContentFilter", "params": {"threshold": 0.5} } } } } } } response = await async_client.post("/crawl", json=payload) assert response.status_code == 200 data = response.json() assert data["success"] is True assert len(data["results"]) == 3 success_count = 0 failure_count = 0 for result in data["results"]: if result["success"]: success_count += 1 else: failure_count += 1 assert "error_message" in result assert len(result["error_message"]) > 0 assert success_count >= 1 # At least one should succeed assert failure_count >= 1 # At least one should fail async def test_streaming_mixed_urls(self, async_client: httpx.AsyncClient): """Test streaming with mixed success/failure URLs.""" payload = { "urls": [ 
SIMPLE_HTML_URL, # Should succeed "https://nonexistent-domain-12345.com", # Should fail ], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": True,
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_hooks_comprehensive.py
tests/docker/test_hooks_comprehensive.py
#!/usr/bin/env python3 """ Comprehensive test demonstrating all hook types from hooks_example.py adapted for the Docker API with real URLs """ import requests import json import time from typing import Dict, Any API_BASE_URL = "http://localhost:11234" def test_all_hooks_demo(): """Demonstrate all 8 hook types with practical examples""" print("=" * 70) print("Testing: All Hooks Comprehensive Demo") print("=" * 70) hooks_code = { "on_browser_created": """ async def hook(browser, **kwargs): # Hook called after browser is created print("[HOOK] on_browser_created - Browser is ready!") # Browser-level configurations would go here return browser """, "on_page_context_created": """ async def hook(page, context, **kwargs): # Hook called after a new page and context are created print("[HOOK] on_page_context_created - New page created!") # Set viewport size for consistent rendering await page.set_viewport_size({"width": 1920, "height": 1080}) # Add cookies for the session (using httpbin.org domain) await context.add_cookies([ { "name": "test_session", "value": "abc123xyz", "domain": ".httpbin.org", "path": "/", "httpOnly": True, "secure": True } ]) # Block ads and tracking scripts to speed up crawling await context.route("**/*.{png,jpg,jpeg,gif,webp,svg}", lambda route: route.abort()) await context.route("**/analytics/*", lambda route: route.abort()) await context.route("**/ads/*", lambda route: route.abort()) print("[HOOK] Viewport set, cookies added, and ads blocked") return page """, "on_user_agent_updated": """ async def hook(page, context, user_agent, **kwargs): # Hook called when user agent is updated print(f"[HOOK] on_user_agent_updated - User agent: {user_agent[:50]}...") return page """, "before_goto": """ async def hook(page, context, url, **kwargs): # Hook called before navigating to each URL print(f"[HOOK] before_goto - About to visit: {url}") # Add custom headers for the request await page.set_extra_http_headers({ "X-Custom-Header": "crawl4ai-test", 
"Accept-Language": "en-US,en;q=0.9", "DNT": "1" }) return page """, "after_goto": """ async def hook(page, context, url, response, **kwargs): # Hook called after navigating to each URL print(f"[HOOK] after_goto - Successfully loaded: {url}") # Wait a moment for dynamic content to load await page.wait_for_timeout(1000) # Check if specific elements exist (with error handling) try: # For httpbin.org, wait for body element await page.wait_for_selector("body", timeout=2000) print("[HOOK] Body element found and loaded") except: print("[HOOK] Timeout waiting for body, continuing anyway") return page """, "on_execution_started": """ async def hook(page, context, **kwargs): # Hook called after custom JavaScript execution print("[HOOK] on_execution_started - Custom JS executed!") # You could inject additional JavaScript here if needed await page.evaluate("console.log('[INJECTED] Hook JS running');") return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): # Hook called before retrieving the HTML content print("[HOOK] before_retrieve_html - Preparing to get HTML") # Scroll to bottom to trigger lazy loading await page.evaluate("window.scrollTo(0, document.body.scrollHeight);") await page.wait_for_timeout(500) # Scroll back to top await page.evaluate("window.scrollTo(0, 0);") await page.wait_for_timeout(500) # One more scroll to middle for good measure await page.evaluate("window.scrollTo(0, document.body.scrollHeight / 2);") print("[HOOK] Scrolling completed for lazy-loaded content") return page """, "before_return_html": """ async def hook(page, context, html, **kwargs): # Hook called before returning the HTML content print(f"[HOOK] before_return_html - HTML length: {len(html)} characters") # Log some page metrics metrics = await page.evaluate('''() => { return { images: document.images.length, links: document.links.length, scripts: document.scripts.length } }''') print(f"[HOOK] Page metrics - Images: {metrics['images']}, Links: 
{metrics['links']}, Scripts: {metrics['scripts']}") return page """ } # Create request payload payload = { "urls": ["https://httpbin.org/html"], "hooks": { "code": hooks_code, "timeout": 30 }, "crawler_config": { "js_code": "window.scrollTo(0, document.body.scrollHeight);", "wait_for": "body", "cache_mode": "bypass" } } print("\nSending request with all 8 hooks...") start_time = time.time() response = requests.post(f"{API_BASE_URL}/crawl", json=payload) elapsed_time = time.time() - start_time print(f"Request completed in {elapsed_time:.2f} seconds") if response.status_code == 200: data = response.json() print("\n✅ Request successful!") # Check hooks execution if 'hooks' in data: hooks_info = data['hooks'] print("\n📊 Hooks Execution Summary:") print(f" Status: {hooks_info['status']['status']}") print(f" Attached hooks: {len(hooks_info['status']['attached_hooks'])}") for hook_name in hooks_info['status']['attached_hooks']: print(f" ✓ {hook_name}") if 'summary' in hooks_info: summary = hooks_info['summary'] print(f"\n📈 Execution Statistics:") print(f" Total executions: {summary['total_executions']}") print(f" Successful: {summary['successful']}") print(f" Failed: {summary['failed']}") print(f" Timed out: {summary['timed_out']}") print(f" Success rate: {summary['success_rate']:.1f}%") if hooks_info.get('execution_log'): print(f"\n📝 Execution Log:") for log_entry in hooks_info['execution_log']: status_icon = "✅" if log_entry['status'] == 'success' else "❌" exec_time = log_entry.get('execution_time', 0) print(f" {status_icon} {log_entry['hook_point']}: {exec_time:.3f}s") # Check crawl results if 'results' in data and len(data['results']) > 0: print(f"\n📄 Crawl Results:") for result in data['results']: print(f" URL: {result['url']}") print(f" Success: {result.get('success', False)}") if result.get('html'): print(f" HTML length: {len(result['html'])} characters") else: print(f"❌ Error: {response.status_code}") try: error_data = response.json() print(f"Error details: 
{json.dumps(error_data, indent=2)}") except: print(f"Error text: {response.text[:500]}") def test_authentication_flow(): """Test a complete authentication flow with multiple hooks""" print("\n" + "=" * 70) print("Testing: Authentication Flow with Multiple Hooks") print("=" * 70) hooks_code = { "on_page_context_created": """ async def hook(page, context, **kwargs): print("[HOOK] Setting up authentication context") # Add authentication cookies await context.add_cookies([ { "name": "auth_token", "value": "fake_jwt_token_here", "domain": ".httpbin.org", "path": "/", "httpOnly": True, "secure": True } ]) # Set localStorage items (for SPA authentication) await page.evaluate(''' localStorage.setItem('user_id', '12345'); localStorage.setItem('auth_time', new Date().toISOString()); ''') return page """, "before_goto": """ async def hook(page, context, url, **kwargs): print(f"[HOOK] Adding auth headers for {url}") # Add Authorization header import base64 credentials = base64.b64encode(b"user:passwd").decode('ascii') await page.set_extra_http_headers({ 'Authorization': f'Basic {credentials}', 'X-API-Key': 'test-api-key-123' }) return page """ } payload = { "urls": [ "https://httpbin.org/basic-auth/user/passwd" ], "hooks": { "code": hooks_code, "timeout": 15 } } print("\nTesting authentication with httpbin endpoints...") response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if response.status_code == 200: data = response.json() print("✅ Authentication test completed") if 'results' in data: for i, result in enumerate(data['results']): print(f"\n URL {i+1}: {result['url']}") if result.get('success'): # Check for authentication success indicators html_content = result.get('html', '') if '"authenticated"' in html_content and 'true' in html_content: print(" ✅ Authentication successful! 
Basic auth worked.") else: print(" ⚠️ Page loaded but auth status unclear") else: print(f" ❌ Failed: {result.get('error_message', 'Unknown error')}") else: print(f"❌ Error: {response.status_code}") def test_performance_optimization_hooks(): """Test hooks for performance optimization""" print("\n" + "=" * 70) print("Testing: Performance Optimization Hooks") print("=" * 70) hooks_code = { "on_page_context_created": """ async def hook(page, context, **kwargs): print("[HOOK] Optimizing page for performance") # Block resource-heavy content await context.route("**/*.{png,jpg,jpeg,gif,webp,svg,ico}", lambda route: route.abort()) await context.route("**/*.{woff,woff2,ttf,otf}", lambda route: route.abort()) await context.route("**/*.{mp4,webm,ogg,mp3,wav}", lambda route: route.abort()) await context.route("**/googletagmanager.com/*", lambda route: route.abort()) await context.route("**/google-analytics.com/*", lambda route: route.abort()) await context.route("**/doubleclick.net/*", lambda route: route.abort()) await context.route("**/facebook.com/*", lambda route: route.abort()) # Disable animations and transitions await page.add_style_tag(content=''' *, *::before, *::after { animation-duration: 0s !important; animation-delay: 0s !important; transition-duration: 0s !important; transition-delay: 0s !important; } ''') print("[HOOK] Performance optimizations applied") return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): print("[HOOK] Removing unnecessary elements before extraction") # Remove ads, popups, and other unnecessary elements await page.evaluate('''() => { // Remove common ad containers const adSelectors = [ '.ad', '.ads', '.advertisement', '[id*="ad-"]', '[class*="ad-"]', '.popup', '.modal', '.overlay', '.cookie-banner', '.newsletter-signup' ]; adSelectors.forEach(selector => { document.querySelectorAll(selector).forEach(el => el.remove()); }); // Remove script tags to clean up HTML document.querySelectorAll('script').forEach(el => 
el.remove()); // Remove style tags we don't need document.querySelectorAll('style').forEach(el => el.remove()); }''') return page """ } payload = { "urls": ["https://httpbin.org/html"], "hooks": { "code": hooks_code, "timeout": 10 } } print("\nTesting performance optimization hooks...") start_time = time.time() response = requests.post(f"{API_BASE_URL}/crawl", json=payload) elapsed_time = time.time() - start_time print(f"Request completed in {elapsed_time:.2f} seconds") if response.status_code == 200: data = response.json() print("✅ Performance optimization test completed") if 'results' in data and len(data['results']) > 0: result = data['results'][0] if result.get('html'): print(f" HTML size: {len(result['html'])} characters") print(" Resources blocked, ads removed, animations disabled") else: print(f"❌ Error: {response.status_code}") def test_content_extraction_hooks(): """Test hooks for intelligent content extraction""" print("\n" + "=" * 70) print("Testing: Content Extraction Hooks") print("=" * 70) hooks_code = { "after_goto": """ async def hook(page, context, url, response, **kwargs): print(f"[HOOK] Waiting for dynamic content on {url}") # Wait for any lazy-loaded content await page.wait_for_timeout(2000) # Trigger any "Load More" buttons try: load_more = await page.query_selector('[class*="load-more"], [class*="show-more"], button:has-text("Load More")') if load_more: await load_more.click() await page.wait_for_timeout(1000) print("[HOOK] Clicked 'Load More' button") except: pass return page """, "before_retrieve_html": """ async def hook(page, context, **kwargs): print("[HOOK] Extracting structured data") # Extract metadata metadata = await page.evaluate('''() => { const getMeta = (name) => { const element = document.querySelector(`meta[name="${name}"], meta[property="${name}"]`); return element ? 
element.getAttribute('content') : null; }; return { title: document.title, description: getMeta('description') || getMeta('og:description'), author: getMeta('author'), keywords: getMeta('keywords'), ogTitle: getMeta('og:title'), ogImage: getMeta('og:image'), canonical: document.querySelector('link[rel="canonical"]')?.href, jsonLd: Array.from(document.querySelectorAll('script[type="application/ld+json"]')) .map(el => el.textContent).filter(Boolean) }; }''') print(f"[HOOK] Extracted metadata: {json.dumps(metadata, indent=2)}") # Infinite scroll handling for i in range(3): await page.evaluate("window.scrollTo(0, document.body.scrollHeight);") await page.wait_for_timeout(1000) print(f"[HOOK] Scroll iteration {i+1}/3") return page """ } payload = { "urls": ["https://httpbin.org/html", "https://httpbin.org/json"], "hooks": { "code": hooks_code, "timeout": 20 } } print("\nTesting content extraction hooks...") response = requests.post(f"{API_BASE_URL}/crawl", json=payload) if response.status_code == 200: data = response.json() print("✅ Content extraction test completed") if 'hooks' in data and 'summary' in data['hooks']: summary = data['hooks']['summary'] print(f" Hooks executed: {summary['successful']}/{summary['total_executions']}") if 'results' in data: for result in data['results']: print(f"\n URL: {result['url']}") print(f" Success: {result.get('success', False)}") else: print(f"❌ Error: {response.status_code}") def main(): """Run comprehensive hook tests""" print("🔧 Crawl4AI Docker API - Comprehensive Hooks Testing") print("Based on docs/examples/hooks_example.py") print("=" * 70) tests = [ ("All Hooks Demo", test_all_hooks_demo), ("Authentication Flow", test_authentication_flow), ("Performance Optimization", test_performance_optimization_hooks), ("Content Extraction", test_content_extraction_hooks), ] for i, (name, test_func) in enumerate(tests, 1): print(f"\n📌 Test {i}/{len(tests)}: {name}") try: test_func() print(f"✅ {name} completed") except Exception as e: 
print(f"❌ {name} failed: {e}") import traceback traceback.print_exc() print("\n" + "=" * 70) print("🎉 All comprehensive hook tests completed!") print("=" * 70) if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/simple_api_test.py
tests/docker/simple_api_test.py
#!/usr/bin/env python3 """ Simple API Test for Crawl4AI Docker Server v0.7.0 Uses only built-in Python modules to test all endpoints. """ import urllib.request import urllib.parse import json import time import sys from typing import Dict, List, Optional # Configuration BASE_URL = "http://localhost:11234" # Change to your server URL TEST_TIMEOUT = 30 class SimpleApiTester: def __init__(self, base_url: str = BASE_URL): self.base_url = base_url self.token = None self.results = [] def log(self, message: str): print(f"[INFO] {message}") def test_get_endpoint(self, endpoint: str) -> Dict: """Test a GET endpoint""" url = f"{self.base_url}{endpoint}" start_time = time.time() try: req = urllib.request.Request(url) if self.token: req.add_header('Authorization', f'Bearer {self.token}') with urllib.request.urlopen(req, timeout=TEST_TIMEOUT) as response: response_time = time.time() - start_time status_code = response.getcode() content = response.read().decode('utf-8') # Try to parse JSON try: data = json.loads(content) except: data = {"raw_response": content[:200]} return { "endpoint": endpoint, "method": "GET", "status": "PASS" if status_code < 400 else "FAIL", "status_code": status_code, "response_time": response_time, "data": data } except Exception as e: response_time = time.time() - start_time return { "endpoint": endpoint, "method": "GET", "status": "FAIL", "status_code": None, "response_time": response_time, "error": str(e) } def test_post_endpoint(self, endpoint: str, payload: Dict) -> Dict: """Test a POST endpoint""" url = f"{self.base_url}{endpoint}" start_time = time.time() try: data = json.dumps(payload).encode('utf-8') req = urllib.request.Request(url, data=data, method='POST') req.add_header('Content-Type', 'application/json') if self.token: req.add_header('Authorization', f'Bearer {self.token}') with urllib.request.urlopen(req, timeout=TEST_TIMEOUT) as response: response_time = time.time() - start_time status_code = response.getcode() content = 
response.read().decode('utf-8') # Try to parse JSON try: data = json.loads(content) except: data = {"raw_response": content[:200]} return { "endpoint": endpoint, "method": "POST", "status": "PASS" if status_code < 400 else "FAIL", "status_code": status_code, "response_time": response_time, "data": data } except Exception as e: response_time = time.time() - start_time return { "endpoint": endpoint, "method": "POST", "status": "FAIL", "status_code": None, "response_time": response_time, "error": str(e) } def print_result(self, result: Dict): """Print a formatted test result""" status_color = { "PASS": "✅", "FAIL": "❌", "SKIP": "⏭️" } print(f"{status_color[result['status']]} {result['method']} {result['endpoint']} " f"| {result['response_time']:.3f}s | Status: {result['status_code'] or 'N/A'}") if result['status'] == 'FAIL' and 'error' in result: print(f" Error: {result['error']}") self.results.append(result) def run_all_tests(self): """Run all API tests""" print("🚀 Starting Crawl4AI v0.7.0 API Test Suite") print(f"📡 Testing server at: {self.base_url}") print("=" * 60) # # Test basic endpoints # print("\n=== BASIC ENDPOINTS ===") # # Health check # result = self.test_get_endpoint("/health") # self.print_result(result) # # Schema endpoint # result = self.test_get_endpoint("/schema") # self.print_result(result) # # Metrics endpoint # result = self.test_get_endpoint("/metrics") # self.print_result(result) # # Root redirect # result = self.test_get_endpoint("/") # self.print_result(result) # # Test authentication # print("\n=== AUTHENTICATION ===") # # Get token # token_payload = {"email": "test@example.com"} # result = self.test_post_endpoint("/token", token_payload) # self.print_result(result) # # Extract token if successful # if result['status'] == 'PASS' and 'data' in result: # token = result['data'].get('access_token') # if token: # self.token = token # self.log(f"Successfully obtained auth token: {token[:20]}...") # Test core APIs print("\n=== CORE APIs ===") 
test_url = "https://example.com" test_raw_html_url = "raw://<html><body><h1>Hello, World!</h1></body></html>" # Test markdown endpoint md_payload = { "url": test_url, "f": "fit", "q": "test query", "c": "0" } result = self.test_post_endpoint("/md", md_payload) # print(result['data'].get('markdown', '')) self.print_result(result) # Test markdown endpoint with raw HTML raw_md_payload = { "url": test_raw_html_url, "f": "fit", "q": "test query", "c": "0" } result = self.test_post_endpoint("/md", raw_md_payload) self.print_result(result) # Test HTML endpoint html_payload = {"url": test_url} result = self.test_post_endpoint("/html", html_payload) self.print_result(result) # Test screenshot endpoint screenshot_payload = { "url": test_url, "screenshot_wait_for": 2 } result = self.test_post_endpoint("/screenshot", screenshot_payload) self.print_result(result) # Test PDF endpoint pdf_payload = {"url": test_url} result = self.test_post_endpoint("/pdf", pdf_payload) self.print_result(result) # Test JavaScript execution js_payload = { "url": test_url, "scripts": ["(() => document.title)()"] } result = self.test_post_endpoint("/execute_js", js_payload) self.print_result(result) # Test crawl endpoint crawl_payload = { "urls": [test_url], "browser_config": {}, "crawler_config": {} } result = self.test_post_endpoint("/crawl", crawl_payload) self.print_result(result) # Test crawl endpoint with raw HTML crawl_payload = { "urls": [test_raw_html_url], "browser_config": {}, "crawler_config": {} } result = self.test_post_endpoint("/crawl", crawl_payload) self.print_result(result) # Test config dump config_payload = {"code": "CrawlerRunConfig()"} result = self.test_post_endpoint("/config/dump", config_payload) self.print_result(result) # Test LLM endpoint llm_endpoint = f"/llm/{test_url}?q=Extract%20main%20content" result = self.test_get_endpoint(llm_endpoint) self.print_result(result) # Test ask endpoint ask_endpoint = "/ask?context_type=all&query=crawl4ai&max_results=5" result = 
self.test_get_endpoint(ask_endpoint) print(result) self.print_result(result) # Test job APIs print("\n=== JOB APIs ===") # Test LLM job llm_job_payload = { "url": test_url, "q": "Extract main content", "cache": False } result = self.test_post_endpoint("/llm/job", llm_job_payload) self.print_result(result) # Test crawl job crawl_job_payload = { "urls": [test_url], "browser_config": {}, "crawler_config": {} } result = self.test_post_endpoint("/crawl/job", crawl_job_payload) self.print_result(result) # Test MCP print("\n=== MCP APIs ===") # Test MCP schema result = self.test_get_endpoint("/mcp/schema") self.print_result(result) # Test error handling print("\n=== ERROR HANDLING ===") # Test invalid URL invalid_payload = {"url": "invalid-url", "f": "fit"} result = self.test_post_endpoint("/md", invalid_payload) self.print_result(result) # Test invalid endpoint result = self.test_get_endpoint("/nonexistent") self.print_result(result) # Print summary self.print_summary() def print_summary(self): """Print test results summary""" print("\n" + "=" * 60) print("📊 TEST RESULTS SUMMARY") print("=" * 60) total = len(self.results) passed = sum(1 for r in self.results if r['status'] == 'PASS') failed = sum(1 for r in self.results if r['status'] == 'FAIL') print(f"Total Tests: {total}") print(f"✅ Passed: {passed}") print(f"❌ Failed: {failed}") print(f"📈 Success Rate: {(passed/total)*100:.1f}%") if failed > 0: print("\n❌ FAILED TESTS:") for result in self.results: if result['status'] == 'FAIL': print(f" • {result['method']} {result['endpoint']}") if 'error' in result: print(f" Error: {result['error']}") # Performance statistics response_times = [r['response_time'] for r in self.results if r['response_time'] > 0] if response_times: avg_time = sum(response_times) / len(response_times) max_time = max(response_times) print(f"\n⏱️ Average Response Time: {avg_time:.3f}s") print(f"⏱️ Max Response Time: {max_time:.3f}s") # Save detailed report report_file = 
f"crawl4ai_test_report_{int(time.time())}.json" with open(report_file, 'w') as f: json.dump({ "timestamp": time.time(), "server_url": self.base_url, "version": "0.7.0", "summary": { "total": total, "passed": passed, "failed": failed }, "results": self.results }, f, indent=2) print(f"\n📄 Detailed report saved to: {report_file}") def main(): """Main test runner""" import argparse parser = argparse.ArgumentParser(description='Crawl4AI v0.7.0 API Test Suite') parser.add_argument('--url', default=BASE_URL, help='Base URL of the server') args = parser.parse_args() tester = SimpleApiTester(args.url) try: tester.run_all_tests() except KeyboardInterrupt: print("\n🛑 Test suite interrupted by user") except Exception as e: print(f"\n💥 Test suite failed with error: {e}") sys.exit(1) if __name__ == "__main__": main()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_server.py
tests/docker/test_server.py
import asyncio import json from typing import Optional from urllib.parse import quote async def test_endpoint( endpoint: str, url: str, params: Optional[dict] = None, expected_status: int = 200 ) -> None: """Test an endpoint and print results""" import aiohttp params = params or {} param_str = "&".join(f"{k}={v}" for k, v in params.items()) full_url = f"http://localhost:8000/{endpoint}/{quote(url)}" if param_str: full_url += f"?{param_str}" print(f"\nTesting: {full_url}") try: async with aiohttp.ClientSession() as session: async with session.get(full_url) as response: status = response.status try: data = await response.json() except: data = await response.text() print(f"Status: {status} (Expected: {expected_status})") if isinstance(data, dict): print(f"Response: {json.dumps(data, indent=2)}") else: print(f"Response: {data[:500]}...") # First 500 chars assert status == expected_status return data except Exception as e: print(f"Error: {str(e)}") return None async def test_llm_task_completion(task_id: str) -> None: """Poll task until completion""" for _ in range(10): # Try 10 times result = await test_endpoint("llm", task_id) if result and result.get("status") in ["completed", "failed"]: return result print("Task still processing, waiting 5 seconds...") await asyncio.sleep(5) print("Task timed out") async def run_tests(): print("Starting API Tests...") # Test URLs urls = [ "example.com", "https://www.python.org", "https://news.ycombinator.com/news", "https://github.com/trending" ] print("\n=== Testing Markdown Endpoint ===") for url in[] : #urls: # Test different filter types for filter_type in ["raw", "fit", "bm25", "llm"]: params = {"f": filter_type} if filter_type in ["bm25", "llm"]: params["q"] = "extract main content" # Test with and without cache for cache in ["0", "1"]: params["c"] = cache await test_endpoint("md", url, params) await asyncio.sleep(1) # Be nice to the server print("\n=== Testing LLM Endpoint ===") for url in []: # urls: # Test basic extraction 
result = await test_endpoint( "llm", url, {"q": "Extract title and main content"} ) if result and "task_id" in result: print("\nChecking task completion...") await test_llm_task_completion(result["task_id"]) # Test with schema schema = { "type": "object", "properties": { "title": {"type": "string"}, "content": {"type": "string"}, "links": {"type": "array", "items": {"type": "string"}} } } result = await test_endpoint( "llm", url, { "q": "Extract content with links", "s": json.dumps(schema), "c": "1" # Test with cache } ) if result and "task_id" in result: print("\nChecking schema task completion...") await test_llm_task_completion(result["task_id"]) await asyncio.sleep(2) # Be nice to the server print("\n=== Testing Error Cases ===") # Test invalid URL await test_endpoint( "md", "not_a_real_url", expected_status=500 ) # Test invalid filter type await test_endpoint( "md", "example.com", {"f": "invalid"}, expected_status=422 ) # Test LLM without query await test_endpoint( "llm", "example.com" ) # Test invalid task ID await test_endpoint( "llm", "llm_invalid_task", expected_status=404 ) print("\nAll tests completed!") if __name__ == "__main__": asyncio.run(run_tests())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_config_object.py
tests/docker/test_config_object.py
import json from crawl4ai import ( CrawlerRunConfig, DefaultMarkdownGenerator, RegexChunking, JsonCssExtractionStrategy, BM25ContentFilter, CacheMode ) from crawl4ai.deep_crawling import BFSDeepCrawlStrategy from crawl4ai.deep_crawling.filters import FastFilterChain from crawl4ai.deep_crawling.filters import FastContentTypeFilter, FastDomainFilter from crawl4ai.deep_crawling.scorers import FastKeywordRelevanceScorer def create_test_config() -> CrawlerRunConfig: # Set up content filtering and markdown generation content_filter = BM25ContentFilter( user_query="technology articles", ) markdown_generator = DefaultMarkdownGenerator( content_filter=content_filter, options={"ignore_links": False, "body_width": 0} ) # Set up extraction strategy extraction_schema = { "name": "ArticleExtractor", "baseSelector": "article.content", "fields": [ {"name": "title", "selector": "h1", "type": "text"}, {"name": "content", "selector": ".article-body", "type": "html"} ] } extraction_strategy = JsonCssExtractionStrategy(schema=extraction_schema) # Set up deep crawling filter_chain = FastFilterChain([ FastContentTypeFilter(["text/html"]), FastDomainFilter(blocked_domains=["ads.*"]) ]) url_scorer = FastKeywordRelevanceScorer( keywords=["article", "blog"], weight=1.0 ) deep_crawl_strategy = BFSDeepCrawlStrategy( max_depth=3, filter_chain=filter_chain, url_scorer=url_scorer ) # Create the config config = CrawlerRunConfig( word_count_threshold=200, extraction_strategy=extraction_strategy, chunking_strategy=RegexChunking(patterns=[r"\n\n"]), markdown_generator=markdown_generator, css_selector="main.content", excluded_tags=["nav", "footer"], keep_attrs=["href", "src"], cache_mode=CacheMode.BYPASS, wait_until="networkidle", page_timeout=30000, scan_full_page=True, deep_crawl_strategy=deep_crawl_strategy, verbose=True, stream=True ) return config def test_config_serialization_cycle(): # Create original config original_config = create_test_config() # Dump to serializable dictionary serialized = 
original_config.dump() print(json.dumps(serialized, indent=2)) # Load back into config object deserialized_config = CrawlerRunConfig.load(serialized) # Verify core attributes assert deserialized_config.word_count_threshold == original_config.word_count_threshold assert deserialized_config.css_selector == original_config.css_selector assert deserialized_config.excluded_tags == original_config.excluded_tags assert deserialized_config.keep_attrs == original_config.keep_attrs assert deserialized_config.cache_mode == original_config.cache_mode assert deserialized_config.wait_until == original_config.wait_until assert deserialized_config.page_timeout == original_config.page_timeout assert deserialized_config.scan_full_page == original_config.scan_full_page assert deserialized_config.verbose == original_config.verbose assert deserialized_config.stream == original_config.stream # Verify complex objects assert isinstance(deserialized_config.extraction_strategy, JsonCssExtractionStrategy) assert isinstance(deserialized_config.chunking_strategy, RegexChunking) assert isinstance(deserialized_config.markdown_generator, DefaultMarkdownGenerator) assert isinstance(deserialized_config.markdown_generator.content_filter, BM25ContentFilter) assert isinstance(deserialized_config.deep_crawl_strategy, BFSDeepCrawlStrategy) # Verify deep crawl strategy configuration assert deserialized_config.deep_crawl_strategy.max_depth == 3 assert isinstance(deserialized_config.deep_crawl_strategy.filter_chain, FastFilterChain) assert isinstance(deserialized_config.deep_crawl_strategy.url_scorer, FastKeywordRelevanceScorer) print("Serialization cycle test passed successfully!") if __name__ == "__main__": test_config_serialization_cycle()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_filter_deep_crawl.py
tests/docker/test_filter_deep_crawl.py
""" Test the complete fix for both the filter serialization and JSON serialization issues. """ import os import traceback from typing import Any import asyncio import httpx from crawl4ai import BrowserConfig, CacheMode, CrawlerRunConfig from crawl4ai.deep_crawling import ( BFSDeepCrawlStrategy, ContentRelevanceFilter, FilterChain, URLFilter, URLPatternFilter, ) CRAWL4AI_DOCKER_PORT = os.environ.get("CRAWL4AI_DOCKER_PORT", "11234") try: BASE_PORT = int(CRAWL4AI_DOCKER_PORT) except TypeError: BASE_PORT = 11234 BASE_URL = f"http://localhost:{BASE_PORT}/" # Adjust port as needed async def test_with_docker_client(filter_chain: list[URLFilter], max_pages: int = 20, timeout: int = 30) -> bool: """Test using the Docker client (same as 1419.py).""" from crawl4ai.docker_client import Crawl4aiDockerClient print("=" * 60) print("Testing with Docker Client") print("=" * 60) try: async with Crawl4aiDockerClient( base_url=BASE_URL, verbose=True, ) as client: crawler_config = CrawlerRunConfig( deep_crawl_strategy=BFSDeepCrawlStrategy( max_depth=2, # Keep it shallow for testing max_pages=max_pages, # Limit pages for testing filter_chain=FilterChain(filter_chain) ), cache_mode=CacheMode.BYPASS, ) print("\n1. Testing crawl with filters...") results = await client.crawl( ["https://docs.crawl4ai.com"], # Simple test page browser_config=BrowserConfig(headless=True), crawler_config=crawler_config, hooks_timeout=timeout, ) if results: print(f"✅ Crawl succeeded! 
Type: {type(results)}") if hasattr(results, 'success'): print(f"✅ Results success: {results.success}") # Test that we can iterate results without JSON errors if hasattr(results, '__iter__'): for i, result in enumerate(results): if hasattr(result, 'url'): print(f" Result {i}: {result.url[:50]}...") else: print(f" Result {i}: {str(result)[:50]}...") else: # Handle list of results print(f"✅ Got {len(results)} results") for i, result in enumerate(results[:3]): # Show first 3 print(f" Result {i}: {result.url[:50]}...") else: print("❌ Crawl failed - no results returned") return False print("\n✅ Docker client test completed successfully!") return True except Exception as e: print(f"❌ Docker client test failed: {e}") traceback.print_exc() return False async def test_with_rest_api(filters: list[dict[str, Any]], max_pages: int = 20, timeout: int = 30) -> bool: """Test using REST API directly.""" print("\n" + "=" * 60) print("Testing with REST API") print("=" * 60) # Create filter configuration deep_crawl_strategy_payload = { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": 2, "max_pages": max_pages, "filter_chain": { "type": "FilterChain", "params": { "filters": filters } } } } crawl_payload = { "urls": ["https://docs.crawl4ai.com"], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "deep_crawl_strategy": deep_crawl_strategy_payload, "cache_mode": "bypass" } } } try: async with httpx.AsyncClient() as client: print("\n1. 
Sending crawl request to REST API...") response = await client.post( f"{BASE_URL}crawl", json=crawl_payload, timeout=timeout, ) if response.status_code == 200: print(f"✅ REST API returned 200 OK") data = response.json() if data.get("success"): results = data.get("results", []) print(f"✅ Got {len(results)} results") for i, result in enumerate(results[:3]): print(f" Result {i}: {result.get('url', 'unknown')[:50]}...") else: print(f"❌ Crawl not successful: {data}") return False else: print(f"❌ REST API returned {response.status_code}") print(f" Response: {response.text[:500]}") return False print("\n✅ REST API test completed successfully!") return True except Exception as e: print(f"❌ REST API test failed: {e}") traceback.print_exc() return False async def main(): """Run all tests.""" print("\n🧪 TESTING COMPLETE FIX FOR DOCKER FILTER AND JSON ISSUES") print("=" * 60) print("Make sure the server is running with the updated code!") print("=" * 60) results = [] # Test 1: Docker client max_pages_ = [20, 5] timeouts = [30, 60] filter_chain_test_cases = [ [ URLPatternFilter( # patterns=["*about*", "*privacy*", "*terms*"], patterns=["*advanced*"], reverse=True ), ], [ ContentRelevanceFilter( query="about faq", threshold=0.2, ), ], ] for idx, (filter_chain, max_pages, timeout) in enumerate(zip(filter_chain_test_cases, max_pages_, timeouts)): docker_passed = await test_with_docker_client(filter_chain=filter_chain, max_pages=max_pages, timeout=timeout) results.append((f"Docker Client w/ filter chain {idx}", docker_passed)) # Test 2: REST API max_pages_ = [20, 5, 5] timeouts = [30, 60, 60] filters_test_cases = [ [ { "type": "URLPatternFilter", "params": { "patterns": ["*advanced*"], "reverse": True } } ], [ { "type": "ContentRelevanceFilter", "params": { "query": "about faq", "threshold": 0.2, } } ], [ { "type": "ContentRelevanceFilter", "params": { "query": ["about", "faq"], "threshold": 0.2, } } ], ] for idx, (filters, max_pages, timeout) in enumerate(zip(filters_test_cases, 
max_pages_, timeouts)): rest_passed = await test_with_rest_api(filters=filters, max_pages=max_pages, timeout=timeout) results.append((f"REST API w/ filters {idx}", rest_passed)) # Summary print("\n" + "=" * 60) print("FINAL TEST SUMMARY") print("=" * 60) all_passed = True for test_name, passed in results: status = "✅ PASSED" if passed else "❌ FAILED" print(f"{test_name:20} {status}") if not passed: all_passed = False print("=" * 60) if all_passed: print("🎉 ALL TESTS PASSED!") else: print("⚠️ Some tests failed. Please check the server logs for details.") return 0 if all_passed else 1 if __name__ == "__main__": import sys sys.exit(asyncio.run(main()))
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_hooks_utility.py
tests/docker/test_hooks_utility.py
""" Test script demonstrating the hooks_to_string utility and Docker client integration. """ import asyncio from crawl4ai import Crawl4aiDockerClient, hooks_to_string # Define hook functions as regular Python functions async def auth_hook(page, context, **kwargs): """Add authentication cookies.""" await context.add_cookies([{ 'name': 'test_cookie', 'value': 'test_value', 'domain': '.httpbin.org', 'path': '/' }]) return page async def scroll_hook(page, context, **kwargs): """Scroll to load lazy content.""" await page.evaluate("window.scrollTo(0, document.body.scrollHeight)") await page.wait_for_timeout(1000) return page async def viewport_hook(page, context, **kwargs): """Set custom viewport.""" await page.set_viewport_size({"width": 1920, "height": 1080}) return page async def test_hooks_utility(): """Test the hooks_to_string utility function.""" print("=" * 60) print("Testing hooks_to_string utility") print("=" * 60) # Create hooks dictionary with function objects hooks_dict = { "on_page_context_created": auth_hook, "before_retrieve_html": scroll_hook } # Convert to string format hooks_string = hooks_to_string(hooks_dict) print("\n✓ Successfully converted function objects to strings") print(f"\n✓ Converted {len(hooks_string)} hooks:") for hook_name in hooks_string.keys(): print(f" - {hook_name}") print("\n✓ Preview of converted hook:") print("-" * 60) print(hooks_string["on_page_context_created"][:200] + "...") print("-" * 60) return hooks_string async def test_docker_client_with_functions(): """Test Docker client with function objects (automatic conversion).""" print("\n" + "=" * 60) print("Testing Docker Client with Function Objects") print("=" * 60) # Note: This requires a running Crawl4AI Docker server # Uncomment the following to test with actual server: async with Crawl4aiDockerClient(base_url="http://localhost:11234", verbose=True) as client: # Pass function objects directly - they'll be converted automatically result = await client.crawl( 
["https://httpbin.org/html"], hooks={ "on_page_context_created": auth_hook, "before_retrieve_html": scroll_hook }, hooks_timeout=30 ) print(f"\n✓ Crawl successful: {result.success}") print(f"✓ URL: {result.url}") print("\n✓ Docker client accepts function objects directly") print("✓ Automatic conversion happens internally") print("✓ No manual string formatting needed!") async def test_docker_client_with_strings(): """Test Docker client with pre-converted strings.""" print("\n" + "=" * 60) print("Testing Docker Client with String Hooks") print("=" * 60) # Convert hooks to strings first hooks_dict = { "on_page_context_created": viewport_hook, "before_retrieve_html": scroll_hook } hooks_string = hooks_to_string(hooks_dict) # Note: This requires a running Crawl4AI Docker server # Uncomment the following to test with actual server: async with Crawl4aiDockerClient(base_url="http://localhost:11234", verbose=True) as client: # Pass string hooks - they'll be used as-is result = await client.crawl( ["https://httpbin.org/html"], hooks=hooks_string, hooks_timeout=30 ) print(f"\n✓ Crawl successful: {result.success}") print("\n✓ Docker client also accepts pre-converted strings") print("✓ Backward compatible with existing code") async def show_usage_patterns(): """Show different usage patterns.""" print("\n" + "=" * 60) print("Usage Patterns") print("=" * 60) print("\n1. Direct function usage (simplest):") print("-" * 60) print(""" async def my_hook(page, context, **kwargs): await page.set_viewport_size({"width": 1920, "height": 1080}) return page result = await client.crawl( ["https://example.com"], hooks={"on_page_context_created": my_hook} ) """) print("\n2. Convert then use:") print("-" * 60) print(""" hooks_dict = {"on_page_context_created": my_hook} hooks_string = hooks_to_string(hooks_dict) result = await client.crawl( ["https://example.com"], hooks=hooks_string ) """) print("\n3. 
Manual string (backward compatible):") print("-" * 60) print(""" hooks_string = { "on_page_context_created": ''' async def hook(page, context, **kwargs): await page.set_viewport_size({"width": 1920, "height": 1080}) return page ''' } result = await client.crawl( ["https://example.com"], hooks=hooks_string ) """) async def main(): """Run all tests.""" print("\n🚀 Crawl4AI Hooks Utility Test Suite\n") # Test the utility function # await test_hooks_utility() # Show usage with Docker client # await test_docker_client_with_functions() await test_docker_client_with_strings() # Show different patterns # await show_usage_patterns() # print("\n" + "=" * 60) # print("✓ All tests completed successfully!") # print("=" * 60) # print("\nKey Benefits:") # print(" • Write hooks as regular Python functions") # print(" • IDE support with autocomplete and type checking") # print(" • Automatic conversion to API format") # print(" • Backward compatible with string hooks") # print(" • Same utility used everywhere") # print("\n") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_llm_params.py
tests/docker/test_llm_params.py
#!/usr/bin/env python3 """ Test script for LLM temperature and base_url parameters in Crawl4AI Docker API. This demonstrates the new hierarchical configuration system: 1. Request-level parameters (highest priority) 2. Provider-specific environment variables 3. Global environment variables 4. System defaults (lowest priority) """ import asyncio import httpx import json import os from rich.console import Console from rich.panel import Panel from rich.syntax import Syntax from rich.table import Table console = Console() # Configuration BASE_URL = "http://localhost:11235" # Docker API endpoint TEST_URL = "https://httpbin.org/html" # Simple test page # --- Helper Functions --- async def check_server_health(client: httpx.AsyncClient) -> bool: """Check if the server is healthy.""" console.print("[bold cyan]Checking server health...[/]", end="") try: response = await client.get("/health", timeout=10.0) response.raise_for_status() console.print(" [bold green]✓ Server is healthy![/]") return True except Exception as e: console.print(f"\n[bold red]✗ Server health check failed: {e}[/]") console.print(f"Is the server running at {BASE_URL}?") return False def print_request(endpoint: str, payload: dict, title: str = "Request"): """Pretty print the request.""" syntax = Syntax(json.dumps(payload, indent=2), "json", theme="monokai") console.print(Panel.fit( f"[cyan]POST {endpoint}[/cyan]\n{syntax}", title=f"[bold blue]{title}[/]", border_style="blue" )) def print_response(response: dict, title: str = "Response"): """Pretty print relevant parts of the response.""" # Extract only the relevant parts relevant = {} if "markdown" in response: relevant["markdown"] = response["markdown"][:200] + "..." 
if len(response.get("markdown", "")) > 200 else response.get("markdown", "") if "success" in response: relevant["success"] = response["success"] if "url" in response: relevant["url"] = response["url"] if "filter" in response: relevant["filter"] = response["filter"] console.print(Panel.fit( Syntax(json.dumps(relevant, indent=2), "json", theme="monokai"), title=f"[bold green]{title}[/]", border_style="green" )) # --- Test Functions --- async def test_default_no_params(client: httpx.AsyncClient): """Test 1: No temperature or base_url specified - uses defaults""" console.rule("[bold yellow]Test 1: Default Configuration (No Parameters)[/]") payload = { "url": TEST_URL, "f": "llm", "q": "What is the main heading of this page? Answer in exactly 5 words." } print_request("/md", payload, "Request without temperature/base_url") try: response = await client.post("/md", json=payload, timeout=30.0) response.raise_for_status() data = response.json() print_response(data, "Response (using system defaults)") console.print("[dim]→ This used system defaults or environment variables if set[/]") except Exception as e: console.print(f"[red]Error: {e}[/]") async def test_request_temperature(client: httpx.AsyncClient): """Test 2: Request-level temperature (highest priority)""" console.rule("[bold yellow]Test 2: Request-Level Temperature[/]") # Test with low temperature (more focused) payload_low = { "url": TEST_URL, "f": "llm", "q": "What is the main heading? 
Be creative and poetic.", "temperature": 0.1 # Very low - should be less creative } print_request("/md", payload_low, "Low Temperature (0.1)") try: response = await client.post("/md", json=payload_low, timeout=30.0) response.raise_for_status() data_low = response.json() print_response(data_low, "Response with Low Temperature") console.print("[dim]→ Low temperature (0.1) should produce focused, less creative output[/]") except Exception as e: console.print(f"[red]Error: {e}[/]") console.print() # Test with high temperature (more creative) payload_high = { "url": TEST_URL, "f": "llm", "q": "What is the main heading? Be creative and poetic.", "temperature": 1.5 # High - should be more creative } print_request("/md", payload_high, "High Temperature (1.5)") try: response = await client.post("/md", json=payload_high, timeout=30.0) response.raise_for_status() data_high = response.json() print_response(data_high, "Response with High Temperature") console.print("[dim]→ High temperature (1.5) should produce more creative, varied output[/]") except Exception as e: console.print(f"[red]Error: {e}[/]") async def test_provider_override(client: httpx.AsyncClient): """Test 3: Provider override with temperature""" console.rule("[bold yellow]Test 3: Provider Override with Temperature[/]") provider = "gemini/gemini-2.5-flash-lite" payload = { "url": TEST_URL, "f": "llm", "q": "Summarize this page in one sentence.", "provider": provider, # Explicitly set provider "temperature": 0.7 } print_request("/md", payload, "Provider + Temperature Override") try: response = await client.post("/md", json=payload, timeout=30.0) response.raise_for_status() data = response.json() print_response(data, "Response with Provider Override") console.print(f"[dim]→ This explicitly uses {provider} with temperature 0.7[/]") except Exception as e: console.print(f"[red]Error: {e}[/]") async def test_base_url_custom(client: httpx.AsyncClient): """Test 4: Custom base_url (will fail unless you have a custom 
endpoint)""" console.rule("[bold yellow]Test 4: Custom Base URL (Demo Only)[/]") payload = { "url": TEST_URL, "f": "llm", "q": "What is this page about?", "base_url": "https://api.custom-endpoint.com/v1", # Custom endpoint "temperature": 0.5 } print_request("/md", payload, "Custom Base URL Request") console.print("[yellow]Note: This will fail unless you have a custom endpoint set up[/]") try: response = await client.post("/md", json=payload, timeout=10.0) response.raise_for_status() data = response.json() print_response(data, "Response from Custom Endpoint") except httpx.HTTPStatusError as e: console.print(f"[yellow]Expected failure (no custom endpoint): Status {e.response.status_code}[/]") except Exception as e: console.print(f"[yellow]Expected error: {e}[/]") async def test_llm_job_endpoint(client: httpx.AsyncClient): """Test 5: Test the /llm/job endpoint with temperature and base_url""" console.rule("[bold yellow]Test 5: LLM Job Endpoint with Parameters[/]") payload = { "url": TEST_URL, "q": "Extract the main title and any key information", "temperature": 0.3, # "base_url": "https://api.openai.com/v1" # Optional } print_request("/llm/job", payload, "LLM Job with Temperature") try: # Submit the job response = await client.post("/llm/job", json=payload, timeout=30.0) response.raise_for_status() job_data = response.json() if "task_id" in job_data: task_id = job_data["task_id"] console.print(f"[green]Job created with task_id: {task_id}[/]") # Poll for result (simplified - in production use proper polling) await asyncio.sleep(3) status_response = await client.get(f"/llm/job/{task_id}") status_data = status_response.json() if status_data.get("status") == "completed": console.print("[green]Job completed successfully![/]") if "result" in status_data: console.print(Panel.fit( Syntax(json.dumps(status_data["result"], indent=2), "json", theme="monokai"), title="Extraction Result", border_style="green" )) else: console.print(f"[yellow]Job status: {status_data.get('status', 
'unknown')}[/]") else: console.print(f"[red]Unexpected response: {job_data}[/]") except Exception as e: console.print(f"[red]Error: {e}[/]") async def test_llm_endpoint(client: httpx.AsyncClient): """ Quick QA round-trip with /llm. Asks a trivial question against SIMPLE_URL just to show wiring. """ import time import urllib.parse page_url = "https://kidocode.com" question = "What is the title of this page?" enc = urllib.parse.quote_plus(page_url, safe="") console.print(f"GET /llm/{enc}?q={question}") try: t0 = time.time() resp = await client.get(f"/llm/{enc}", params={"q": question}) dt = time.time() - t0 console.print( f"Response Status: [bold {'green' if resp.is_success else 'red'}]{resp.status_code}[/] (took {dt:.2f}s)") resp.raise_for_status() answer = resp.json().get("answer", "") console.print(Panel(answer or "No answer returned", title="LLM answer", border_style="magenta", expand=False)) except Exception as e: console.print(f"[bold red]Error hitting /llm:[/] {e}") async def show_environment_info(): """Display current environment configuration""" console.rule("[bold cyan]Current Environment Configuration[/]") table = Table(title="LLM Environment Variables", show_header=True, header_style="bold magenta") table.add_column("Variable", style="cyan", width=30) table.add_column("Value", style="yellow") table.add_column("Description", style="dim") env_vars = [ ("LLM_PROVIDER", "Global default provider"), ("LLM_TEMPERATURE", "Global default temperature"), ("LLM_BASE_URL", "Global custom API endpoint"), ("OPENAI_API_KEY", "OpenAI API key"), ("OPENAI_TEMPERATURE", "OpenAI-specific temperature"), ("OPENAI_BASE_URL", "OpenAI-specific endpoint"), ("ANTHROPIC_API_KEY", "Anthropic API key"), ("ANTHROPIC_TEMPERATURE", "Anthropic-specific temperature"), ("GROQ_API_KEY", "Groq API key"), ("GROQ_TEMPERATURE", "Groq-specific temperature"), ] for var, desc in env_vars: value = os.environ.get(var, "[not set]") if "API_KEY" in var and value != "[not set]": # Mask API keys for 
security value = value[:10] + "..." if len(value) > 10 else "***" table.add_row(var, value, desc) console.print(table) console.print() # --- Main Test Runner --- async def main(): """Run all tests""" console.print(Panel.fit( "[bold cyan]Crawl4AI LLM Parameters Test Suite[/]\n" + "Testing temperature and base_url configuration hierarchy", border_style="cyan" )) # Show current environment # await show_environment_info() # Create HTTP client async with httpx.AsyncClient(base_url=BASE_URL, timeout=60.0) as client: # Check server health if not await check_server_health(client): console.print("[red]Server is not available. Please ensure the Docker container is running.[/]") return # Run tests tests = [ ("Default Configuration", test_default_no_params), ("Request Temperature", test_request_temperature), ("Provider Override", test_provider_override), ("Custom Base URL", test_base_url_custom), ("LLM Job Endpoint", test_llm_job_endpoint), ("LLM Endpoint", test_llm_endpoint), ] for i, (name, test_func) in enumerate(tests, 1): if i > 1: console.print() # Add spacing between tests try: await test_func(client) except Exception as e: console.print(f"[red]Test '{name}' failed with error: {e}[/]") console.print_exception(show_locals=False) console.rule("[bold green]All Tests Complete![/]", style="green") # Summary console.print("\n[bold cyan]Configuration Hierarchy Summary:[/]") console.print("1. [yellow]Request parameters[/] - Highest priority (temperature, base_url in API call)") console.print("2. [yellow]Provider-specific env[/] - e.g., OPENAI_TEMPERATURE, GROQ_BASE_URL") console.print("3. [yellow]Global env variables[/] - LLM_TEMPERATURE, LLM_BASE_URL") console.print("4. 
[yellow]System defaults[/] - Lowest priority (provider/litellm defaults)") console.print() if __name__ == "__main__": try: asyncio.run(main()) except KeyboardInterrupt: console.print("\n[yellow]Tests interrupted by user.[/]") except Exception as e: console.print(f"\n[bold red]An error occurred:[/]") console.print_exception(show_locals=False)
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_server_token.py
tests/docker/test_server_token.py
import asyncio import json from typing import Optional from urllib.parse import quote async def get_token(session, email: str = "test@example.com") -> str: """Fetch a JWT token from the /token endpoint.""" url = "http://localhost:8000/token" payload = {"email": email} print(f"\nFetching token from {url} with email: {email}") try: async with session.post(url, json=payload) as response: status = response.status data = await response.json() print(f"Token Response Status: {status}") print(f"Token Response: {json.dumps(data, indent=2)}") if status == 200: return data["access_token"] else: raise Exception(f"Failed to get token: {data.get('detail', 'Unknown error')}") except Exception as e: print(f"Error fetching token: {str(e)}") raise async def test_endpoint( session, endpoint: str, url: str, token: str, params: Optional[dict] = None, expected_status: int = 200 ) -> Optional[dict]: """Test an endpoint with token and print results.""" params = params or {} param_str = "&".join(f"{k}={v}" for k, v in params.items()) full_url = f"http://localhost:8000/{endpoint}/{quote(url)}" if param_str: full_url += f"?{param_str}" headers = {"Authorization": f"Bearer {token}"} print(f"\nTesting: {full_url}") try: async with session.get(full_url, headers=headers) as response: status = response.status try: data = await response.json() except: data = await response.text() print(f"Status: {status} (Expected: {expected_status})") if isinstance(data, dict): print(f"Response: {json.dumps(data, indent=2)}") else: print(f"Response: {data[:500]}...") # First 500 chars assert status == expected_status, f"Expected {expected_status}, got {status}" return data except Exception as e: print(f"Error: {str(e)}") return None async def test_stream_crawl(session, token: str): """Test the /crawl/stream endpoint with multiple URLs.""" url = "http://localhost:8000/crawl/stream" payload = { "urls": [ "https://example.com", "https://example.com/page1", # Replicated example.com with variation 
"https://example.com/page2", # Replicated example.com with variation "https://example.com/page3", # Replicated example.com with variation # "https://www.python.org", # "https://news.ycombinator.com/news" ], "browser_config": {"headless": True, "viewport": {"width": 1200}}, "crawler_config": {"stream": True, "cache_mode": "bypass"} } headers = {"Authorization": f"Bearer {token}"} print(f"\nTesting Streaming Crawl: {url}") print(f"Payload: {json.dumps(payload, indent=2)}") try: async with session.post(url, json=payload, headers=headers) as response: status = response.status print(f"Status: {status} (Expected: 200)") assert status == 200, f"Expected 200, got {status}" # Read streaming response line-by-line (NDJSON) async for line in response.content: if line: data = json.loads(line.decode('utf-8').strip()) print(f"Streamed Result: {json.dumps(data, indent=2)}") except Exception as e: print(f"Error in streaming crawl test: {str(e)}") async def run_tests(): import aiohttp print("Starting API Tests...") # Test URLs urls = [ "example.com", "https://www.python.org", "https://news.ycombinator.com/news", "https://github.com/trending" ] async with aiohttp.ClientSession() as session: # Fetch token once and reuse it token = await get_token(session) if not token: print("Aborting tests due to token failure!") return print("\n=== Testing Crawl Endpoint ===") crawl_payload = { "urls": ["https://example.com"], "browser_config": {"headless": True}, "crawler_config": {"stream": False} } async with session.post( "http://localhost:8000/crawl", json=crawl_payload, headers={"Authorization": f"Bearer {token}"} ) as response: status = response.status data = await response.json() print(f"\nCrawl Endpoint Status: {status}") print(f"Crawl Response: {json.dumps(data, indent=2)}") print("\n=== Testing Crawl Stream Endpoint ===") await test_stream_crawl(session, token) print("\n=== Testing Markdown Endpoint ===") for url in []: #urls: for filter_type in ["raw", "fit", "bm25", "llm"]: params = 
{"f": filter_type} if filter_type in ["bm25", "llm"]: params["q"] = "extract main content" for cache in ["0", "1"]: params["c"] = cache await test_endpoint(session, "md", url, token, params) await asyncio.sleep(1) # Be nice to the server print("\n=== Testing LLM Endpoint ===") for url in urls: # Test basic extraction (direct response now) result = await test_endpoint( session, "llm", url, token, {"q": "Extract title and main content"} ) # Test with schema (direct response) schema = { "type": "object", "properties": { "title": {"type": "string"}, "content": {"type": "string"}, "links": {"type": "array", "items": {"type": "string"}} } } result = await test_endpoint( session, "llm", url, token, { "q": "Extract content with links", "s": json.dumps(schema), "c": "1" # Test with cache } ) await asyncio.sleep(2) # Be nice to the server print("\n=== Testing Error Cases ===") # Test invalid URL await test_endpoint( session, "md", "not_a_real_url", token, expected_status=500 ) # Test invalid filter type await test_endpoint( session, "md", "example.com", token, {"f": "invalid"}, expected_status=422 ) # Test LLM without query (should fail per your server logic) await test_endpoint( session, "llm", "example.com", token, expected_status=400 ) print("\nAll tests completed!") if __name__ == "__main__": asyncio.run(run_tests())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_dockerclient.py
tests/docker/test_dockerclient.py
import asyncio from crawl4ai.docker_client import Crawl4aiDockerClient from crawl4ai import ( BrowserConfig, CrawlerRunConfig ) async def main(): async with Crawl4aiDockerClient(base_url="http://localhost:8000", verbose=True) as client: await client.authenticate("test@example.com") # Non-streaming crawl results = await client.crawl( ["https://example.com", "https://python.org"], browser_config=BrowserConfig(headless=True), crawler_config=CrawlerRunConfig() ) print(f"Non-streaming results: {results}") # Streaming crawl crawler_config = CrawlerRunConfig(stream=True) async for result in await client.crawl( ["https://example.com", "https://python.org"], browser_config=BrowserConfig(headless=True), crawler_config=crawler_config ): print(f"Streamed result: {result}") # Get schema schema = await client.get_schema() print(f"Schema: {schema}") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_rest_api_deep_crawl.py
tests/docker/test_rest_api_deep_crawl.py
# ==== File: test_rest_api_deep_crawl.py ==== import pytest import pytest_asyncio import httpx import json import asyncio import os from typing import List, Dict, Any, AsyncGenerator from dotenv import load_dotenv load_dotenv() # Load environment variables from .env file if present # --- Test Configuration --- BASE_URL = os.getenv("CRAWL4AI_TEST_URL", "http://localhost:11235") # If server is running in Docker, use the host's IP BASE_URL = os.getenv("CRAWL4AI_TEST_URL", "http://localhost:8020") # If server is running in dev debug mode DEEP_CRAWL_BASE_URL = "https://docs.crawl4ai.com/samples/deepcrawl/" DEEP_CRAWL_DOMAIN = "docs.crawl4ai.com" # Used for domain filter # --- Helper Functions --- def load_proxies_from_env() -> List[Dict]: """Load proxies from PROXIES environment variable""" proxies = [] proxies_str = os.getenv("PROXIES", "") if not proxies_str: print("PROXIES environment variable not set or empty.") return proxies try: proxy_list = proxies_str.split(",") for proxy in proxy_list: proxy = proxy.strip() if not proxy: continue parts = proxy.split(":") if len(parts) == 4: ip, port, username, password = parts proxies.append({ "server": f"http://{ip}:{port}", # Assuming http, adjust if needed "username": username, "password": password, "ip": ip # Store original IP if available }) elif len(parts) == 2: # ip:port only ip, port = parts proxies.append({ "server": f"http://{ip}:{port}", "ip": ip }) else: print(f"Skipping invalid proxy string format: {proxy}") except Exception as e: print(f"Error loading proxies from environment: {e}") return proxies async def check_server_health(client: httpx.AsyncClient): """Check if the server is healthy before running tests.""" try: response = await client.get("/health") response.raise_for_status() print(f"\nServer healthy: {response.json()}") return True except (httpx.RequestError, httpx.HTTPStatusError) as e: pytest.fail(f"Server health check failed: {e}. 
Is the server running at {BASE_URL}?", pytrace=False) async def assert_crawl_result_structure(result: Dict[str, Any], check_ssl=False): """Asserts the basic structure of a single crawl result.""" assert isinstance(result, dict) assert "url" in result assert "success" in result assert "html" in result # Basic crawls should return HTML assert "metadata" in result assert isinstance(result["metadata"], dict) assert "depth" in result["metadata"] # Deep crawls add depth if check_ssl: assert "ssl_certificate" in result # Check if SSL info is present assert isinstance(result["ssl_certificate"], dict) or result["ssl_certificate"] is None async def process_streaming_response(response: httpx.Response) -> List[Dict[str, Any]]: """Processes an NDJSON streaming response.""" results = [] completed = False async for line in response.aiter_lines(): if line: try: data = json.loads(line) if data.get("status") == "completed": completed = True break # Stop processing after completion marker elif data.get("url"): # Ensure it looks like a result object results.append(data) else: print(f"Received non-result JSON line: {data}") # Log other status messages if needed except json.JSONDecodeError: pytest.fail(f"Failed to decode JSON line: {line}") assert completed, "Streaming response did not end with a completion marker." return results # --- Pytest Fixtures --- @pytest_asyncio.fixture(scope="function") async def async_client() -> AsyncGenerator[httpx.AsyncClient, None]: """Provides an async HTTP client""" # Increased timeout for potentially longer deep crawls async with httpx.AsyncClient(base_url=BASE_URL, timeout=300.0) as client: yield client # No explicit close needed with 'async with' # --- Test Class --- @pytest.mark.asyncio class TestDeepCrawlEndpoints: @pytest_asyncio.fixture(autouse=True) async def check_health_before_tests(self, async_client: httpx.AsyncClient): """Fixture to ensure server is healthy before each test in the class.""" await check_server_health(async_client) # 1. 
Basic Deep Crawl async def test_deep_crawl_basic_bfs(self, async_client: httpx.AsyncClient): """Test BFS deep crawl with limited depth and pages.""" max_depth = 1 max_pages = 3 # start_url + 2 more payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", # Use string value for CacheMode "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, # Minimal filters for basic test "filter_chain": { "type": "FilterChain", "params": { "filters": [ { "type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]} } ] } } } } } } } response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() assert data["success"] is True assert isinstance(data["results"], list) assert len(data["results"]) > 1 # Should be more than just the start URL assert len(data["results"]) <= max_pages # Respect max_pages found_depth_0 = False found_depth_1 = False for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True assert DEEP_CRAWL_DOMAIN in result["url"] depth = result["metadata"]["depth"] assert depth <= max_depth if depth == 0: found_depth_0 = True if depth == 1: found_depth_1 = True assert found_depth_0 assert found_depth_1 # 2. 
Deep Crawl with Filtering async def test_deep_crawl_with_filters(self, async_client: httpx.AsyncClient): """Test BFS deep crawl with content type and domain filters.""" max_depth = 1 max_pages = 5 payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, "filter_chain": { "type": "FilterChain", "params": { "filters": [ { "type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]} }, { "type": "ContentTypeFilter", "params": {"allowed_types": ["text/html"]} }, # Example: Exclude specific paths using regex { "type": "URLPatternFilter", "params": { "patterns": ["*/category-3/*"], # Block category 3 "reverse": True # Block if match } } ] } } } } } } } response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() assert data["success"] is True assert len(data["results"]) > 0 assert len(data["results"]) <= max_pages for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True assert DEEP_CRAWL_DOMAIN in result["url"] assert "category-3" not in result["url"] # Check if filter worked assert result["metadata"]["depth"] <= max_depth # 3. 
Deep Crawl with Scoring async def test_deep_crawl_with_scoring(self, async_client: httpx.AsyncClient): """Test BFS deep crawl with URL scoring.""" max_depth = 1 max_pages = 4 payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, "filter_chain": { # Keep basic domain filter "type": "FilterChain", "params": { "filters": [{"type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]}}]} }, "url_scorer": { # Add scorer "type": "CompositeScorer", "params": { "scorers": [ { # Favor pages with 'product' in the URL "type": "KeywordRelevanceScorer", "params": {"keywords": ["product"], "weight": 1.0} }, { # Penalize deep paths slightly "type": "PathDepthScorer", "params": {"optimal_depth": 2, "weight": -0.2} } ] } }, # Set a threshold if needed: "score_threshold": 0.1 } } } } } response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() assert data["success"] is True assert len(data["results"]) > 0 assert len(data["results"]) <= max_pages # Check if results seem biased towards products (harder to assert strictly without knowing exact scores) product_urls_found = any("product_" in result["url"] for result in data["results"] if result["metadata"]["depth"] > 0) print(f"Product URLs found among depth > 0 results: {product_urls_found}") # We expect scoring to prioritize product pages if available within limits # assert product_urls_found # This might be too strict depending on site structure and limits for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True assert result["metadata"]["depth"] <= max_depth # 4. 
Deep Crawl with CSS Extraction async def test_deep_crawl_with_css_extraction(self, async_client: httpx.AsyncClient): """Test BFS deep crawl combined with JsonCssExtractionStrategy.""" max_depth = 6 # Go deep enough to reach product pages max_pages = 20 # Schema to extract product details product_schema = { "name": "ProductDetails", "baseSelector": "div.container", # Base for product page "fields": [ {"name": "product_title", "selector": "h1", "type": "text"}, {"name": "price", "selector": ".product-price", "type": "text"}, {"name": "description", "selector": ".product-description p", "type": "text"}, {"name": "specs", "selector": ".product-specs li", "type": "list", "fields":[ {"name": "spec_name", "selector": ".spec-name", "type": "text"}, {"name": "spec_value", "selector": ".spec-value", "type": "text"} ]} ] } payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "extraction_strategy": { # Apply extraction to ALL crawled pages "type": "JsonCssExtractionStrategy", "params": {"schema": {"type": "dict", "value": product_schema}} }, "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, "filter_chain": { # Only crawl HTML on our domain "type": "FilterChain", "params": { "filters": [ {"type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]}}, {"type": "ContentTypeFilter", "params": {"allowed_types": ["text/html"]}} ] } } # Optional: Add scoring to prioritize product pages for extraction } } } } } response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() assert data["success"] is True assert len(data["results"]) > 0 # assert len(data["results"]) <= max_pages found_extracted_product = False for result in data["results"]: await assert_crawl_result_structure(result) assert 
result["success"] is True assert "extracted_content" in result if "product_" in result["url"]: # Check product pages specifically assert result["extracted_content"] is not None try: extracted = json.loads(result["extracted_content"]) # Schema returns list even if one base match assert isinstance(extracted, list) if extracted: item = extracted[0] assert "product_title" in item and item["product_title"] assert "price" in item and item["price"] # Specs might be empty list if not found assert "specs" in item and isinstance(item["specs"], list) found_extracted_product = True print(f"Extracted product: {item.get('product_title')}") except (json.JSONDecodeError, AssertionError, IndexError) as e: pytest.fail(f"Extraction validation failed for {result['url']}: {e}\nContent: {result['extracted_content']}") # else: # # Non-product pages might have None or empty list depending on schema match # assert result["extracted_content"] is None or json.loads(result["extracted_content"]) == [] assert found_extracted_product, "Did not find any pages where product data was successfully extracted." # 5. 
Deep Crawl with LLM Extraction (Requires Server LLM Setup) async def test_deep_crawl_with_llm_extraction(self, async_client: httpx.AsyncClient): """Test BFS deep crawl combined with LLMExtractionStrategy.""" max_depth = 1 # Limit depth to keep LLM calls manageable max_pages = 3 payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "extraction_strategy": { # Apply LLM extraction to crawled pages "type": "LLMExtractionStrategy", "params": { "instruction": "Extract the main H1 title and the text content of the first paragraph.", "llm_config": { # Example override, rely on server default if possible "type": "LLMConfig", "params": {"provider": "openai/gpt-4.1-mini"} # Use a cheaper model for testing }, "schema": { # Expected JSON output "type": "dict", "value": { "title": "PageContent", "type": "object", "properties": { "h1_title": {"type": "string"}, "first_paragraph": {"type": "string"} } } } } }, "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, "filter_chain": { "type": "FilterChain", "params": { "filters": [ {"type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]}}, {"type": "ContentTypeFilter", "params": {"allowed_types": ["text/html"]}} ] } } } } } } } try: response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: pytest.fail(f"Deep Crawl + LLM extraction request failed: {e}. Response: {e.response.text}. 
Check server logs and LLM API key setup.") except httpx.RequestError as e: pytest.fail(f"Deep Crawl + LLM extraction request failed: {e}.") assert data["success"] is True assert len(data["results"]) > 0 assert len(data["results"]) <= max_pages found_llm_extraction = False for result in data["results"]: await assert_crawl_result_structure(result) assert result["success"] is True assert "extracted_content" in result assert result["extracted_content"] is not None try: extracted = json.loads(result["extracted_content"]) if isinstance(extracted, list): extracted = extracted[0] # Handle list output assert isinstance(extracted, dict) assert "h1_title" in extracted # Check keys based on schema assert "first_paragraph" in extracted found_llm_extraction = True print(f"LLM extracted from {result['url']}: Title='{extracted.get('h1_title')}'") except (json.JSONDecodeError, AssertionError, IndexError, TypeError) as e: pytest.fail(f"LLM extraction validation failed for {result['url']}: {e}\nContent: {result['extracted_content']}") assert found_llm_extraction, "LLM extraction did not yield expected data on any crawled page." # 6. 
Deep Crawl with SSL Certificate Fetching async def test_deep_crawl_with_ssl(self, async_client: httpx.AsyncClient): """Test BFS deep crawl with fetch_ssl_certificate enabled.""" max_depth = 0 # Only fetch for start URL to keep test fast max_pages = 1 payload = { "urls": [DEEP_CRAWL_BASE_URL], "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "fetch_ssl_certificate": True, # <-- Enable SSL fetching "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, } } } } } response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() assert data["success"] is True assert len(data["results"]) == 1 result = data["results"][0] await assert_crawl_result_structure(result, check_ssl=True) # <-- Tell helper to check SSL field assert result["success"] is True # Check if SSL info was actually retrieved if result["ssl_certificate"]: # Assert directly using dictionary keys assert isinstance(result["ssl_certificate"], dict) # Verify it's a dict assert "issuer" in result["ssl_certificate"] assert "subject" in result["ssl_certificate"] # --- MODIFIED ASSERTIONS --- assert "not_before" in result["ssl_certificate"] # Check for the actual key assert "not_after" in result["ssl_certificate"] # Check for the actual key # --- END MODIFICATIONS --- assert "fingerprint" in result["ssl_certificate"] # Check another key # This print statement using .get() already works correctly with dictionaries print(f"SSL Issuer Org: {result['ssl_certificate'].get('issuer', {}).get('O', 'N/A')}") print(f"SSL Valid From: {result['ssl_certificate'].get('not_before', 'N/A')}") else: # This part remains the same print("SSL Certificate was null in the result.") # 7. 
Deep Crawl with Proxy Rotation (Requires PROXIES env var) async def test_deep_crawl_with_proxies(self, async_client: httpx.AsyncClient): """Test BFS deep crawl using proxy rotation.""" proxies = load_proxies_from_env() if not proxies: pytest.skip("Skipping proxy test: PROXIES environment variable not set or empty.") print(f"\nTesting with {len(proxies)} proxies loaded from environment.") max_depth = 1 max_pages = 3 payload = { "urls": [DEEP_CRAWL_BASE_URL], # Use the dummy site # Use a BrowserConfig that *might* pick up proxy if set, but rely on CrawlerRunConfig "browser_config": {"type": "BrowserConfig", "params": {"headless": True}}, "crawler_config": { "type": "CrawlerRunConfig", "params": { "stream": False, "cache_mode": "BYPASS", "proxy_rotation_strategy": { # <-- Define the strategy "type": "RoundRobinProxyStrategy", "params": { # Convert ProxyConfig dicts back to the serialized format expected by server "proxies": [{"type": "ProxyConfig", "params": p} for p in proxies] } }, "deep_crawl_strategy": { "type": "BFSDeepCrawlStrategy", "params": { "max_depth": max_depth, "max_pages": max_pages, "filter_chain": { "type": "FilterChain", "params": { "filters": [{"type": "DomainFilter", "params": {"allowed_domains": [DEEP_CRAWL_DOMAIN]}}]} } } } } } } try: response = await async_client.post("/crawl", json=payload) response.raise_for_status() data = response.json() except httpx.HTTPStatusError as e: # Proxies often cause connection errors, catch them pytest.fail(f"Proxy deep crawl failed: {e}. Response: {e.response.text}. Are proxies valid and accessible by the server?") except httpx.RequestError as e: pytest.fail(f"Proxy deep crawl request failed: {e}. 
Are proxies valid and accessible?") assert data["success"] is True assert len(data["results"]) > 0 assert len(data["results"]) <= max_pages # Primary assertion is that the crawl succeeded *with* proxy config print(f"Proxy deep crawl completed successfully for {len(data['results'])} pages.") # Verifying specific proxy usage requires server logs or custom headers/responses # --- Main Execution Block (for running script directly) --- if __name__ == "__main__": pytest_args = ["-v", "-s", __file__] # Example: Run only proxy test # pytest_args.append("-k test_deep_crawl_with_proxies") print(f"Running pytest with args: {pytest_args}") exit_code = pytest.main(pytest_args) print(f"Pytest finished with exit code: {exit_code}")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/docker/test_serialization.py
tests/docker/test_serialization.py
import inspect from typing import Any, Dict from enum import Enum from crawl4ai import LLMConfig def to_serializable_dict(obj: Any) -> Dict: """ Recursively convert an object to a serializable dictionary using {type, params} structure for complex objects. """ if obj is None: return None # Handle basic types if isinstance(obj, (str, int, float, bool)): return obj # Handle Enum if isinstance(obj, Enum): return { "type": obj.__class__.__name__, "params": obj.value } # Handle datetime objects if hasattr(obj, 'isoformat'): return obj.isoformat() # Handle lists, tuples, and sets if isinstance(obj, (list, tuple, set)): return [to_serializable_dict(item) for item in obj] # Handle dictionaries - preserve them as-is if isinstance(obj, dict): return { "type": "dict", # Mark as plain dictionary "value": {str(k): to_serializable_dict(v) for k, v in obj.items()} } # Handle class instances if hasattr(obj, '__class__'): # Get constructor signature sig = inspect.signature(obj.__class__.__init__) params = sig.parameters # Get current values current_values = {} for name, param in params.items(): if name == 'self': continue value = getattr(obj, name, param.default) # Only include if different from default, considering empty values if not (is_empty_value(value) and is_empty_value(param.default)): if value != param.default: current_values[name] = to_serializable_dict(value) return { "type": obj.__class__.__name__, "params": current_values } return str(obj) def from_serializable_dict(data: Any) -> Any: """ Recursively convert a serializable dictionary back to an object instance. 
""" if data is None: return None # Handle basic types if isinstance(data, (str, int, float, bool)): return data # Handle typed data if isinstance(data, dict) and "type" in data: # Handle plain dictionaries if data["type"] == "dict": return {k: from_serializable_dict(v) for k, v in data["value"].items()} # Import from crawl4ai for class instances import crawl4ai cls = getattr(crawl4ai, data["type"]) # Handle Enum if issubclass(cls, Enum): return cls(data["params"]) # Handle class instances constructor_args = { k: from_serializable_dict(v) for k, v in data["params"].items() } return cls(**constructor_args) # Handle lists if isinstance(data, list): return [from_serializable_dict(item) for item in data] # Handle raw dictionaries (legacy support) if isinstance(data, dict): return {k: from_serializable_dict(v) for k, v in data.items()} return data def is_empty_value(value: Any) -> bool: """Check if a value is effectively empty/null.""" if value is None: return True if isinstance(value, (list, tuple, set, dict, str)) and len(value) == 0: return True return False # if __name__ == "__main__": # from crawl4ai import ( # CrawlerRunConfig, CacheMode, DefaultMarkdownGenerator, # PruningContentFilter, BM25ContentFilter, LLMContentFilter, # JsonCssExtractionStrategy, CosineStrategy, RegexChunking, # WebScrapingStrategy, LXMLWebScrapingStrategy # ) # # Test Case 1: BM25 content filtering through markdown generator # config1 = CrawlerRunConfig( # cache_mode=CacheMode.BYPASS, # markdown_generator=DefaultMarkdownGenerator( # content_filter=BM25ContentFilter( # user_query="technology articles", # bm25_threshold=1.2, # language="english" # ) # ), # chunking_strategy=RegexChunking(patterns=[r"\n\n", r"\.\s+"]), # excluded_tags=["nav", "footer", "aside"], # remove_overlay_elements=True # ) # # Serialize # serialized = to_serializable_dict(config1) # print("\nSerialized Config:") # print(serialized) # # Example output structure would now look like: # """ # { # "type": "CrawlerRunConfig", 
# "params": { # "cache_mode": { # "type": "CacheMode", # "params": "bypass" # }, # "markdown_generator": { # "type": "DefaultMarkdownGenerator", # "params": { # "content_filter": { # "type": "BM25ContentFilter", # "params": { # "user_query": "technology articles", # "bm25_threshold": 1.2, # "language": "english" # } # } # } # } # } # } # """ # # Deserialize # deserialized = from_serializable_dict(serialized) # print("\nDeserialized Config:") # print(to_serializable_dict(deserialized)) # # Verify they match # assert to_serializable_dict(config1) == to_serializable_dict(deserialized) # print("\nVerification passed: Configuration matches after serialization/deserialization!") if __name__ == "__main__": from crawl4ai import ( CrawlerRunConfig, CacheMode, DefaultMarkdownGenerator, PruningContentFilter, BM25ContentFilter, LLMContentFilter, JsonCssExtractionStrategy, RegexChunking, WebScrapingStrategy, LXMLWebScrapingStrategy ) # Test Case 1: BM25 content filtering through markdown generator config1 = CrawlerRunConfig( cache_mode=CacheMode.BYPASS, markdown_generator=DefaultMarkdownGenerator( content_filter=BM25ContentFilter( user_query="technology articles", bm25_threshold=1.2, language="english" ) ), chunking_strategy=RegexChunking(patterns=[r"\n\n", r"\.\s+"]), excluded_tags=["nav", "footer", "aside"], remove_overlay_elements=True ) # Test Case 2: LLM-based extraction with pruning filter schema = { "baseSelector": "article.post", "fields": [ {"name": "title", "selector": "h1", "type": "text"}, {"name": "content", "selector": ".content", "type": "html"} ] } config2 = CrawlerRunConfig( extraction_strategy=JsonCssExtractionStrategy(schema=schema), markdown_generator=DefaultMarkdownGenerator( content_filter=PruningContentFilter( threshold=0.48, threshold_type="fixed", min_word_threshold=0 ), options={"ignore_links": True} ), scraping_strategy=LXMLWebScrapingStrategy() ) # Test Case 3:LLM content filter config3 = CrawlerRunConfig( markdown_generator=DefaultMarkdownGenerator( 
content_filter=LLMContentFilter( llm_config = LLMConfig(provider="openai/gpt-4"), instruction="Extract key technical concepts", chunk_token_threshold=2000, overlap_rate=0.1 ), options={"ignore_images": True} ), scraping_strategy=WebScrapingStrategy() ) # Test all configurations test_configs = [config1, config2, config3] for i, config in enumerate(test_configs, 1): print(f"\nTesting Configuration {i}:") # Serialize serialized = to_serializable_dict(config) print(f"\nSerialized Config {i}:") print(serialized) # Deserialize deserialized = from_serializable_dict(serialized) print(f"\nDeserialized Config {i}:") print(to_serializable_dict(deserialized)) # Convert back to dict for comparison # Verify they match assert to_serializable_dict(config) == to_serializable_dict(deserialized) print(f"\nVerification passed: Configuration {i} matches after serialization/deserialization!")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/async_assistant/test_extract_pipeline_v2.py
tests/async_assistant/test_extract_pipeline_v2.py
""" Test implementation v2: Combined classification and preparation in one LLM call. More efficient approach that reduces token usage and LLM calls. """ import asyncio import json import os from typing import List, Dict, Any, Optional, Union from lxml import html as lxml_html import re from crawl4ai import AsyncWebCrawler, CrawlerRunConfig from crawl4ai.async_configs import LLMConfig from crawl4ai import JsonCssExtractionStrategy, LLMExtractionStrategy from crawl4ai.utils import perform_completion_with_backoff async def extract_pipeline_v2( base_url: str, urls: Union[str, List[str], None], query: str, target_json_example: Optional[str] = None, force_llm: bool = False, verbose: bool = True ) -> Union[Dict, List[Dict]]: """ Improved extraction pipeline with combined classification and preparation. Pipeline: 1. Quick crawl & HTML skimming 2. Combined LLM call for classification + preparation 3. Execute appropriate extraction strategy """ # Normalize URLs if urls is None: urls = base_url target_urls = [urls] if isinstance(urls, str) else urls single_result = isinstance(urls, str) or urls is None # LLM configs llm_small = LLMConfig( provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY") ) llm_small.temperature = 0.3 llm_strong = LLMConfig( provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY") ) llm_strong.temperature = 0.5 def vprint(msg: str): if verbose: print(f"🔍 {msg}") vprint(f"Query: '{query}'") if target_json_example: vprint(f"Target format provided: {target_json_example[:100]}...") # Step 1: Quick crawl for analysis async with AsyncWebCrawler(verbose=False) as crawler: vprint(f"Quick crawl: {base_url}") quick_result = await crawler.arun( url=base_url, config=CrawlerRunConfig( cache_mode="bypass", delay_before_return_html=2.0 ) ) if not quick_result.success: raise Exception(f"Failed to crawl {base_url}") # HTML Skimming def skim_html(html: str) -> str: """Remove non-structural elements using lxml.""" parser = 
lxml_html.HTMLParser(remove_comments=True) tree = lxml_html.fromstring(html, parser=parser) # Remove head section entirely for head in tree.xpath('//head'): head.getparent().remove(head) # Remove non-structural elements including SVGs for element in tree.xpath('//script | //style | //noscript | //meta | //link | //svg'): parent = element.getparent() if parent is not None: parent.remove(element) # Remove base64 images for img in tree.xpath('//img[@src]'): src = img.get('src', '') if 'base64' in src: img.set('src', 'BASE64_IMAGE') # Remove long class/id attributes for element in tree.xpath('//*[@class or @id]'): if element.get('class') and len(element.get('class')) > 100: element.set('class', 'LONG_CLASS') if element.get('id') and len(element.get('id')) > 50: element.set('id', 'LONG_ID') # Truncate text nodes for text_node in tree.xpath('//text()'): if text_node.strip() and len(text_node) > 100: parent = text_node.getparent() if parent is not None: new_text = text_node[:50] + "..." + text_node[-20:] if text_node.is_text: parent.text = new_text elif text_node.is_tail: parent.tail = new_text return lxml_html.tostring(tree, encoding='unicode') skimmed_html = skim_html(quick_result.html) vprint(f"Skimmed HTML from {len(quick_result.html)} to {len(skimmed_html)} chars") # Step 2: Combined classification and preparation if force_llm: classification_data = {"classification": "semantic"} vprint("Forced LLM extraction") else: combined_prompt = f""" Analyze this HTML and prepare for data extraction. 
User query: "{query}" """ if target_json_example: combined_prompt += f""" Target format: {target_json_example} """ combined_prompt += f""" HTML: <<<<HTML>>>> {skimmed_html} <<<<END HTML>>>> STEP 1: Determine extraction strategy - If data follows repeating HTML patterns (lists, tables, cards) → "structural" - If data requires understanding/inference → "semantic" STEP 2A: If STRUCTURAL extraction is appropriate: - Find the CSS selector for the BASE ELEMENT (repeating pattern) - Base element = container holding ONE data item (e.g., product card, table row) - Selector should select ALL instances, not too specific, not too general - Count approximate number of these elements """ if not target_json_example: combined_prompt += """ - Suggest what JSON structure can be extracted from one element """ combined_prompt += """ STEP 2B: If SEMANTIC extraction is needed: - Write a detailed instruction for what to extract - Be specific about the data needed """ if not target_json_example: combined_prompt += """ - Suggest expected JSON output structure """ combined_prompt += """ Return JSON with ONLY the relevant fields based on classification: { "classification": "structural" or "semantic", "confidence": 0.0-1.0, "reasoning": "brief explanation", // Include ONLY if classification is "structural": "base_selector": "css selector", "element_count": approximate number, // Include ONLY if classification is "semantic": "extraction_instruction": "detailed instruction", // Include if no target_json_example was provided: "suggested_json_example": { ... 
} } """ response = perform_completion_with_backoff( provider=llm_small.provider, prompt_with_variables=combined_prompt, api_token=llm_small.api_token, json_response=True, temperature=llm_small.temperature ) classification_data = json.loads(response.choices[0].message.content) vprint(f"Classification: {classification_data['classification']} (confidence: {classification_data['confidence']})") vprint(f"Reasoning: {classification_data['reasoning']}") # Use suggested JSON example if needed if not target_json_example and 'suggested_json_example' in classification_data: target_json_example = json.dumps(classification_data['suggested_json_example']) vprint(f"Using suggested example: {target_json_example}") # Step 3: Execute extraction based on classification if classification_data['classification'] == 'structural': vprint(f"Base selector: {classification_data['base_selector']}") vprint(f"Found ~{classification_data['element_count']} elements") # Get sample HTML for schema generation tree = lxml_html.fromstring(quick_result.html) parent_elements = tree.cssselect(classification_data['base_selector']) if not parent_elements: vprint("Base selector not found, falling back to semantic") classification_data['classification'] = 'semantic' else: # Use first element as sample sample_html = lxml_html.tostring(parent_elements[0], encoding='unicode') vprint(f"Generating schema from sample ({len(sample_html)} chars)") # Generate schema schema_params = { "html": sample_html, "query": query, "llm_config": llm_strong } if target_json_example: schema_params["target_json_example"] = target_json_example schema = JsonCssExtractionStrategy.generate_schema(**schema_params) vprint(f"Generated schema with {len(schema.get('fields', []))} fields") # Extract from all URLs extraction_strategy = JsonCssExtractionStrategy(schema) results = [] for idx, url in enumerate(target_urls): vprint(f"Extracting from: {url}") # Use already crawled HTML for base_url, crawl others if idx == 0 and url == base_url: # 
We already have this HTML, use raw:// to avoid re-crawling raw_url = f"raw://{quick_result.html}" vprint("Using cached HTML with raw:// scheme") else: # Need to crawl this URL raw_url = url result = await crawler.arun( url=raw_url, config=CrawlerRunConfig( extraction_strategy=extraction_strategy, cache_mode="bypass" ) ) if result.success and result.extracted_content: data = json.loads(result.extracted_content) results.append({ 'url': url, # Keep original URL for reference 'data': data, 'count': len(data) if isinstance(data, list) else 1, 'method': 'JsonCssExtraction', 'schema': schema }) return results[0] if single_result else results # Semantic extraction if classification_data['classification'] == 'semantic': vprint("Using LLM extraction") # Use generated instruction or create simple one if 'extraction_instruction' in classification_data: instruction = classification_data['extraction_instruction'] vprint(f"Generated instruction: {instruction[:100]}...") else: instruction = f"{query}\n\nReturn structured JSON data." 
extraction_strategy = LLMExtractionStrategy( llm_config=llm_strong, instruction=instruction ) results = [] for idx, url in enumerate(target_urls): vprint(f"LLM extracting from: {url}") # Use already crawled HTML for base_url, crawl others if idx == 0 and url == base_url: # We already have this HTML, use raw:// to avoid re-crawling raw_url = f"raw://{quick_result.html}" vprint("Using cached HTML with raw:// scheme") else: # Need to crawl this URL raw_url = url result = await crawler.arun( url=raw_url, config=CrawlerRunConfig( extraction_strategy=extraction_strategy, cache_mode="bypass" ) ) if result.success and result.extracted_content: data = json.loads(result.extracted_content) results.append({ 'url': url, # Keep original URL for reference 'data': data, 'count': len(data) if isinstance(data, list) else 1, 'method': 'LLMExtraction' }) return results[0] if single_result else results async def main(): """Test the improved extraction pipeline.""" print("\n🚀 CRAWL4AI EXTRACTION PIPELINE V2 TEST") print("="*50) try: # Test 1: Structural extraction (GitHub issues) print("\nTest 1: GitHub Issues (should use structural)") result = await extract_pipeline_v2( base_url="https://github.com/unclecode/crawl4ai/issues", urls=None, query="Extract all issue titles, numbers, and authors", verbose=True ) print(f"\n✅ Extracted {result.get('count', 0)} items using {result.get('method')}") if result.get('data'): print("Sample:", json.dumps(result['data'][:2] if isinstance(result['data'], list) else result['data'], indent=2)) # Test 2: With target JSON example print("\n\nTest 2: With target JSON example") target_example = json.dumps({ "title": "Issue title here", "number": "#123", "author": "username" }) result2 = await extract_pipeline_v2( base_url="https://github.com/unclecode/crawl4ai/issues", urls=None, query="Extract GitHub issues", target_json_example=target_example, verbose=True ) print(f"\n✅ Extracted {result2.get('count', 0)} items") # Test 3: Semantic extraction (force LLM) 
print("\n\nTest 3: Force semantic extraction") result3 = await extract_pipeline_v2( base_url="https://en.wikipedia.org/wiki/Artificial_intelligence", urls=None, query="Extract key concepts and their relationships in AI field", force_llm=True, verbose=True ) print(f"\n✅ Extracted using {result3.get('method')}") except Exception as e: print(f"\n❌ Error: {e}") import traceback traceback.print_exc() if __name__ == "__main__": if not os.getenv("OPENAI_API_KEY"): print("⚠️ Error: OPENAI_API_KEY environment variable not set") exit(1) asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/async_assistant/test_extract_pipeline.py
tests/async_assistant/test_extract_pipeline.py
""" Test implementation of AI Assistant extract pipeline using only Crawl4AI capabilities. This follows the exact flow discussed: query enhancement, classification, HTML skimming, parent extraction, schema generation, and extraction. """ import asyncio import json import os from typing import List, Dict, Any, Optional, Union from lxml import html as lxml_html import re from crawl4ai import AsyncWebCrawler, CrawlerRunConfig from crawl4ai.async_configs import LLMConfig from crawl4ai import JsonCssExtractionStrategy, LLMExtractionStrategy from crawl4ai.utils import perform_completion_with_backoff async def extract_pipeline( base_url: str, urls: Union[str, List[str], None], query: str, target_json_example: Optional[str] = None, force_llm: bool = False, verbose: bool = True ) -> Union[Dict, List[Dict]]: """ Full implementation of the AI-powered extraction pipeline using only Crawl4AI. Pipeline: 1. Quick crawl & HTML skimming 2. Classification (structural vs semantic) using LLM 3. Parent element extraction using LLM (for structural) 4. Schema generation using Crawl4AI's generate_schema 5. 
Extraction execution using Crawl4AI strategies """ # Normalize URLs if urls is None: urls = base_url target_urls = [urls] if isinstance(urls, str) else urls single_result = isinstance(urls, str) or urls is None # LLM configs for different tasks llm_small = LLMConfig( provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY") ) llm_small.temperature = 0.3 llm_strong = LLMConfig( provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY") ) llm_strong.temperature = 0.5 def vprint(msg: str): if verbose: print(f"🔍 {msg}") # Step 1: Starting vprint(f"Query: '{query}'") # Step 2: Quick crawl for analysis async with AsyncWebCrawler(verbose=False) as crawler: vprint(f"Quick crawl: {base_url}") quick_result = await crawler.arun( url=base_url, config=CrawlerRunConfig( cache_mode="bypass", delay_before_return_html=2.0 ) ) if not quick_result.success: raise Exception(f"Failed to crawl {base_url}") # Step 3: HTML Skimming using lxml def skim_html(html: str) -> str: """Remove non-structural elements using lxml.""" parser = lxml_html.HTMLParser(remove_comments=True) tree = lxml_html.fromstring(html, parser=parser) # Remove head section entirely for head in tree.xpath('//head'): head.getparent().remove(head) # Remove non-structural elements including SVGs for element in tree.xpath('//script | //style | //noscript | //meta | //link | //svg'): parent = element.getparent() if parent is not None: parent.remove(element) # Remove base64 images for img in tree.xpath('//img[@src]'): src = img.get('src', '') if 'base64' in src: img.set('src', 'BASE64_IMAGE') # Remove long class/id attributes for element in tree.xpath('//*[@class or @id]'): if element.get('class') and len(element.get('class')) > 100: element.set('class', 'LONG_CLASS') if element.get('id') and len(element.get('id')) > 50: element.set('id', 'LONG_ID') # Truncate text nodes for text_node in tree.xpath('//text()'): if text_node.strip() and len(text_node) > 100: parent = text_node.getparent() if parent is not None: 
new_text = text_node[:50] + "..." + text_node[-20:] if text_node.is_text: parent.text = new_text elif text_node.is_tail: parent.tail = new_text return lxml_html.tostring(tree, encoding='unicode') skimmed_html = skim_html(quick_result.html) vprint(f"Skimmed HTML from {len(quick_result.html)} to {len(skimmed_html)} chars") # Step 4: Classification using LLM classification = 'semantic' # Default if not force_llm: classification_prompt = f""" Analyze this HTML to determine extraction strategy. Query: "{query}" HTML sample: <<<<HTML>>> {skimmed_html} <<<<END HTML>>> Determine if this can be extracted using CSS/XPath patterns (structural) or requires semantic understanding (semantic). Look for: - Repeating patterns (lists, cards, tables) → structural - Consistent HTML structure → structural - Need for inference or understanding → semantic Return JSON: {{ "strategy": "structural" or "semantic", "confidence": 0.0-1.0, "reasoning": "..." }} """ response = perform_completion_with_backoff( provider=llm_small.provider, prompt_with_variables=classification_prompt, api_token=llm_small.api_token, json_response=True, temperature=llm_small.temperature ) classification_result = json.loads(response.choices[0].message.content) classification = classification_result['strategy'] vprint(f"Classification: {classification} (confidence: {classification_result['confidence']})") vprint(f"Reasoning: {classification_result['reasoning']}") if force_llm: classification = 'semantic' vprint("Forced LLM extraction") # Step 5 & 6: Execute appropriate extraction strategy if classification == 'structural': # Extract parent element using LLM with proper explanation parent_prompt = f""" Identify the CSS selector for the BASE ELEMENT TEMPLATE containing the data to extract. IMPORTANT: The base element template is a repeating pattern in the HTML where each instance contains one item of data (like a product card, article card, issue card, etc.). 
The selector should: - Not be too specific (avoid selecting just one item) - Not be too general (avoid selecting unrelated elements) - Select ALL instances of the repeating pattern - Point to the container that holds ONE complete data item For example: - On Amazon: div.s-result-item (each product card) - On GitHub issues: div[id^="issue_"] (each issue card) - On a blog: article.post-card (each article) User query: "{query}" """ if target_json_example: parent_prompt += f""" The user expects to extract data in this format: {target_json_example} Find the base element that contains all these fields. """ else: parent_prompt += """ Also provide a JSON example of what data can be extracted from one instance of this base element. """ parent_prompt += f""" HTML (first 8000 chars): <<<<HTML>>> {skimmed_html} <<<<END HTML>>> Return JSON: {{ "parent_selector": "css_selector_here", "explanation": "why this selector is appropriate",""" if not target_json_example: parent_prompt += """ "suggested_json_example": { "field1": "example value", "field2": "example value" }""" parent_prompt += """ }} """ response = perform_completion_with_backoff( provider=llm_small.provider, prompt_with_variables=parent_prompt, api_token=llm_small.api_token, json_response=True, temperature=llm_small.temperature ) parent_data = json.loads(response.choices[0].message.content) parent_selector = parent_data['parent_selector'] vprint(f"Parent selector: {parent_selector}") vprint(f"Explanation: {parent_data['explanation']}") # Use suggested JSON example if no target provided if not target_json_example and 'suggested_json_example' in parent_data: target_json_example = json.dumps(parent_data['suggested_json_example']) vprint(f"Using LLM suggested example: {target_json_example}") # Get the actual parent HTML for schema generation tree = lxml_html.fromstring(quick_result.html) parent_elements = tree.cssselect(parent_selector) if not parent_elements: vprint("Parent selector not found, falling back to semantic") 
classification = 'semantic' else: # Use the first instance as sample sample_html = lxml_html.tostring(parent_elements[0], encoding='unicode') vprint(f"Generating schema from sample HTML ({len(sample_html)} chars)") # Generate schema using Crawl4AI schema_params = { "html": sample_html, "query": query, "llm_config": llm_strong } if target_json_example: schema_params["target_json_example"] = target_json_example schema = JsonCssExtractionStrategy.generate_schema(**schema_params) vprint(f"Generated schema with {len(schema.get('fields', []))} fields") # Extract from all URLs extraction_strategy = JsonCssExtractionStrategy(schema) results = [] for url in target_urls: vprint(f"Extracting from: {url}") result = await crawler.arun( url=url, config=CrawlerRunConfig( extraction_strategy=extraction_strategy, cache_mode="bypass" ) ) if result.success and result.extracted_content: data = json.loads(result.extracted_content) results.append({ 'url': url, 'data': data, 'count': len(data) if isinstance(data, list) else 1, 'method': 'JsonCssExtraction', 'schema': schema }) return results[0] if single_result else results # Semantic extraction (LLM) if classification == 'semantic': vprint("Using LLM extraction") # Build instruction from query instruction = f""" {query} Return structured JSON data. 
""" extraction_strategy = LLMExtractionStrategy( llm_config=llm_strong, instruction=instruction ) results = [] for url in target_urls: vprint(f"LLM extracting from: {url}") result = await crawler.arun( url=url, config=CrawlerRunConfig( extraction_strategy=extraction_strategy, cache_mode="bypass" ) ) if result.success and result.extracted_content: data = json.loads(result.extracted_content) results.append({ 'url': url, 'data': data, 'count': len(data) if isinstance(data, list) else 1, 'method': 'LLMExtraction' }) return results[0] if single_result else results async def main(): """Test the extraction pipeline.""" print("\n🚀 CRAWL4AI EXTRACTION PIPELINE TEST") print("="*50) # Test structural extraction try: result = await extract_pipeline( base_url="https://github.com/unclecode/crawl4ai/issues", urls=None, query="I want to extract all issue titles, numbers, and who opened them", verbose=True ) print(f"\n✅ Success! Extracted {result.get('count', 0)} items") print(f"Method used: {result.get('method')}") if result.get('data'): print("\nFirst few items:") data = result['data'] items_to_show = data[:3] if isinstance(data, list) else data print(json.dumps(items_to_show, indent=2)) if result.get('schema'): print(f"\nGenerated schema fields: {[f['name'] for f in result['schema'].get('fields', [])]}") except Exception as e: print(f"\n❌ Error: {e}") import traceback traceback.print_exc() if __name__ == "__main__": # Check for API key if not os.getenv("OPENAI_API_KEY"): print("⚠️ Error: OPENAI_API_KEY environment variable not set") exit(1) asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/test_embedding_strategy.py
tests/adaptive/test_embedding_strategy.py
""" Test and demo script for Embedding-based Adaptive Crawler This script demonstrates the embedding-based adaptive crawling with semantic space coverage and gap-driven expansion. """ import asyncio import os from pathlib import Path import time from rich.console import Console from rich import print as rprint import sys # Add parent directory to path for imports sys.path.append(str(Path(__file__).parent.parent.parent)) from crawl4ai import ( AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig, CrawlState ) console = Console() async def test_basic_embedding_crawl(): """Test basic embedding-based adaptive crawling""" console.print("\n[bold yellow]Test 1: Basic Embedding-based Crawl[/bold yellow]") console.print("Testing semantic space coverage with query expansion") # Configure with embedding strategy config = AdaptiveConfig( strategy="embedding", confidence_threshold=0.7, # Not used for stopping in embedding strategy min_gain_threshold=0.01, max_pages=15, top_k_links=3, n_query_variations=8, embedding_model="sentence-transformers/all-MiniLM-L6-v2" # Fast, good quality ) # For query expansion, we need an LLM config llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': os.getenv('OPENAI_API_KEY') } if not llm_config['api_token']: console.print("[red]Warning: OPENAI_API_KEY not set. 
Using mock data for demo.[/red]") # Continue with mock for demo purposes config.embedding_llm_config = llm_config # Create crawler async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler( crawler=crawler, config=config ) # Start adaptive crawl start_time = time.time() console.print("\n[cyan]Starting semantic adaptive crawl...[/cyan]") state = await prog_crawler.digest( start_url="https://docs.python.org/3/library/asyncio.html", query="async await coroutines event loops" ) elapsed = time.time() - start_time # Print results console.print(f"\n[green]Crawl completed in {elapsed:.2f} seconds[/green]") prog_crawler.print_stats(detailed=False) # Show semantic coverage details console.print("\n[bold cyan]Semantic Coverage Details:[/bold cyan]") if state.expanded_queries: console.print(f"Query expanded to {len(state.expanded_queries)} variations") console.print("Sample variations:") for i, q in enumerate(state.expanded_queries[:3], 1): console.print(f" {i}. {q}") if state.semantic_gaps: console.print(f"\nSemantic gaps identified: {len(state.semantic_gaps)}") console.print(f"\nFinal confidence: {prog_crawler.confidence:.2%}") console.print(f"Is Sufficient: {'Yes (Validated)' if prog_crawler.is_sufficient else 'No'}") console.print(f"Pages needed: {len(state.crawled_urls)}") async def test_embedding_vs_statistical(use_openai=False): """Compare embedding strategy with statistical strategy""" console.print("\n[bold yellow]Test 2: Embedding vs Statistical Strategy Comparison[/bold yellow]") test_url = "https://httpbin.org" test_query = "http headers authentication api" # Test 1: Statistical strategy console.print("\n[cyan]1. 
Statistical Strategy:[/cyan]") config_stat = AdaptiveConfig( strategy="statistical", confidence_threshold=0.7, max_pages=10 ) async with AsyncWebCrawler() as crawler: stat_crawler = AdaptiveCrawler(crawler=crawler, config=config_stat) start_time = time.time() state_stat = await stat_crawler.digest(start_url=test_url, query=test_query) stat_time = time.time() - start_time stat_pages = len(state_stat.crawled_urls) stat_confidence = stat_crawler.confidence # Test 2: Embedding strategy console.print("\n[cyan]2. Embedding Strategy:[/cyan]") config_emb = AdaptiveConfig( strategy="embedding", confidence_threshold=0.7, # Not used for stopping max_pages=10, n_query_variations=5, min_gain_threshold=0.01 ) # Use OpenAI if available or requested if use_openai and os.getenv('OPENAI_API_KEY'): config_emb.embedding_llm_config = { 'provider': 'openai/text-embedding-3-small', 'api_token': os.getenv('OPENAI_API_KEY'), 'embedding_model': 'text-embedding-3-small' } console.print("[cyan]Using OpenAI embeddings[/cyan]") else: # Default config will try sentence-transformers config_emb.embedding_llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': os.getenv('OPENAI_API_KEY', 'dummy-key') } async with AsyncWebCrawler() as crawler: emb_crawler = AdaptiveCrawler(crawler=crawler, config=config_emb) start_time = time.time() state_emb = await emb_crawler.digest(start_url=test_url, query=test_query) emb_time = time.time() - start_time emb_pages = len(state_emb.crawled_urls) emb_confidence = emb_crawler.confidence # Compare results console.print("\n[bold green]Comparison Results:[/bold green]") console.print(f"Statistical: {stat_pages} pages in {stat_time:.2f}s, confidence: {stat_confidence:.2%}, sufficient: {stat_crawler.is_sufficient}") console.print(f"Embedding: {emb_pages} pages in {emb_time:.2f}s, confidence: {emb_confidence:.2%}, sufficient: {emb_crawler.is_sufficient}") if emb_pages < stat_pages: efficiency = ((stat_pages - emb_pages) / stat_pages) * 100 
console.print(f"\n[green]Embedding strategy used {efficiency:.0f}% fewer pages![/green]") # Show validation info for embedding if hasattr(state_emb, 'metrics') and 'validation_confidence' in state_emb.metrics: console.print(f"Embedding validation score: {state_emb.metrics['validation_confidence']:.2%}") async def test_custom_embedding_provider(): """Test with different embedding providers""" console.print("\n[bold yellow]Test 3: Custom Embedding Provider[/bold yellow]") # Example with OpenAI embeddings config = AdaptiveConfig( strategy="embedding", confidence_threshold=0.8, # Not used for stopping max_pages=10, min_gain_threshold=0.01, n_query_variations=5 ) # Configure to use OpenAI embeddings instead of sentence-transformers config.embedding_llm_config = { 'provider': 'openai/text-embedding-3-small', 'api_token': os.getenv('OPENAI_API_KEY'), 'embedding_model': 'text-embedding-3-small' } if not config.embedding_llm_config['api_token']: console.print("[yellow]Skipping OpenAI embedding test - no API key[/yellow]") return async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) console.print("Using OpenAI embeddings for semantic analysis...") state = await prog_crawler.digest( start_url="https://httpbin.org", query="api endpoints json response" ) prog_crawler.print_stats(detailed=False) async def test_knowledge_export_import(): """Test exporting and importing semantic knowledge bases""" console.print("\n[bold yellow]Test 4: Semantic Knowledge Base Export/Import[/bold yellow]") config = AdaptiveConfig( strategy="embedding", confidence_threshold=0.7, # Not used for stopping max_pages=5, min_gain_threshold=0.01, n_query_variations=4 ) # First crawl async with AsyncWebCrawler() as crawler: crawler1 = AdaptiveCrawler(crawler=crawler, config=config) console.print("\n[cyan]Building initial knowledge base...[/cyan]") state1 = await crawler1.digest( start_url="https://httpbin.org", query="http methods headers" ) # Export 
export_path = "semantic_kb.jsonl" crawler1.export_knowledge_base(export_path) console.print(f"[green]Exported {len(state1.knowledge_base)} documents with embeddings[/green]") # Import and continue async with AsyncWebCrawler() as crawler: crawler2 = AdaptiveCrawler(crawler=crawler, config=config) console.print("\n[cyan]Importing knowledge base...[/cyan]") crawler2.import_knowledge_base(export_path) # Continue with new query - should be faster console.print("\n[cyan]Extending with new query...[/cyan]") state2 = await crawler2.digest( start_url="https://httpbin.org", query="authentication oauth tokens" ) console.print(f"[green]Total knowledge base: {len(state2.knowledge_base)} documents[/green]") # Cleanup Path(export_path).unlink(missing_ok=True) async def test_gap_visualization(): """Visualize semantic gaps and coverage""" console.print("\n[bold yellow]Test 5: Semantic Gap Analysis[/bold yellow]") config = AdaptiveConfig( strategy="embedding", confidence_threshold=0.9, # Not used for stopping max_pages=8, n_query_variations=6, min_gain_threshold=0.01 ) async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) # Initial crawl state = await prog_crawler.digest( start_url="https://docs.python.org/3/library/", query="concurrency threading multiprocessing" ) # Analyze gaps console.print("\n[bold cyan]Semantic Gap Analysis:[/bold cyan]") console.print(f"Query variations: {len(state.expanded_queries)}") console.print(f"Knowledge documents: {len(state.knowledge_base)}") console.print(f"Identified gaps: {len(state.semantic_gaps)}") if state.semantic_gaps: console.print("\n[yellow]Gap sizes (distance from coverage):[/yellow]") for i, (_, distance) in enumerate(state.semantic_gaps[:5], 1): console.print(f" Gap {i}: {distance:.3f}") # Show crawl progression console.print("\n[cyan]Crawl Order (gap-driven selection):[/cyan]") for i, url in enumerate(state.crawl_order[:5], 1): console.print(f" {i}. 
{url}") async def test_fast_convergence_with_relevant_query(): """Test that both strategies reach high confidence quickly with relevant queries""" console.print("\n[bold yellow]Test 7: Fast Convergence with Relevant Query[/bold yellow]") console.print("Testing that strategies reach 80%+ confidence within 2-3 batches") # Test scenarios test_cases = [ { "name": "Python Async Documentation", "url": "https://docs.python.org/3/library/asyncio.html", "query": "async await coroutines event loops tasks" } ] for test_case in test_cases: console.print(f"\n[bold cyan]Testing: {test_case['name']}[/bold cyan]") console.print(f"URL: {test_case['url']}") console.print(f"Query: {test_case['query']}") # Test Embedding Strategy console.print("\n[yellow]Embedding Strategy:[/yellow]") config_emb = AdaptiveConfig( strategy="embedding", confidence_threshold=0.8, max_pages=9, top_k_links=3, min_gain_threshold=0.01, n_query_variations=5 ) # Configure embeddings config_emb.embedding_llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': os.getenv('OPENAI_API_KEY'), } async with AsyncWebCrawler() as crawler: emb_crawler = AdaptiveCrawler(crawler=crawler, config=config_emb) start_time = time.time() state = await emb_crawler.digest( start_url=test_case['url'], query=test_case['query'] ) # Get batch breakdown total_pages = len(state.crawled_urls) for i in range(0, total_pages, 3): batch_num = (i // 3) + 1 batch_pages = min(3, total_pages - i) pages_so_far = i + batch_pages estimated_confidence = state.metrics.get('confidence', 0) * (pages_so_far / total_pages) console.print(f"Batch {batch_num}: {batch_pages} pages → Confidence: {estimated_confidence:.1%} {'✅' if estimated_confidence >= 0.8 else '❌'}") final_confidence = emb_crawler.confidence console.print(f"[green]Final: {total_pages} pages → Confidence: {final_confidence:.1%} {'✅ (Sufficient!)' if emb_crawler.is_sufficient else '❌'}[/green]") # Show learning metrics for embedding if 'avg_min_distance' in state.metrics: 
console.print(f"[dim]Avg gap distance: {state.metrics['avg_min_distance']:.3f}[/dim]") if 'validation_confidence' in state.metrics: console.print(f"[dim]Validation score: {state.metrics['validation_confidence']:.1%}[/dim]") # Test Statistical Strategy console.print("\n[yellow]Statistical Strategy:[/yellow]") config_stat = AdaptiveConfig( strategy="statistical", confidence_threshold=0.8, max_pages=9, top_k_links=3, min_gain_threshold=0.01 ) async with AsyncWebCrawler() as crawler: stat_crawler = AdaptiveCrawler(crawler=crawler, config=config_stat) # Track batch progress batch_results = [] current_pages = 0 # Custom batch tracking start_time = time.time() state = await stat_crawler.digest( start_url=test_case['url'], query=test_case['query'] ) # Get batch breakdown (every 3 pages) total_pages = len(state.crawled_urls) for i in range(0, total_pages, 3): batch_num = (i // 3) + 1 batch_pages = min(3, total_pages - i) # Estimate confidence at this point (simplified) pages_so_far = i + batch_pages estimated_confidence = state.metrics.get('confidence', 0) * (pages_so_far / total_pages) console.print(f"Batch {batch_num}: {batch_pages} pages → Confidence: {estimated_confidence:.1%} {'✅' if estimated_confidence >= 0.8 else '❌'}") final_confidence = stat_crawler.confidence console.print(f"[green]Final: {total_pages} pages → Confidence: {final_confidence:.1%} {'✅ (Sufficient!)' if stat_crawler.is_sufficient else '❌'}[/green]") async def test_irrelevant_query_behavior(): """Test how embedding strategy handles completely irrelevant queries""" console.print("\n[bold yellow]Test 8: Irrelevant Query Behavior[/bold yellow]") console.print("Testing embedding strategy with a query that has no semantic relevance to the content") # Test with irrelevant query on Python async documentation test_case = { "name": "Irrelevant Query on Python Docs", "url": "https://docs.python.org/3/library/asyncio.html", "query": "how to cook fried rice with vegetables" } console.print(f"\n[bold cyan]Testing: 
{test_case['name']}[/bold cyan]") console.print(f"URL: {test_case['url']} (Python async documentation)") console.print(f"Query: '{test_case['query']}' (completely irrelevant)") console.print("\n[dim]Expected behavior: Low confidence, high distances, no convergence[/dim]") # Configure embedding strategy config_emb = AdaptiveConfig( strategy="embedding", confidence_threshold=0.8, max_pages=9, top_k_links=3, min_gain_threshold=0.01, n_query_variations=5, embedding_min_relative_improvement=0.05, # Lower threshold to see more iterations embedding_min_confidence_threshold=0.1 # Will stop if confidence < 10% ) # Configure embeddings using the correct format config_emb.embedding_llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': os.getenv('OPENAI_API_KEY'), } async with AsyncWebCrawler() as crawler: emb_crawler = AdaptiveCrawler(crawler=crawler, config=config_emb) start_time = time.time() state = await emb_crawler.digest( start_url=test_case['url'], query=test_case['query'] ) elapsed = time.time() - start_time # Analyze results console.print(f"\n[bold]Results after {elapsed:.1f} seconds:[/bold]") # Basic metrics total_pages = len(state.crawled_urls) final_confidence = emb_crawler.confidence console.print(f"\nPages crawled: {total_pages}") console.print(f"Final confidence: {final_confidence:.1%} {'✅' if emb_crawler.is_sufficient else '❌'}") # Distance metrics if 'avg_min_distance' in state.metrics: console.print(f"\n[yellow]Distance Metrics:[/yellow]") console.print(f" Average minimum distance: {state.metrics['avg_min_distance']:.3f}") console.print(f" Close neighbors (<0.3): {state.metrics.get('avg_close_neighbors', 0):.1f}") console.print(f" Very close neighbors (<0.2): {state.metrics.get('avg_very_close_neighbors', 0):.1f}") # Interpret distances avg_dist = state.metrics['avg_min_distance'] if avg_dist > 0.8: console.print(f" [red]→ Very poor match (distance > 0.8)[/red]") elif avg_dist > 0.6: console.print(f" [yellow]→ Poor match (distance > 0.6)[/yellow]") 
elif avg_dist > 0.4: console.print(f" [blue]→ Moderate match (distance > 0.4)[/blue]") else: console.print(f" [green]→ Good match (distance < 0.4)[/green]") # Show sample expanded queries if state.expanded_queries: console.print(f"\n[yellow]Sample Query Variations Generated:[/yellow]") for i, q in enumerate(state.expanded_queries[:3], 1): console.print(f" {i}. {q}") # Show crawl progression console.print(f"\n[yellow]Crawl Progression:[/yellow]") for i, url in enumerate(state.crawl_order[:5], 1): console.print(f" {i}. {url}") if len(state.crawl_order) > 5: console.print(f" ... and {len(state.crawl_order) - 5} more") # Validation score if 'validation_confidence' in state.metrics: console.print(f"\n[yellow]Validation:[/yellow]") console.print(f" Validation score: {state.metrics['validation_confidence']:.1%}") # Why it stopped if 'stopped_reason' in state.metrics: console.print(f"\n[yellow]Stopping Reason:[/yellow] {state.metrics['stopped_reason']}") if state.metrics.get('is_irrelevant', False): console.print("[red]→ Query and content are completely unrelated![/red]") elif total_pages >= config_emb.max_pages: console.print(f"\n[yellow]Stopping Reason:[/yellow] Reached max pages limit ({config_emb.max_pages})") # Summary console.print(f"\n[bold]Summary:[/bold]") if final_confidence < 0.2: console.print("[red]✗ As expected: Query is completely irrelevant to content[/red]") console.print("[green]✓ The embedding strategy correctly identified no semantic match[/green]") else: console.print(f"[yellow]⚠ Unexpected: Got {final_confidence:.1%} confidence for irrelevant query[/yellow]") console.print("[yellow] This may indicate the query variations are too broad[/yellow]") async def test_high_dimensional_handling(): """Test handling of high-dimensional embedding spaces""" console.print("\n[bold yellow]Test 6: High-Dimensional Embedding Space Handling[/bold yellow]") console.print("Testing how the system handles 384+ dimensional embeddings") config = AdaptiveConfig( 
strategy="embedding", confidence_threshold=0.8, # Not used for stopping max_pages=5, n_query_variations=8, # Will create 9 points total min_gain_threshold=0.01, embedding_model="sentence-transformers/all-MiniLM-L6-v2" # 384 dimensions ) # Use OpenAI if available, otherwise mock if os.getenv('OPENAI_API_KEY'): config.embedding_llm_config = { 'provider': 'openai/text-embedding-3-small', 'api_token': os.getenv('OPENAI_API_KEY'), 'embedding_model': 'text-embedding-3-small' } else: config.embedding_llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': 'mock-key' } async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) console.print("\n[cyan]Testing with high-dimensional embeddings (384D)...[/cyan]") try: state = await prog_crawler.digest( start_url="https://httpbin.org", query="api endpoints json" ) console.print(f"[green]✓ Successfully handled {len(state.expanded_queries)} queries in 384D space[/green]") console.print(f"Coverage shape type: {type(state.coverage_shape)}") if isinstance(state.coverage_shape, dict): console.print(f"Coverage model: centroid + radius") console.print(f" - Center shape: {state.coverage_shape['center'].shape if 'center' in state.coverage_shape else 'N/A'}") console.print(f" - Radius: {state.coverage_shape.get('radius', 'N/A'):.3f}") except Exception as e: console.print(f"[red]Error: {e}[/red]") console.print("[yellow]This demonstrates why alpha shapes don't work in high dimensions[/yellow]") async def main(): """Run all embedding strategy tests""" console.print("[bold magenta]Embedding-based Adaptive Crawler Test Suite[/bold magenta]") console.print("=" * 60) try: # Check if we have required dependencies has_sentence_transformers = True has_numpy = True try: import numpy console.print("[green]✓ NumPy installed[/green]") except ImportError: has_numpy = False console.print("[red]Missing numpy[/red]") # Try to import sentence_transformers but catch numpy compatibility errors try: import 
sentence_transformers console.print("[green]✓ Sentence-transformers installed[/green]") except (ImportError, RuntimeError, ValueError) as e: has_sentence_transformers = False console.print(f"[yellow]Warning: sentence-transformers not available[/yellow]") console.print("[yellow]Tests will use OpenAI embeddings if available or mock data[/yellow]") # Run tests based on available dependencies if has_numpy: # Check if we should use OpenAI for embeddings use_openai = not has_sentence_transformers and os.getenv('OPENAI_API_KEY') if not has_sentence_transformers and not os.getenv('OPENAI_API_KEY'): console.print("\n[red]Neither sentence-transformers nor OpenAI API key available[/red]") console.print("[yellow]Please set OPENAI_API_KEY or fix sentence-transformers installation[/yellow]") return # Run all tests # await test_basic_embedding_crawl() # await test_embedding_vs_statistical(use_openai=use_openai) # Run the fast convergence test - this is the most important one # await test_fast_convergence_with_relevant_query() # Test with irrelevant query await test_irrelevant_query_behavior() # Only run OpenAI-specific test if we have API key # if os.getenv('OPENAI_API_KEY'): # await test_custom_embedding_provider() # # Skip tests that require sentence-transformers when it's not available # if has_sentence_transformers: # await test_knowledge_export_import() # await test_gap_visualization() # else: # console.print("\n[yellow]Skipping tests that require sentence-transformers due to numpy compatibility issues[/yellow]") # This test should work with mock data # await test_high_dimensional_handling() else: console.print("\n[red]Cannot run tests without NumPy[/red]") return console.print("\n[bold green]✅ All tests completed![/bold green]") except Exception as e: console.print(f"\n[bold red]❌ Test failed: {e}[/bold red]") import traceback traceback.print_exc() if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/test_adaptive_crawler.py
tests/adaptive/test_adaptive_crawler.py
""" Test and demo script for Adaptive Crawler This script demonstrates the progressive crawling functionality with various configurations and use cases. """ import asyncio import json from pathlib import Path import time from typing import Dict, List from rich.console import Console from rich.table import Table from rich.progress import Progress from rich import print as rprint # Add parent directory to path for imports import sys sys.path.append(str(Path(__file__).parent.parent)) from crawl4ai import ( AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig, CrawlState ) console = Console() def print_relevant_content(crawler: AdaptiveCrawler, top_k: int = 3): """Print most relevant content found""" relevant = crawler.get_relevant_content(top_k=top_k) if not relevant: console.print("[yellow]No relevant content found yet.[/yellow]") return console.print(f"\n[bold cyan]Top {len(relevant)} Most Relevant Pages:[/bold cyan]") for i, doc in enumerate(relevant, 1): console.print(f"\n[green]{i}. {doc['url']}[/green]") console.print(f" Score: {doc['score']:.2f}") # Show snippet content = doc['content'] or "" snippet = content[:200].replace('\n', ' ') + "..." 
if len(content) > 200 else content console.print(f" [dim]{snippet}[/dim]") async def test_basic_progressive_crawl(): """Test basic progressive crawling functionality""" console.print("\n[bold yellow]Test 1: Basic Progressive Crawl[/bold yellow]") console.print("Testing on Python documentation with query about async/await") config = AdaptiveConfig( confidence_threshold=0.7, max_pages=10, top_k_links=2, min_gain_threshold=0.1 ) # Create crawler async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler( crawler=crawler, config=config ) # Start progressive crawl start_time = time.time() state = await prog_crawler.digest( start_url="https://docs.python.org/3/library/asyncio.html", query="async await context managers" ) elapsed = time.time() - start_time # Print results prog_crawler.print_stats(detailed=False) prog_crawler.print_stats(detailed=True) print_relevant_content(prog_crawler) console.print(f"\n[green]Crawl completed in {elapsed:.2f} seconds[/green]") console.print(f"Final confidence: {prog_crawler.confidence:.2%}") console.print(f"URLs crawled: {list(state.crawled_urls)[:5]}...") # Show first 5 # Test export functionality export_path = "knowledge_base_export.jsonl" prog_crawler.export_knowledge_base(export_path) console.print(f"[green]Knowledge base exported to {export_path}[/green]") # Clean up Path(export_path).unlink(missing_ok=True) async def test_with_persistence(): """Test state persistence and resumption""" console.print("\n[bold yellow]Test 2: Persistence and Resumption[/bold yellow]") console.print("Testing state save/load functionality") state_path = "test_crawl_state.json" config = AdaptiveConfig( confidence_threshold=0.6, max_pages=5, top_k_links=2, save_state=True, state_path=state_path ) # First crawl - partial async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler( crawler=crawler, config=config ) state1 = await prog_crawler.digest( start_url="https://httpbin.org", query="http headers response" ) 
console.print(f"[cyan]First crawl: {len(state1.crawled_urls)} pages[/cyan]") # Resume crawl config.max_pages = 10 # Increase limit async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler( crawler=crawler, config=config ) state2 = await prog_crawler.digest( start_url="https://httpbin.org", query="http headers response", resume_from=state_path ) console.print(f"[green]Resumed crawl: {len(state2.crawled_urls)} total pages[/green]") # Clean up Path(state_path).unlink(missing_ok=True) async def test_different_domains(): """Test on different types of websites""" console.print("\n[bold yellow]Test 3: Different Domain Types[/bold yellow]") test_cases = [ { "name": "Documentation Site", "url": "https://docs.python.org/3/", "query": "decorators and context managers" }, { "name": "API Documentation", "url": "https://httpbin.org", "query": "http authentication headers" } ] for test in test_cases: console.print(f"\n[cyan]Testing: {test['name']}[/cyan]") console.print(f"URL: {test['url']}") console.print(f"Query: {test['query']}") config = AdaptiveConfig( confidence_threshold=0.6, max_pages=5, top_k_links=2 ) async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler( crawler=crawler, config=config ) start_time = time.time() state = await prog_crawler.digest( start_url=test['url'], query=test['query'] ) elapsed = time.time() - start_time # Summary using print_stats prog_crawler.print_stats(detailed=False) async def test_stopping_criteria(): """Test different stopping criteria""" console.print("\n[bold yellow]Test 4: Stopping Criteria[/bold yellow]") # Test 1: High confidence threshold console.print("\n[cyan]4.1 High confidence threshold (0.9)[/cyan]") config = AdaptiveConfig( confidence_threshold=0.9, # Very high max_pages=20, top_k_links=3 ) async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) state = await prog_crawler.digest( start_url="https://docs.python.org/3/library/", query="python standard 
library" ) console.print(f"Pages needed for 90% confidence: {len(state.crawled_urls)}") prog_crawler.print_stats(detailed=False) # Test 2: Page limit console.print("\n[cyan]4.2 Page limit (3 pages max)[/cyan]") config = AdaptiveConfig( confidence_threshold=0.9, max_pages=3, # Very low limit top_k_links=2 ) async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) state = await prog_crawler.digest( start_url="https://docs.python.org/3/library/", query="python standard library modules" ) console.print(f"Stopped by: {'Page limit' if len(state.crawled_urls) >= 3 else 'Other'}") prog_crawler.print_stats(detailed=False) async def test_crawl_patterns(): """Analyze crawl patterns and link selection""" console.print("\n[bold yellow]Test 5: Crawl Pattern Analysis[/bold yellow]") config = AdaptiveConfig( confidence_threshold=0.7, max_pages=8, top_k_links=2, min_gain_threshold=0.05 ) async with AsyncWebCrawler() as crawler: prog_crawler = AdaptiveCrawler(crawler=crawler, config=config) # Track crawl progress console.print("\n[cyan]Crawl Progress:[/cyan]") state = await prog_crawler.digest( start_url="https://httpbin.org", query="http methods post get" ) # Show crawl order console.print("\n[green]Crawl Order:[/green]") for i, url in enumerate(state.crawl_order, 1): console.print(f"{i}. 
{url}") # Show new terms discovered per page console.print("\n[green]New Terms Discovered:[/green]") for i, new_terms in enumerate(state.new_terms_history, 1): console.print(f"Page {i}: {new_terms} new terms") # Final metrics console.print(f"\n[yellow]Saturation reached: {state.metrics.get('saturation', 0):.2%}[/yellow]") async def main(): """Run all tests""" console.print("[bold magenta]Adaptive Crawler Test Suite[/bold magenta]") console.print("=" * 50) try: # Run tests await test_basic_progressive_crawl() # await test_with_persistence() # await test_different_domains() # await test_stopping_criteria() # await test_crawl_patterns() console.print("\n[bold green]✅ All tests completed successfully![/bold green]") except Exception as e: console.print(f"\n[bold red]❌ Test failed with error: {e}[/bold red]") import traceback traceback.print_exc() if __name__ == "__main__": # Run the test suite asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/compare_performance.py
tests/adaptive/compare_performance.py
""" Compare performance before and after optimizations """ def read_baseline(): """Read baseline performance metrics""" with open('performance_baseline.txt', 'r') as f: content = f.read() # Extract key metrics metrics = {} lines = content.split('\n') for i, line in enumerate(lines): if 'Total Time:' in line: metrics['total_time'] = float(line.split(':')[1].strip().split()[0]) elif 'Memory Used:' in line: metrics['memory_mb'] = float(line.split(':')[1].strip().split()[0]) elif 'validate_coverage:' in line and i+1 < len(lines) and 'Avg Time:' in lines[i+2]: metrics['validate_coverage_ms'] = float(lines[i+2].split(':')[1].strip().split()[0]) elif 'select_links:' in line and i+1 < len(lines) and 'Avg Time:' in lines[i+2]: metrics['select_links_ms'] = float(lines[i+2].split(':')[1].strip().split()[0]) elif 'calculate_confidence:' in line and i+1 < len(lines) and 'Avg Time:' in lines[i+2]: metrics['calculate_confidence_ms'] = float(lines[i+2].split(':')[1].strip().split()[0]) return metrics def print_comparison(before_metrics, after_metrics): """Print performance comparison""" print("\n" + "="*80) print("PERFORMANCE COMPARISON: BEFORE vs AFTER OPTIMIZATIONS") print("="*80) # Total time time_improvement = (before_metrics['total_time'] - after_metrics['total_time']) / before_metrics['total_time'] * 100 print(f"\n📊 Total Time:") print(f" Before: {before_metrics['total_time']:.2f} seconds") print(f" After: {after_metrics['total_time']:.2f} seconds") print(f" Improvement: {time_improvement:.1f}% faster ✅" if time_improvement > 0 else f" Slower: {-time_improvement:.1f}% ❌") # Memory mem_improvement = (before_metrics['memory_mb'] - after_metrics['memory_mb']) / before_metrics['memory_mb'] * 100 print(f"\n💾 Memory Usage:") print(f" Before: {before_metrics['memory_mb']:.2f} MB") print(f" After: {after_metrics['memory_mb']:.2f} MB") print(f" Improvement: {mem_improvement:.1f}% less memory ✅" if mem_improvement > 0 else f" More memory: {-mem_improvement:.1f}% ❌") # Key operations 
print(f"\n⚡ Key Operations:") # Validate coverage if 'validate_coverage_ms' in before_metrics and 'validate_coverage_ms' in after_metrics: val_improvement = (before_metrics['validate_coverage_ms'] - after_metrics['validate_coverage_ms']) / before_metrics['validate_coverage_ms'] * 100 print(f"\n validate_coverage:") print(f" Before: {before_metrics['validate_coverage_ms']:.1f} ms") print(f" After: {after_metrics['validate_coverage_ms']:.1f} ms") print(f" Improvement: {val_improvement:.1f}% faster ✅" if val_improvement > 0 else f" Slower: {-val_improvement:.1f}% ❌") # Select links if 'select_links_ms' in before_metrics and 'select_links_ms' in after_metrics: sel_improvement = (before_metrics['select_links_ms'] - after_metrics['select_links_ms']) / before_metrics['select_links_ms'] * 100 print(f"\n select_links:") print(f" Before: {before_metrics['select_links_ms']:.1f} ms") print(f" After: {after_metrics['select_links_ms']:.1f} ms") print(f" Improvement: {sel_improvement:.1f}% faster ✅" if sel_improvement > 0 else f" Slower: {-sel_improvement:.1f}% ❌") # Calculate confidence if 'calculate_confidence_ms' in before_metrics and 'calculate_confidence_ms' in after_metrics: calc_improvement = (before_metrics['calculate_confidence_ms'] - after_metrics['calculate_confidence_ms']) / before_metrics['calculate_confidence_ms'] * 100 print(f"\n calculate_confidence:") print(f" Before: {before_metrics['calculate_confidence_ms']:.1f} ms") print(f" After: {after_metrics['calculate_confidence_ms']:.1f} ms") print(f" Improvement: {calc_improvement:.1f}% faster ✅" if calc_improvement > 0 else f" Slower: {-calc_improvement:.1f}% ❌") print("\n" + "="*80) # Overall assessment if time_improvement > 50: print("🎉 EXCELLENT OPTIMIZATION! More than 50% performance improvement!") elif time_improvement > 30: print("✅ GOOD OPTIMIZATION! Significant performance improvement!") elif time_improvement > 10: print("👍 DECENT OPTIMIZATION! 
Noticeable performance improvement!") else: print("🤔 MINIMAL IMPROVEMENT. Further optimization may be needed.") print("="*80) if __name__ == "__main__": # Example usage - you'll run this after implementing optimizations baseline = read_baseline() print("Baseline metrics loaded:") for k, v in baseline.items(): print(f" {k}: {v}") print("\n⚠️ Run the performance test again after optimizations to compare!") print("Then update this script with the new metrics to see the comparison.")
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/test_confidence_debug.py
tests/adaptive/test_confidence_debug.py
""" Test script for debugging confidence calculation in adaptive crawler Focus: Testing why confidence decreases when crawling relevant URLs """ import asyncio import sys from pathlib import Path from typing import List, Dict import math # Add parent directory to path for imports sys.path.append(str(Path(__file__).parent.parent)) from crawl4ai import AsyncWebCrawler from crawl4ai.adaptive_crawler import CrawlState, StatisticalStrategy from crawl4ai.models import CrawlResult class ConfidenceTestHarness: """Test harness for analyzing confidence calculation""" def __init__(self): self.strategy = StatisticalStrategy() self.test_urls = [ 'https://docs.python.org/3/library/asyncio.html', 'https://docs.python.org/3/library/asyncio-runner.html', 'https://docs.python.org/3/library/asyncio-api-index.html', 'https://docs.python.org/3/library/contextvars.html', 'https://docs.python.org/3/library/asyncio-stream.html' ] self.query = "async await context manager" async def test_confidence_progression(self): """Test confidence calculation as we crawl each URL""" print(f"Testing confidence for query: '{self.query}'") print("=" * 80) # Initialize state state = CrawlState(query=self.query) # Create crawler async with AsyncWebCrawler() as crawler: for i, url in enumerate(self.test_urls, 1): print(f"\n{i}. 
Crawling: {url}") print("-" * 80) # Crawl the URL result = await crawler.arun(url=url) # Extract markdown content if hasattr(result, '_results') and result._results: result = result._results[0] # Create a mock CrawlResult with markdown mock_result = type('CrawlResult', (), { 'markdown': type('Markdown', (), { 'raw_markdown': result.markdown.raw_markdown if hasattr(result, 'markdown') else '' })(), 'url': url })() # Update state state.knowledge_base.append(mock_result) await self.strategy.update_state(state, [mock_result]) # Calculate metrics confidence = await self.strategy.calculate_confidence(state) # Get individual components coverage = state.metrics.get('coverage', 0) consistency = state.metrics.get('consistency', 0) saturation = state.metrics.get('saturation', 0) # Analyze term frequencies query_terms = self.strategy._tokenize(self.query.lower()) term_stats = {} for term in query_terms: term_stats[term] = { 'tf': state.term_frequencies.get(term, 0), 'df': state.document_frequencies.get(term, 0) } # Print detailed results print(f"State after crawl {i}:") print(f" Total documents: {state.total_documents}") print(f" Unique terms: {len(state.term_frequencies)}") print(f" New terms added: {state.new_terms_history[-1] if state.new_terms_history else 0}") print(f"\nQuery term statistics:") for term, stats in term_stats.items(): print(f" '{term}': tf={stats['tf']}, df={stats['df']}") print(f"\nMetrics:") print(f" Coverage: {coverage:.3f}") print(f" Consistency: {consistency:.3f}") print(f" Saturation: {saturation:.3f}") print(f" → Confidence: {confidence:.3f}") # Show coverage calculation details print(f"\nCoverage calculation details:") self._debug_coverage_calculation(state, query_terms) # Alert if confidence decreased if i > 1 and confidence < state.metrics.get('prev_confidence', 0): print(f"\n⚠️ WARNING: Confidence decreased from {state.metrics.get('prev_confidence', 0):.3f} to {confidence:.3f}") state.metrics['prev_confidence'] = confidence def 
_debug_coverage_calculation(self, state: CrawlState, query_terms: List[str]): """Debug coverage calculation step by step""" coverage_score = 0.0 max_possible_score = 0.0 for term in query_terms: tf = state.term_frequencies.get(term, 0) df = state.document_frequencies.get(term, 0) if df > 0: idf = math.log((state.total_documents - df + 0.5) / (df + 0.5) + 1) doc_coverage = df / state.total_documents tf_boost = min(tf / df, 3.0) term_score = doc_coverage * idf * (1 + 0.1 * math.log1p(tf_boost)) print(f" '{term}': doc_cov={doc_coverage:.2f}, idf={idf:.2f}, boost={1 + 0.1 * math.log1p(tf_boost):.2f} → score={term_score:.3f}") coverage_score += term_score else: print(f" '{term}': not found → score=0.000") max_possible_score += 1.0 * 1.0 * 1.1 print(f" Total: {coverage_score:.3f} / {max_possible_score:.3f} = {coverage_score/max_possible_score if max_possible_score > 0 else 0:.3f}") # New coverage calculation print(f"\n NEW Coverage calculation (without IDF):") new_coverage = self._calculate_coverage_new(state, query_terms) print(f" → New Coverage: {new_coverage:.3f}") def _calculate_coverage_new(self, state: CrawlState, query_terms: List[str]) -> float: """New coverage calculation without IDF""" if not query_terms or state.total_documents == 0: return 0.0 term_scores = [] max_tf = max(state.term_frequencies.values()) if state.term_frequencies else 1 for term in query_terms: tf = state.term_frequencies.get(term, 0) df = state.document_frequencies.get(term, 0) if df > 0: # Document coverage: what fraction of docs contain this term doc_coverage = df / state.total_documents # Frequency signal: normalized log frequency freq_signal = math.log(1 + tf) / math.log(1 + max_tf) if max_tf > 0 else 0 # Combined score: document coverage with frequency boost term_score = doc_coverage * (1 + 0.5 * freq_signal) print(f" '{term}': doc_cov={doc_coverage:.2f}, freq_signal={freq_signal:.2f} → score={term_score:.3f}") term_scores.append(term_score) else: print(f" '{term}': not found → 
score=0.000") term_scores.append(0.0) # Average across all query terms coverage = sum(term_scores) / len(term_scores) return coverage async def main(): """Run the confidence test""" tester = ConfidenceTestHarness() await tester.test_confidence_progression() print("\n" + "=" * 80) print("Test complete!") if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/test_llm_embedding.py
tests/adaptive/test_llm_embedding.py
import asyncio import os from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig, LLMConfig async def test_configuration(name: str, config: AdaptiveConfig, url: str, query: str): """Test a specific configuration""" print(f"\n{'='*60}") print(f"Configuration: {name}") print(f"{'='*60}") async with AsyncWebCrawler(verbose=False) as crawler: adaptive = AdaptiveCrawler(crawler, config) result = await adaptive.digest(start_url=url, query=query) print("\n" + "="*50) print("CRAWL STATISTICS") print("="*50) adaptive.print_stats(detailed=False) # Get the most relevant content found print("\n" + "="*50) print("MOST RELEVANT PAGES") print("="*50) relevant_pages = adaptive.get_relevant_content(top_k=5) for i, page in enumerate(relevant_pages, 1): print(f"\n{i}. {page['url']}") print(f" Relevance Score: {page['score']:.2%}") # Show a snippet of the content content = page['content'] or "" if content: snippet = content[:200].replace('\n', ' ') if len(content) > 200: snippet += "..." print(f" Preview: {snippet}") print(f"\n{'='*50}") print(f"Pages crawled: {len(result.crawled_urls)}") print(f"Final confidence: {adaptive.confidence:.1%}") print(f"Stopped reason: {result.metrics.get('stopped_reason', 'max_pages')}") if result.metrics.get('is_irrelevant', False): print("⚠️ Query detected as irrelevant!") return result async def llm_embedding(): """Demonstrate various embedding configurations""" print("EMBEDDING STRATEGY CONFIGURATION EXAMPLES") print("=" * 60) # Base URL and query for testing test_url = "https://docs.python.org/3/library/asyncio.html" openai_llm_config = LLMConfig( provider='openai/text-embedding-3-small', api_token=os.getenv('OPENAI_API_KEY'), temperature=0.7, max_tokens=2000 ) config_openai = AdaptiveConfig( strategy="embedding", max_pages=10, # Use OpenAI embeddings embedding_llm_config=openai_llm_config, # embedding_llm_config={ # 'provider': 'openai/text-embedding-3-small', # 'api_token': os.getenv('OPENAI_API_KEY') # }, # OpenAI embeddings are 
high quality, can be stricter embedding_k_exp=4.0, n_query_variations=12 ) await test_configuration( "OpenAI Embeddings", config_openai, test_url, # "event-driven architecture patterns" "async await context managers coroutines" ) return async def basic_adaptive_crawling(): """Basic adaptive crawling example""" # Initialize the crawler async with AsyncWebCrawler(verbose=True) as crawler: # Create an adaptive crawler with default settings (statistical strategy) adaptive = AdaptiveCrawler(crawler) # Note: You can also use embedding strategy for semantic understanding: # from crawl4ai import AdaptiveConfig # config = AdaptiveConfig(strategy="embedding") # adaptive = AdaptiveCrawler(crawler, config) # Start adaptive crawling print("Starting adaptive crawl for Python async programming information...") result = await adaptive.digest( start_url="https://docs.python.org/3/library/asyncio.html", query="async await context managers coroutines" ) # Display crawl statistics print("\n" + "="*50) print("CRAWL STATISTICS") print("="*50) adaptive.print_stats(detailed=False) # Get the most relevant content found print("\n" + "="*50) print("MOST RELEVANT PAGES") print("="*50) relevant_pages = adaptive.get_relevant_content(top_k=5) for i, page in enumerate(relevant_pages, 1): print(f"\n{i}. {page['url']}") print(f" Relevance Score: {page['score']:.2%}") # Show a snippet of the content content = page['content'] or "" if content: snippet = content[:200].replace('\n', ' ') if len(content) > 200: snippet += "..." 
print(f" Preview: {snippet}") # Show final confidence print(f"\n{'='*50}") print(f"Final Confidence: {adaptive.confidence:.2%}") print(f"Total Pages Crawled: {len(result.crawled_urls)}") print(f"Knowledge Base Size: {len(adaptive.state.knowledge_base)} documents") if adaptive.confidence >= 0.8: print("✓ High confidence - can answer detailed questions about async Python") elif adaptive.confidence >= 0.6: print("~ Moderate confidence - can answer basic questions") else: print("✗ Low confidence - need more information") if __name__ == "__main__": asyncio.run(llm_embedding()) # asyncio.run(basic_adaptive_crawling())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/adaptive/test_embedding_performance.py
tests/adaptive/test_embedding_performance.py
""" Performance test for Embedding Strategy optimizations Measures time and memory usage before and after optimizations """ import asyncio import time import tracemalloc import numpy as np from pathlib import Path import sys import os # Add parent directory to path for imports sys.path.append(str(Path(__file__).parent.parent.parent)) from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig from crawl4ai.adaptive_crawler import EmbeddingStrategy, CrawlState from crawl4ai.models import CrawlResult class PerformanceMetrics: def __init__(self): self.start_time = 0 self.end_time = 0 self.start_memory = 0 self.peak_memory = 0 self.operation_times = {} def start(self): tracemalloc.start() self.start_time = time.perf_counter() self.start_memory = tracemalloc.get_traced_memory()[0] def end(self): self.end_time = time.perf_counter() current, peak = tracemalloc.get_traced_memory() self.peak_memory = peak tracemalloc.stop() def record_operation(self, name: str, duration: float): if name not in self.operation_times: self.operation_times[name] = [] self.operation_times[name].append(duration) @property def total_time(self): return self.end_time - self.start_time @property def memory_used_mb(self): return (self.peak_memory - self.start_memory) / 1024 / 1024 def print_summary(self, label: str): print(f"\n{'='*60}") print(f"Performance Summary: {label}") print(f"{'='*60}") print(f"Total Time: {self.total_time:.3f} seconds") print(f"Memory Used: {self.memory_used_mb:.2f} MB") if self.operation_times: print("\nOperation Breakdown:") for op, times in self.operation_times.items(): avg_time = sum(times) / len(times) total_time = sum(times) print(f" {op}:") print(f" - Calls: {len(times)}") print(f" - Avg Time: {avg_time*1000:.2f} ms") print(f" - Total Time: {total_time:.3f} s") async def create_mock_crawl_results(n: int) -> list: """Create mock crawl results for testing""" results = [] for i in range(n): class MockMarkdown: def __init__(self, content): self.raw_markdown = 
content class MockResult: def __init__(self, url, content): self.url = url self.markdown = MockMarkdown(content) self.success = True content = f"This is test content {i} about async await coroutines event loops. " * 50 result = MockResult(f"https://example.com/page{i}", content) results.append(result) return results async def test_embedding_performance(): """Test the performance of embedding strategy operations""" # Configuration n_kb_docs = 30 # Number of documents in knowledge base n_queries = 10 # Number of query variations n_links = 50 # Number of candidate links n_iterations = 5 # Number of calculation iterations print(f"\nTest Configuration:") print(f"- Knowledge Base Documents: {n_kb_docs}") print(f"- Query Variations: {n_queries}") print(f"- Candidate Links: {n_links}") print(f"- Iterations: {n_iterations}") # Create embedding strategy config = AdaptiveConfig( strategy="embedding", max_pages=50, n_query_variations=n_queries, embedding_model="sentence-transformers/all-MiniLM-L6-v2" # 384 dimensions ) # Set up API key if available if os.getenv('OPENAI_API_KEY'): config.embedding_llm_config = { 'provider': 'openai/text-embedding-3-small', 'api_token': os.getenv('OPENAI_API_KEY'), 'embedding_model': 'text-embedding-3-small' } else: config.embedding_llm_config = { 'provider': 'openai/gpt-4o-mini', 'api_token': 'dummy-key' } strategy = EmbeddingStrategy( embedding_model=config.embedding_model, llm_config=config.embedding_llm_config ) strategy.config = config # Initialize state state = CrawlState() state.query = "async await coroutines event loops tasks" # Start performance monitoring metrics = PerformanceMetrics() metrics.start() # 1. Generate query embeddings print("\n1. 
Generating query embeddings...") start = time.perf_counter() query_embeddings, expanded_queries = await strategy.map_query_semantic_space( state.query, config.n_query_variations ) state.query_embeddings = query_embeddings state.expanded_queries = expanded_queries metrics.record_operation("query_embedding", time.perf_counter() - start) print(f" Generated {len(query_embeddings)} query embeddings") # 2. Build knowledge base incrementally print("\n2. Building knowledge base...") mock_results = await create_mock_crawl_results(n_kb_docs) for i in range(0, n_kb_docs, 5): # Add 5 documents at a time batch = mock_results[i:i+5] start = time.perf_counter() await strategy.update_state(state, batch) metrics.record_operation("update_state", time.perf_counter() - start) state.knowledge_base.extend(batch) print(f" Knowledge base has {len(state.kb_embeddings)} documents") # 3. Test repeated confidence calculations print(f"\n3. Testing {n_iterations} confidence calculations...") for i in range(n_iterations): start = time.perf_counter() confidence = await strategy.calculate_confidence(state) metrics.record_operation("calculate_confidence", time.perf_counter() - start) print(f" Iteration {i+1}: {confidence:.3f} ({(time.perf_counter() - start)*1000:.1f} ms)") # 4. Test coverage gap calculations print(f"\n4. Testing coverage gap calculations...") for i in range(n_iterations): start = time.perf_counter() gaps = strategy.find_coverage_gaps(state.kb_embeddings, state.query_embeddings) metrics.record_operation("find_coverage_gaps", time.perf_counter() - start) print(f" Iteration {i+1}: {len(gaps)} gaps ({(time.perf_counter() - start)*1000:.1f} ms)") # 5. Test validation print(f"\n5. Testing validation coverage...") for i in range(n_iterations): start = time.perf_counter() val_score = await strategy.validate_coverage(state) metrics.record_operation("validate_coverage", time.perf_counter() - start) print(f" Iteration {i+1}: {val_score:.3f} ({(time.perf_counter() - start)*1000:.1f} ms)") # 6. 
Create mock links for ranking from crawl4ai.models import Link mock_links = [] for i in range(n_links): link = Link( href=f"https://example.com/new{i}", text=f"Link about async programming {i}", title=f"Async Guide {i}" ) mock_links.append(link) # 7. Test link selection print(f"\n6. Testing link selection with {n_links} candidates...") start = time.perf_counter() scored_links = await strategy.select_links_for_expansion( mock_links, gaps, state.kb_embeddings ) metrics.record_operation("select_links", time.perf_counter() - start) print(f" Scored {len(scored_links)} links in {(time.perf_counter() - start)*1000:.1f} ms") # End monitoring metrics.end() return metrics async def main(): """Run performance tests before and after optimizations""" print("="*80) print("EMBEDDING STRATEGY PERFORMANCE TEST") print("="*80) # Test current implementation print("\n📊 Testing CURRENT Implementation...") metrics_before = await test_embedding_performance() metrics_before.print_summary("BEFORE Optimizations") # Store key metrics for comparison total_time_before = metrics_before.total_time memory_before = metrics_before.memory_used_mb # Calculate specific operation costs calc_conf_avg = sum(metrics_before.operation_times.get("calculate_confidence", [])) / len(metrics_before.operation_times.get("calculate_confidence", [1])) find_gaps_avg = sum(metrics_before.operation_times.get("find_coverage_gaps", [])) / len(metrics_before.operation_times.get("find_coverage_gaps", [1])) validate_avg = sum(metrics_before.operation_times.get("validate_coverage", [])) / len(metrics_before.operation_times.get("validate_coverage", [1])) print(f"\n🔍 Key Bottlenecks Identified:") print(f" - calculate_confidence: {calc_conf_avg*1000:.1f} ms per call") print(f" - find_coverage_gaps: {find_gaps_avg*1000:.1f} ms per call") print(f" - validate_coverage: {validate_avg*1000:.1f} ms per call") print("\n" + "="*80) print("EXPECTED IMPROVEMENTS AFTER OPTIMIZATION:") print("- Distance calculations: 80-90% faster 
(vectorization)") print("- Memory usage: 20-30% reduction (deduplication)") print("- Overall performance: 60-70% improvement") print("="*80) if __name__ == "__main__": asyncio.run(main())
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/memory/benchmark_report.py
tests/memory/benchmark_report.py
#!/usr/bin/env python3 """ Benchmark reporting tool for Crawl4AI stress tests. Generates visual reports and comparisons between test runs. """ import os import json import glob import argparse import sys from datetime import datetime from pathlib import Path from rich.console import Console from rich.table import Table from rich.panel import Panel # Initialize rich console console = Console() # Try to import optional visualization dependencies VISUALIZATION_AVAILABLE = True try: import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import numpy as np import seaborn as sns except ImportError: VISUALIZATION_AVAILABLE = False console.print("[yellow]Warning: Visualization dependencies not found. Install with:[/yellow]") console.print("[yellow]pip install pandas matplotlib seaborn[/yellow]") console.print("[yellow]Only text-based reports will be generated.[/yellow]") # Configure plotting if available if VISUALIZATION_AVAILABLE: # Set plot style for dark theme plt.style.use('dark_background') sns.set_theme(style="darkgrid") # Custom color palette based on Nord theme nord_palette = ["#88c0d0", "#81a1c1", "#a3be8c", "#ebcb8b", "#bf616a", "#b48ead", "#5e81ac"] sns.set_palette(nord_palette) class BenchmarkReporter: """Generates visual reports and comparisons for Crawl4AI stress tests.""" def __init__(self, reports_dir="reports", output_dir="benchmark_reports"): """Initialize the benchmark reporter. 
Args: reports_dir: Directory containing test result files output_dir: Directory to save generated reports """ self.reports_dir = Path(reports_dir) self.output_dir = Path(output_dir) self.output_dir.mkdir(parents=True, exist_ok=True) # Configure matplotlib if available if VISUALIZATION_AVAILABLE: # Ensure the matplotlib backend works in headless environments mpl.use('Agg') # Set up styling for plots with dark theme mpl.rcParams['figure.figsize'] = (12, 8) mpl.rcParams['font.size'] = 12 mpl.rcParams['axes.labelsize'] = 14 mpl.rcParams['axes.titlesize'] = 16 mpl.rcParams['xtick.labelsize'] = 12 mpl.rcParams['ytick.labelsize'] = 12 mpl.rcParams['legend.fontsize'] = 12 mpl.rcParams['figure.facecolor'] = '#1e1e1e' mpl.rcParams['axes.facecolor'] = '#2e3440' mpl.rcParams['savefig.facecolor'] = '#1e1e1e' mpl.rcParams['text.color'] = '#e0e0e0' mpl.rcParams['axes.labelcolor'] = '#e0e0e0' mpl.rcParams['xtick.color'] = '#e0e0e0' mpl.rcParams['ytick.color'] = '#e0e0e0' mpl.rcParams['grid.color'] = '#444444' mpl.rcParams['figure.edgecolor'] = '#444444' def load_test_results(self, limit=None): """Load all test results from the reports directory. 
Args: limit: Optional limit on number of most recent tests to load Returns: Dictionary mapping test IDs to result data """ result_files = glob.glob(str(self.reports_dir / "test_results_*.json")) # Sort files by modification time (newest first) result_files.sort(key=os.path.getmtime, reverse=True) if limit: result_files = result_files[:limit] results = {} for file_path in result_files: try: with open(file_path, 'r') as f: data = json.load(f) test_id = data.get('test_id') if test_id: results[test_id] = data # Try to load the corresponding memory samples csv_path = self.reports_dir / f"memory_samples_{test_id}.csv" if csv_path.exists(): try: memory_df = pd.read_csv(csv_path) results[test_id]['memory_samples'] = memory_df except Exception as e: console.print(f"[yellow]Warning: Could not load memory samples for {test_id}: {e}[/yellow]") except Exception as e: console.print(f"[red]Error loading {file_path}: {e}[/red]") console.print(f"Loaded {len(results)} test results") return results def generate_summary_table(self, results): """Generate a summary table of test results. 
Args: results: Dictionary mapping test IDs to result data Returns: Rich Table object """ table = Table(title="Crawl4AI Stress Test Summary", show_header=True) # Define columns table.add_column("Test ID", style="cyan") table.add_column("Date", style="bright_green") table.add_column("URLs", justify="right") table.add_column("Workers", justify="right") table.add_column("Success %", justify="right") table.add_column("Time (s)", justify="right") table.add_column("Mem Growth", justify="right") table.add_column("URLs/sec", justify="right") # Add rows for test_id, data in sorted(results.items(), key=lambda x: x[0], reverse=True): # Parse timestamp from test_id try: date_str = datetime.strptime(test_id, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M") except: date_str = "Unknown" # Calculate success percentage total_urls = data.get('url_count', 0) successful = data.get('successful_urls', 0) success_pct = (successful / total_urls * 100) if total_urls > 0 else 0 # Calculate memory growth if available mem_growth = "N/A" if 'memory_samples' in data: samples = data['memory_samples'] if len(samples) >= 2: # Try to extract numeric values from memory_info strings try: first_mem = float(samples.iloc[0]['memory_info'].split()[0]) last_mem = float(samples.iloc[-1]['memory_info'].split()[0]) mem_growth = f"{last_mem - first_mem:.1f} MB" except: pass # Calculate URLs per second time_taken = data.get('total_time_seconds', 0) urls_per_sec = total_urls / time_taken if time_taken > 0 else 0 table.add_row( test_id, date_str, str(total_urls), str(data.get('workers', 'N/A')), f"{success_pct:.1f}%", f"{data.get('total_time_seconds', 0):.2f}", mem_growth, f"{urls_per_sec:.1f}" ) return table def generate_performance_chart(self, results, output_file=None): """Generate a performance comparison chart. 
Args: results: Dictionary mapping test IDs to result data output_file: File path to save the chart Returns: Path to the saved chart file or None if visualization is not available """ if not VISUALIZATION_AVAILABLE: console.print("[yellow]Skipping performance chart - visualization dependencies not available[/yellow]") return None # Extract relevant data data = [] for test_id, result in results.items(): urls = result.get('url_count', 0) workers = result.get('workers', 0) time_taken = result.get('total_time_seconds', 0) urls_per_sec = urls / time_taken if time_taken > 0 else 0 # Parse timestamp from test_id for sorting try: timestamp = datetime.strptime(test_id, "%Y%m%d_%H%M%S") data.append({ 'test_id': test_id, 'timestamp': timestamp, 'urls': urls, 'workers': workers, 'time_seconds': time_taken, 'urls_per_sec': urls_per_sec }) except: console.print(f"[yellow]Warning: Could not parse timestamp from {test_id}[/yellow]") if not data: console.print("[yellow]No valid data for performance chart[/yellow]") return None # Convert to DataFrame and sort by timestamp df = pd.DataFrame(data) df = df.sort_values('timestamp') # Create the plot fig, ax1 = plt.subplots(figsize=(12, 6)) # Plot URLs per second as bars with properly set x-axis x_pos = range(len(df['test_id'])) bars = ax1.bar(x_pos, df['urls_per_sec'], color='#88c0d0', alpha=0.8) ax1.set_ylabel('URLs per Second', color='#88c0d0') ax1.tick_params(axis='y', labelcolor='#88c0d0') # Properly set x-axis labels ax1.set_xticks(x_pos) ax1.set_xticklabels(df['test_id'].tolist(), rotation=45, ha='right') # Add worker count as text on each bar for i, bar in enumerate(bars): height = bar.get_height() workers = df.iloc[i]['workers'] ax1.text(i, height + 0.1, f'W: {workers}', ha='center', va='bottom', fontsize=9, color='#e0e0e0') # Add a second y-axis for total URLs ax2 = ax1.twinx() ax2.plot(x_pos, df['urls'], '-', color='#bf616a', alpha=0.8, markersize=6, marker='o') ax2.set_ylabel('Total URLs', color='#bf616a') 
ax2.tick_params(axis='y', labelcolor='#bf616a') # Set title and layout plt.title('Crawl4AI Performance Benchmarks') plt.tight_layout() # Save the figure if output_file is None: output_file = self.output_dir / "performance_comparison.png" plt.savefig(output_file, dpi=100, bbox_inches='tight') plt.close() return output_file def generate_memory_charts(self, results, output_prefix=None): """Generate memory usage charts for each test. Args: results: Dictionary mapping test IDs to result data output_prefix: Prefix for output file names Returns: List of paths to the saved chart files """ if not VISUALIZATION_AVAILABLE: console.print("[yellow]Skipping memory charts - visualization dependencies not available[/yellow]") return [] output_files = [] for test_id, result in results.items(): if 'memory_samples' not in result: continue memory_df = result['memory_samples'] # Check if we have enough data points if len(memory_df) < 2: continue # Try to extract numeric values from memory_info strings try: memory_values = [] for mem_str in memory_df['memory_info']: # Extract the number from strings like "142.8 MB" value = float(mem_str.split()[0]) memory_values.append(value) memory_df['memory_mb'] = memory_values except Exception as e: console.print(f"[yellow]Could not parse memory values for {test_id}: {e}[/yellow]") continue # Create the plot plt.figure(figsize=(10, 6)) # Plot memory usage over time plt.plot(memory_df['elapsed_seconds'], memory_df['memory_mb'], color='#88c0d0', marker='o', linewidth=2, markersize=4) # Add annotations for chunk processing chunk_size = result.get('chunk_size', 0) url_count = result.get('url_count', 0) if chunk_size > 0 and url_count > 0: # Estimate chunk processing times num_chunks = (url_count + chunk_size - 1) // chunk_size # Ceiling division total_time = result.get('total_time_seconds', memory_df['elapsed_seconds'].max()) chunk_times = np.linspace(0, total_time, num_chunks + 1)[1:] for i, time_point in enumerate(chunk_times): if time_point <= 
memory_df['elapsed_seconds'].max(): plt.axvline(x=time_point, color='#4c566a', linestyle='--', alpha=0.6) plt.text(time_point, memory_df['memory_mb'].min(), f'Chunk {i+1}', rotation=90, verticalalignment='bottom', fontsize=8, color='#e0e0e0') # Set labels and title plt.xlabel('Elapsed Time (seconds)', color='#e0e0e0') plt.ylabel('Memory Usage (MB)', color='#e0e0e0') plt.title(f'Memory Usage During Test {test_id}\n({url_count} URLs, {result.get("workers", "?")} Workers)', color='#e0e0e0') # Add grid and set y-axis to start from zero plt.grid(True, alpha=0.3, color='#4c566a') # Add test metadata as text info_text = ( f"URLs: {url_count}\n" f"Workers: {result.get('workers', 'N/A')}\n" f"Chunk Size: {result.get('chunk_size', 'N/A')}\n" f"Total Time: {result.get('total_time_seconds', 0):.2f}s\n" ) # Calculate memory growth if len(memory_df) >= 2: first_mem = memory_df.iloc[0]['memory_mb'] last_mem = memory_df.iloc[-1]['memory_mb'] growth = last_mem - first_mem growth_rate = growth / result.get('total_time_seconds', 1) info_text += f"Memory Growth: {growth:.1f} MB\n" info_text += f"Growth Rate: {growth_rate:.2f} MB/s" plt.figtext(0.02, 0.02, info_text, fontsize=9, color='#e0e0e0', bbox=dict(facecolor='#3b4252', alpha=0.8, edgecolor='#4c566a')) # Save the figure if output_prefix is None: output_file = self.output_dir / f"memory_chart_{test_id}.png" else: output_file = Path(f"{output_prefix}_memory_{test_id}.png") plt.tight_layout() plt.savefig(output_file, dpi=100, bbox_inches='tight') plt.close() output_files.append(output_file) return output_files def generate_comparison_report(self, results, title=None, output_file=None): """Generate a comprehensive comparison report of multiple test runs. 
Args: results: Dictionary mapping test IDs to result data title: Optional title for the report output_file: File path to save the report Returns: Path to the saved report file """ if not results: console.print("[yellow]No results to generate comparison report[/yellow]") return None if output_file is None: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = self.output_dir / f"comparison_report_{timestamp}.html" # Create data for the report rows = [] for test_id, data in results.items(): # Calculate metrics urls = data.get('url_count', 0) workers = data.get('workers', 0) successful = data.get('successful_urls', 0) failed = data.get('failed_urls', 0) time_seconds = data.get('total_time_seconds', 0) # Calculate additional metrics success_rate = (successful / urls) * 100 if urls > 0 else 0 urls_per_second = urls / time_seconds if time_seconds > 0 else 0 urls_per_worker = urls / workers if workers > 0 else 0 # Calculate memory growth if available mem_start = None mem_end = None mem_growth = None if 'memory_samples' in data: samples = data['memory_samples'] if len(samples) >= 2: try: first_mem = float(samples.iloc[0]['memory_info'].split()[0]) last_mem = float(samples.iloc[-1]['memory_info'].split()[0]) mem_start = first_mem mem_end = last_mem mem_growth = last_mem - first_mem except: pass # Parse timestamp from test_id try: timestamp = datetime.strptime(test_id, "%Y%m%d_%H%M%S") except: timestamp = None rows.append({ 'test_id': test_id, 'timestamp': timestamp, 'date': timestamp.strftime("%Y-%m-%d %H:%M:%S") if timestamp else "Unknown", 'urls': urls, 'workers': workers, 'chunk_size': data.get('chunk_size', 0), 'successful': successful, 'failed': failed, 'success_rate': success_rate, 'time_seconds': time_seconds, 'urls_per_second': urls_per_second, 'urls_per_worker': urls_per_worker, 'memory_start': mem_start, 'memory_end': mem_end, 'memory_growth': mem_growth }) # Sort data by timestamp if possible if VISUALIZATION_AVAILABLE: # Convert to DataFrame and 
sort by timestamp df = pd.DataFrame(rows) if 'timestamp' in df.columns and not df['timestamp'].isna().all(): df = df.sort_values('timestamp', ascending=False) else: # Simple sorting without pandas rows.sort(key=lambda x: x.get('timestamp', datetime.now()), reverse=True) df = None # Generate HTML report html = [] html.append('<!DOCTYPE html>') html.append('<html lang="en">') html.append('<head>') html.append('<meta charset="UTF-8">') html.append('<meta name="viewport" content="width=device-width, initial-scale=1.0">') html.append(f'<title>{title or "Crawl4AI Benchmark Comparison"}</title>') html.append('<style>') html.append(''' body { font-family: Arial, sans-serif; line-height: 1.6; margin: 0; padding: 20px; max-width: 1200px; margin: 0 auto; color: #e0e0e0; background-color: #1e1e1e; } h1, h2, h3 { color: #81a1c1; } table { border-collapse: collapse; width: 100%; margin-bottom: 20px; } th, td { text-align: left; padding: 12px; border-bottom: 1px solid #444; } th { background-color: #2e3440; font-weight: bold; } tr:hover { background-color: #2e3440; } a { color: #88c0d0; text-decoration: none; } a:hover { text-decoration: underline; } .chart-container { margin: 30px 0; text-align: center; background-color: #2e3440; padding: 20px; border-radius: 8px; } .chart-container img { max-width: 100%; height: auto; border: 1px solid #444; box-shadow: 0 0 10px rgba(0,0,0,0.3); } .card { border: 1px solid #444; border-radius: 8px; padding: 15px; margin-bottom: 20px; background-color: #2e3440; box-shadow: 0 0 10px rgba(0,0,0,0.2); } .highlight { background-color: #3b4252; font-weight: bold; } .status-good { color: #a3be8c; } .status-warning { color: #ebcb8b; } .status-bad { color: #bf616a; } ''') html.append('</style>') html.append('</head>') html.append('<body>') # Header html.append(f'<h1>{title or "Crawl4AI Benchmark Comparison"}</h1>') html.append(f'<p>Report generated on {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}</p>') # Summary section html.append('<div 
class="card">') html.append('<h2>Summary</h2>') html.append('<p>This report compares the performance of Crawl4AI across multiple test runs.</p>') # Summary metrics data_available = (VISUALIZATION_AVAILABLE and df is not None and not df.empty) or (not VISUALIZATION_AVAILABLE and len(rows) > 0) if data_available: # Get the latest test data if VISUALIZATION_AVAILABLE and df is not None and not df.empty: latest_test = df.iloc[0] latest_id = latest_test['test_id'] else: latest_test = rows[0] # First row (already sorted by timestamp) latest_id = latest_test['test_id'] html.append('<h3>Latest Test Results</h3>') html.append('<ul>') html.append(f'<li><strong>Test ID:</strong> {latest_id}</li>') html.append(f'<li><strong>Date:</strong> {latest_test["date"]}</li>') html.append(f'<li><strong>URLs:</strong> {latest_test["urls"]}</li>') html.append(f'<li><strong>Workers:</strong> {latest_test["workers"]}</li>') html.append(f'<li><strong>Success Rate:</strong> {latest_test["success_rate"]:.1f}%</li>') html.append(f'<li><strong>Time:</strong> {latest_test["time_seconds"]:.2f} seconds</li>') html.append(f'<li><strong>Performance:</strong> {latest_test["urls_per_second"]:.1f} URLs/second</li>') # Check memory growth (handle both pandas and dict mode) memory_growth_available = False if VISUALIZATION_AVAILABLE and df is not None: if pd.notna(latest_test["memory_growth"]): html.append(f'<li><strong>Memory Growth:</strong> {latest_test["memory_growth"]:.1f} MB</li>') memory_growth_available = True else: if latest_test["memory_growth"] is not None: html.append(f'<li><strong>Memory Growth:</strong> {latest_test["memory_growth"]:.1f} MB</li>') memory_growth_available = True html.append('</ul>') # If we have more than one test, show trend if (VISUALIZATION_AVAILABLE and df is not None and len(df) > 1) or (not VISUALIZATION_AVAILABLE and len(rows) > 1): if VISUALIZATION_AVAILABLE and df is not None: prev_test = df.iloc[1] else: prev_test = rows[1] # Calculate performance change perf_change 
= ((latest_test["urls_per_second"] / prev_test["urls_per_second"]) - 1) * 100 if prev_test["urls_per_second"] > 0 else 0 status_class = "" if perf_change > 5: status_class = "status-good" elif perf_change < -5: status_class = "status-bad" html.append('<h3>Performance Trend</h3>') html.append('<ul>') html.append(f'<li><strong>Performance Change:</strong> <span class="{status_class}">{perf_change:+.1f}%</span> compared to previous test</li>') # Memory trend if available memory_trend_available = False if VISUALIZATION_AVAILABLE and df is not None: if pd.notna(latest_test["memory_growth"]) and pd.notna(prev_test["memory_growth"]): mem_change = latest_test["memory_growth"] - prev_test["memory_growth"] memory_trend_available = True else: if latest_test["memory_growth"] is not None and prev_test["memory_growth"] is not None: mem_change = latest_test["memory_growth"] - prev_test["memory_growth"] memory_trend_available = True if memory_trend_available: mem_status = "" if mem_change < -1: # Improved (less growth) mem_status = "status-good" elif mem_change > 1: # Worse (more growth) mem_status = "status-bad" html.append(f'<li><strong>Memory Trend:</strong> <span class="{mem_status}">{mem_change:+.1f} MB</span> change in memory growth</li>') html.append('</ul>') html.append('</div>') # Generate performance chart if visualization is available if VISUALIZATION_AVAILABLE: perf_chart = self.generate_performance_chart(results) if perf_chart: html.append('<div class="chart-container">') html.append('<h2>Performance Comparison</h2>') html.append(f'<img src="{os.path.relpath(perf_chart, os.path.dirname(output_file))}" alt="Performance Comparison Chart">') html.append('</div>') else: html.append('<div class="chart-container">') html.append('<h2>Performance Comparison</h2>') html.append('<p>Charts not available - install visualization dependencies (pandas, matplotlib, seaborn) to enable.</p>') html.append('</div>') # Generate memory charts if visualization is available if 
VISUALIZATION_AVAILABLE: memory_charts = self.generate_memory_charts(results) if memory_charts: html.append('<div class="chart-container">') html.append('<h2>Memory Usage</h2>') for chart in memory_charts: test_id = chart.stem.split('_')[-1] html.append(f'<h3>Test {test_id}</h3>') html.append(f'<img src="{os.path.relpath(chart, os.path.dirname(output_file))}" alt="Memory Chart for {test_id}">') html.append('</div>') else: html.append('<div class="chart-container">') html.append('<h2>Memory Usage</h2>') html.append('<p>Charts not available - install visualization dependencies (pandas, matplotlib, seaborn) to enable.</p>') html.append('</div>') # Detailed results table html.append('<h2>Detailed Results</h2>') # Add the results as an HTML table html.append('<table>') # Table headers html.append('<tr>') for col in ['Test ID', 'Date', 'URLs', 'Workers', 'Success %', 'Time (s)', 'URLs/sec', 'Mem Growth (MB)']: html.append(f'<th>{col}</th>') html.append('</tr>') # Table rows - handle both pandas DataFrame and list of dicts if VISUALIZATION_AVAILABLE and df is not None: # Using pandas DataFrame for _, row in df.iterrows(): html.append('<tr>') html.append(f'<td>{row["test_id"]}</td>') html.append(f'<td>{row["date"]}</td>') html.append(f'<td>{row["urls"]}</td>') html.append(f'<td>{row["workers"]}</td>') html.append(f'<td>{row["success_rate"]:.1f}%</td>') html.append(f'<td>{row["time_seconds"]:.2f}</td>') html.append(f'<td>{row["urls_per_second"]:.1f}</td>') # Memory growth cell if pd.notna(row["memory_growth"]): html.append(f'<td>{row["memory_growth"]:.1f}</td>') else: html.append('<td>N/A</td>') html.append('</tr>') else: # Using list of dicts (when pandas is not available) for row in rows: html.append('<tr>') html.append(f'<td>{row["test_id"]}</td>') html.append(f'<td>{row["date"]}</td>') html.append(f'<td>{row["urls"]}</td>') html.append(f'<td>{row["workers"]}</td>') html.append(f'<td>{row["success_rate"]:.1f}%</td>') html.append(f'<td>{row["time_seconds"]:.2f}</td>') 
html.append(f'<td>{row["urls_per_second"]:.1f}</td>') # Memory growth cell if row["memory_growth"] is not None: html.append(f'<td>{row["memory_growth"]:.1f}</td>') else: html.append('<td>N/A</td>') html.append('</tr>') html.append('</table>') # Conclusion section html.append('<div class="card">') html.append('<h2>Conclusion</h2>') if VISUALIZATION_AVAILABLE and df is not None and not df.empty: # Using pandas for statistics (when available) # Calculate some overall statistics avg_urls_per_sec = df['urls_per_second'].mean() max_urls_per_sec = df['urls_per_second'].max() # Determine if we have a trend if len(df) > 1: trend_data = df.sort_values('timestamp') first_perf = trend_data.iloc[0]['urls_per_second'] last_perf = trend_data.iloc[-1]['urls_per_second'] perf_change = ((last_perf / first_perf) - 1) * 100 if first_perf > 0 else 0 if perf_change > 10: trend_desc = "significantly improved" trend_class = "status-good" elif perf_change > 5: trend_desc = "improved" trend_class = "status-good" elif perf_change < -10: trend_desc = "significantly decreased" trend_class = "status-bad" elif perf_change < -5: trend_desc = "decreased" trend_class = "status-bad" else: trend_desc = "remained stable" trend_class = "" html.append(f'<p>Overall performance has <span class="{trend_class}">{trend_desc}</span> over the test period.</p>') html.append(f'<p>Average throughput: <strong>{avg_urls_per_sec:.1f}</strong> URLs/second</p>') html.append(f'<p>Maximum throughput: <strong>{max_urls_per_sec:.1f}</strong> URLs/second</p>') # Memory leak assessment if 'memory_growth' in df.columns and not df['memory_growth'].isna().all(): avg_growth = df['memory_growth'].mean() max_growth = df['memory_growth'].max() if avg_growth < 5: leak_assessment = "No significant memory leaks detected" leak_class = "status-good" elif avg_growth < 10: leak_assessment = "Minor memory growth observed" leak_class = "status-warning" else: leak_assessment = "Potential memory leak detected" leak_class = "status-bad" 
html.append(f'<p><span class="{leak_class}">{leak_assessment}</span>. Average memory growth: <strong>{avg_growth:.1f} MB</strong> per test.</p>') else: # Manual calculations without pandas
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
true
unclecode/crawl4ai
https://github.com/unclecode/crawl4ai/blob/c85f56b085a1d5b62779774d48887345e566869b/tests/memory/test_crawler_monitor.py
tests/memory/test_crawler_monitor.py
"""
Test script for the CrawlerMonitor component.

This script simulates a crawler with multiple tasks to demonstrate
the real-time monitoring capabilities.
"""
import time
import uuid
import random
import threading
import sys
import os

# Make the crawl4ai package importable when running from the tests directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from crawl4ai.components.crawler_monitor import CrawlerMonitor
from crawl4ai.models import CrawlStatus


def simulate_crawler_task(monitor, task_id, url, simulate_failure=False):
    """Drive one fake crawl task through QUEUED -> IN_PROGRESS -> terminal state."""
    # Linger in the QUEUED state for a random moment before starting.
    queued_for = random.uniform(0.5, 3.0)
    time.sleep(queued_for)

    monitor.update_task(
        task_id=task_id,
        status=CrawlStatus.IN_PROGRESS,
        start_time=time.time(),
        wait_time=queued_for,
    )

    # While "running", periodically report a fluctuating memory footprint,
    # keeping the task's recorded peak up to date.
    run_duration = random.uniform(1.0, 5.0)
    for _ in range(int(run_duration * 2)):
        sampled_memory = random.uniform(5.0, 25.0)
        previous_peak = monitor.get_task_stats(task_id).get("peak_memory", 0)
        monitor.update_task(
            task_id=task_id,
            memory_usage=sampled_memory,
            peak_memory=max(sampled_memory, previous_peak),
        )
        time.sleep(0.5)

    # Finish up: failure-prone tasks fail 80% of the time, all others complete.
    if simulate_failure and random.random() < 0.8:
        monitor.update_task(
            task_id=task_id,
            status=CrawlStatus.FAILED,
            end_time=time.time(),
            error_message="Simulated failure: Connection timeout",
            memory_usage=0.0,
        )
    else:
        monitor.update_task(
            task_id=task_id,
            status=CrawlStatus.COMPLETED,
            end_time=time.time(),
            memory_usage=0.0,
        )


def update_queue_stats(monitor, num_queued_tasks):
    """Continuously refresh queue statistics and memory pressure while the monitor runs."""
    while monitor.is_running:
        snapshot = monitor.get_all_task_stats()
        queued = [
            task for task in snapshot.values()
            if task["status"] == CrawlStatus.QUEUED.name
        ]

        longest_wait = 0.0
        mean_wait = 0.0
        if queued:
            now = time.time()
            # Tasks without an enqueue_time count as having waited 0 seconds.
            waits = [now - task.get("enqueue_time", now) for task in queued]
            if waits:
                longest_wait = max(waits)
                mean_wait = sum(waits) / len(waits)

        monitor.update_queue_statistics(
            total_queued=len(queued),
            highest_wait_time=longest_wait,
            avg_wait_time=mean_wait,
        )

        # Derive a coarse memory-pressure signal from how many tasks are running.
        running = sum(
            1 for task in monitor.get_all_task_stats().values()
            if task["status"] == CrawlStatus.IN_PROGRESS.name
        )
        if running > 8:
            monitor.update_memory_status("CRITICAL")
        elif running > 4:
            monitor.update_memory_status("PRESSURE")
        else:
            monitor.update_memory_status("NORMAL")

        time.sleep(1.0)


def test_crawler_monitor():
    """Exercise CrawlerMonitor with a batch of simulated crawler tasks."""
    # Total number of URLs to crawl.
    total_urls = 50

    monitor = CrawlerMonitor(urls_total=total_urls, refresh_rate=0.5)
    monitor.start()

    # Background thread that keeps queue/memory statistics fresh.
    stats_thread = threading.Thread(target=update_queue_stats, args=(monitor, total_urls))
    stats_thread.daemon = True
    stats_thread.start()

    try:
        # Register every task up front, preparing one worker thread per URL.
        workers = []
        for index in range(total_urls):
            task_id = str(uuid.uuid4())
            url = f"https://example.com/page{index}"
            monitor.add_task(task_id, url)

            worker = threading.Thread(
                target=simulate_crawler_task,
                # Every 10th task is allowed to simulate a failure.
                args=(monitor, task_id, url, index % 10 == 0),
            )
            worker.daemon = True
            workers.append(worker)

        # Launch workers in small staggered batches to mimic tasks arriving over time.
        batch_size = 5
        for start in range(0, len(workers), batch_size):
            for worker in workers[start:start + batch_size]:
                worker.start()
                time.sleep(0.5)  # small delay between individual thread starts
            time.sleep(2.0)  # pause before releasing the next batch

        # Wait for every simulated task to finish.
        for worker in workers:
            worker.join()

        # Keep the monitor running a bit longer to observe the final state.
        time.sleep(5.0)
    except KeyboardInterrupt:
        print("\nTest interrupted by user")
    finally:
        monitor.stop()

    print("\nCrawler monitor test completed")


if __name__ == "__main__":
    test_crawler_monitor()
python
Apache-2.0
c85f56b085a1d5b62779774d48887345e566869b
2026-01-04T14:38:51.943025Z
false