sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
crewAIInc/crewAI:lib/crewai/src/crewai/flow/config.py | from typing import Any, Literal, TypedDict
from typing_extensions import NotRequired
# Literal aliases for the hex colors used throughout the flow visualization.
DarkGray = Literal["#333333"]
CrewAIOrange = Literal["#FF5A50"]
Gray = Literal["#666666"]
White = Literal["#FFFFFF"]
Black = Literal["#000000"]

# Concrete constants matching the Literal aliases above.
DARK_GRAY: Literal["#333333"] = "#333333"
CREWAI_ORANGE: Literal["#FF5A50"] = "#FF5A50"
GRAY: Literal["#666666"] = "#666666"
WHITE: Literal["#FFFFFF"] = "#FFFFFF"
BLACK: Literal["#000000"] = "#000000"
class FlowColors(TypedDict):
    """Color palette keys used when rendering a flow graph."""

    bg: White  # canvas background
    start: CrewAIOrange  # start-method nodes
    method: DarkGray  # regular method nodes
    router: DarkGray  # router node fill
    router_border: CrewAIOrange  # router node border
    edge: Gray  # regular edges
    router_edge: CrewAIOrange  # edges leaving routers
    text: White  # node label text
class FontStyles(TypedDict, total=False):
    """Font options for a node label; all keys optional (total=False)."""

    color: DarkGray | CrewAIOrange | Gray | White | Black
    multi: Literal["html"]  # enables HTML-formatted labels
class StartNodeStyle(TypedDict):
    """Visual style for flow start-method nodes."""

    color: CrewAIOrange
    shape: Literal["box"]
    font: FontStyles
    label: NotRequired[str]
    margin: dict[str, int]  # per-side label padding (top/bottom/left/right)
class MethodNodeStyle(TypedDict):
    """Visual style for regular flow method nodes."""

    color: DarkGray
    shape: Literal["box"]
    font: FontStyles
    label: NotRequired[str]
    margin: dict[str, int]  # per-side label padding (top/bottom/left/right)
class RouterNodeStyle(TypedDict):
    """Visual style for router nodes (dashed border, distinct highlight)."""

    color: dict[str, Any]  # nested background/border/highlight mapping
    shape: Literal["box"]
    font: FontStyles
    label: NotRequired[str]
    borderWidth: int
    borderWidthSelected: int
    shapeProperties: dict[str, list[int] | bool]  # e.g. borderDashes pattern
    margin: dict[str, int]
class CrewNodeStyle(TypedDict):
    """Visual style for crew nodes (solid orange border on white)."""

    color: dict[str, CrewAIOrange | White]
    shape: Literal["box"]
    font: FontStyles
    label: NotRequired[str]
    borderWidth: int
    borderWidthSelected: int
    shapeProperties: dict[str, bool]  # borderDashes disabled for crews
    margin: dict[str, int]
class NodeStyles(TypedDict):
    """Mapping from node kind to its style definition."""

    start: StartNodeStyle
    method: MethodNodeStyle
    router: RouterNodeStyle
    crew: CrewNodeStyle
# Default color palette applied when rendering flow graphs.
COLORS: FlowColors = {
    "bg": WHITE,
    "start": CREWAI_ORANGE,
    "method": DARK_GRAY,
    "router": DARK_GRAY,
    "router_border": CREWAI_ORANGE,
    "edge": GRAY,
    "router_edge": CREWAI_ORANGE,
    "text": WHITE,
}
# Default per-node-kind styles; all node kinds share the same margins.
NODE_STYLES: NodeStyles = {
    # Start methods: solid orange box, white text.
    "start": {
        "color": CREWAI_ORANGE,
        "shape": "box",
        "font": {"color": WHITE},
        "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10},
    },
    # Regular methods: solid dark-gray box, white text.
    "method": {
        "color": DARK_GRAY,
        "shape": "box",
        "font": {"color": WHITE},
        "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10},
    },
    # Routers: dark fill with dashed orange border to signal branching.
    "router": {
        "color": {
            "background": DARK_GRAY,
            "border": CREWAI_ORANGE,
            "highlight": {
                "border": CREWAI_ORANGE,
                "background": DARK_GRAY,
            },
        },
        "shape": "box",
        "font": {"color": WHITE},
        "borderWidth": 3,
        "borderWidthSelected": 4,
        "shapeProperties": {"borderDashes": [5, 5]},  # dash/gap lengths
        "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10},
    },
    # Crews: white fill with solid orange border, black text.
    "crew": {
        "color": {
            "background": WHITE,
            "border": CREWAI_ORANGE,
        },
        "shape": "box",
        "font": {"color": BLACK},
        "borderWidth": 3,
        "borderWidthSelected": 4,
        "shapeProperties": {"borderDashes": False},
        "margin": {"top": 10, "bottom": 8, "left": 10, "right": 10},
    },
}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/config.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/flow_wrappers.py | """Wrapper classes for flow decorated methods with type-safe metadata."""
from __future__ import annotations
from collections.abc import Callable, Sequence
import functools
import inspect
from typing import Any, Generic, Literal, ParamSpec, TypeAlias, TypeVar, TypedDict
from typing_extensions import Required, Self
from crewai.flow.types import FlowMethodName
# Generic parameters preserving the wrapped method's signature and return type.
P = ParamSpec("P")
R = TypeVar("R")

# How multiple trigger methods combine: any ("OR") or all ("AND").
FlowConditionType: TypeAlias = Literal["OR", "AND"]
# A (condition type, trigger method names) pair.
SimpleFlowCondition: TypeAlias = tuple[FlowConditionType, list[FlowMethodName]]
class FlowCondition(TypedDict, total=False):
    """Type definition for flow trigger conditions.

    This is a recursive structure where conditions can contain nested FlowConditions.

    Attributes:
        type: The type of the condition.
        conditions: A list of conditions types.
        methods: A list of methods.
    """

    type: Required[FlowConditionType]  # only mandatory key
    conditions: Sequence[FlowMethodName | FlowCondition]
    methods: list[FlowMethodName]


# A flat list of trigger sources: method names and/or nested conditions.
FlowConditions: TypeAlias = list[FlowMethodName | FlowCondition]
class FlowMethod(Generic[P, R]):
    """Base wrapper for flow methods with decorator metadata.

    This class provides a type-safe way to add metadata to methods
    while preserving their callable signature and attributes. It handles
    both bound (instance) and unbound (class) method states.
    """

    def __init__(self, meth: Callable[P, R], instance: Any = None) -> None:
        """Initialize the flow method wrapper.

        Args:
            meth: The method to wrap.
            instance: The instance to bind to (None for unbound).
        """
        self._meth = meth
        self._instance = instance
        # Copy __name__/__doc__/etc. from meth; updated=[] avoids merging __dict__.
        functools.update_wrapper(self, meth, updated=[])
        # Re-wrap the copied name in the FlowMethodName newtype.
        self.__name__: FlowMethodName = FlowMethodName(self.__name__)
        self.__signature__ = inspect.signature(meth)
        if instance is not None:
            self.__self__ = instance
        if inspect.iscoroutinefunction(meth):
            try:
                # Python 3.12+: mark the wrapper itself as a coroutine function.
                inspect.markcoroutinefunction(self)
            except AttributeError:
                # Older Pythons: fall back to the private asyncio marker.
                import asyncio.coroutines

                self._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore[attr-defined]
        # Preserve flow-related attributes from wrapped method (e.g., from @human_feedback)
        for attr in [
            "__is_router__",
            "__router_paths__",
            "__human_feedback_config__",
        ]:
            if hasattr(meth, attr):
                setattr(self, attr, getattr(meth, attr))

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
        """Call the wrapped method.

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            The result of calling the wrapped method.
        """
        # When bound, prepend the instance as the implicit self argument.
        if self._instance is not None:
            return self._meth(self._instance, *args, **kwargs)
        return self._meth(*args, **kwargs)

    def unwrap(self) -> Callable[P, R]:
        """Get the original unwrapped method.

        Returns:
            The original method before decoration.
        """
        return self._meth

    def __get__(self, instance: Any, owner: type | None = None) -> Self:
        """Support the descriptor protocol for method binding.

        This allows the wrapped method to be properly bound to an instance
        when accessed as an attribute.

        Args:
            instance: The instance the method is being accessed from.
            owner: The class that owns the method.

        Returns:
            A new wrapper bound to the instance, or self if accessed from the class.
        """
        if instance is None:
            return self
        bound = type(self)(self._meth, instance)
        # Copy extra per-wrapper attributes (decorator metadata) onto the bound
        # copy, but skip everything __init__/update_wrapper already set.
        skip = {
            "_meth",
            "_instance",
            "__name__",
            "__doc__",
            "__signature__",
            "__self__",
            "_is_coroutine",
            "__module__",
            "__qualname__",
            "__annotations__",
            "__type_params__",
            "__wrapped__",
        }
        for attr, value in self.__dict__.items():
            if attr not in skip:
                setattr(bound, attr, value)
        return bound
class StartMethod(FlowMethod[P, R]):
    """Wrapper for methods marked as flow start points."""

    # Class-level defaults; trigger metadata may be filled in by decorators.
    __is_start_method__: bool = True
    __trigger_methods__: list[FlowMethodName] | None = None
    __condition_type__: FlowConditionType | None = None
    __trigger_condition__: FlowCondition | None = None
class ListenMethod(FlowMethod[P, R]):
    """Wrapper for methods marked as flow listeners."""

    # Class-level defaults; trigger metadata may be filled in by decorators.
    __trigger_methods__: list[FlowMethodName] | None = None
    __condition_type__: FlowConditionType | None = None
    __trigger_condition__: FlowCondition | None = None
class RouterMethod(FlowMethod[P, R]):
    """Wrapper for methods marked as flow routers."""

    # Class-level defaults; trigger metadata may be filled in by decorators.
    __is_router__: bool = True
    __trigger_methods__: list[FlowMethodName] | None = None
    __condition_type__: FlowConditionType | None = None
    __trigger_condition__: FlowCondition | None = None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/flow_wrappers.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/lite_agent_output.py | """Output class for LiteAgent execution results."""
from __future__ import annotations
from typing import Any
from pydantic import BaseModel, Field
from crewai.utilities.types import LLMMessage
class LiteAgentOutput(BaseModel):
    """Class that represents the result of a LiteAgent execution."""

    model_config = {"arbitrary_types_allowed": True}

    raw: str = Field(description="Raw output of the agent", default="")
    pydantic: BaseModel | None = Field(
        description="Pydantic output of the agent", default=None
    )
    agent_role: str = Field(description="Role of the agent that produced this output")
    usage_metrics: dict[str, Any] | None = Field(
        description="Token usage metrics for this execution", default=None
    )
    # default_factory instead of a shared mutable [] literal (best practice;
    # behavior-equivalent since each instance still starts with an empty list).
    messages: list[LLMMessage] = Field(
        description="Messages of the agent", default_factory=list
    )

    def to_dict(self) -> dict[str, Any]:
        """Convert pydantic_output to a dictionary.

        Returns:
            `pydantic.model_dump()` when a structured output is present,
            otherwise an empty dict.
        """
        if self.pydantic:
            return self.pydantic.model_dump()
        return {}

    def __str__(self) -> str:
        """Return the raw output as a string."""
        return self.raw
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/lite_agent_output.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/anthropic/completion.py | from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING, Any, Final, Literal, TypeGuard, cast
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM, llm_call_context
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
try:
from anthropic import Anthropic, AsyncAnthropic, transform_schema
from anthropic.types import Message, TextBlock, ThinkingBlock, ToolUseBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
import httpx
except ImportError:
raise ImportError(
'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
) from None
# Beta feature flags sent via the "betas" request parameter.
ANTHROPIC_FILES_API_BETA: Final = "files-api-2025-04-14"
ANTHROPIC_STRUCTURED_OUTPUTS_BETA: Final = "structured-outputs-2025-11-13"

# Model-name fragments that support native structured outputs; matched as
# substrings of the lowercased model name (both "-" and "." spellings listed).
NATIVE_STRUCTURED_OUTPUT_MODELS: Final[
    tuple[
        Literal["claude-sonnet-4-5"],
        Literal["claude-sonnet-4.5"],
        Literal["claude-opus-4-5"],
        Literal["claude-opus-4.5"],
        Literal["claude-opus-4-1"],
        Literal["claude-opus-4.1"],
        Literal["claude-haiku-4-5"],
        Literal["claude-haiku-4.5"],
    ]
] = (
    "claude-sonnet-4-5",
    "claude-sonnet-4.5",
    "claude-opus-4-5",
    "claude-opus-4.5",
    "claude-opus-4-1",
    "claude-opus-4.1",
    "claude-haiku-4-5",
    "claude-haiku-4.5",
)
def _supports_native_structured_outputs(model: str) -> bool:
    """Return True when *model* can use Anthropic's native structured outputs.

    The lowercased model name is substring-matched against the known Claude
    4.x identifiers; any other model must use the tool-based fallback path.

    Args:
        model: The model name/identifier.

    Returns:
        True if the model supports native structured outputs.
    """
    normalized = model.lower()
    for known in NATIVE_STRUCTURED_OUTPUT_MODELS:
        if known in normalized:
            return True
    return False
def _is_pydantic_model_class(obj: Any) -> TypeGuard[type[BaseModel]]:
    """Return True when *obj* is a Pydantic model class (not an instance).

    Distinguishes model classes that support structured outputs (they expose
    model_json_schema) from plain dicts like {"type": "json_object"}.

    Args:
        obj: The object to check.

    Returns:
        True if obj is a Pydantic model class.
    """
    if not isinstance(obj, type):
        return False
    return issubclass(obj, BaseModel)
def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool:
"""Check if any message content contains a file_id reference.
Anthropic's Files API is in beta and requires a special header when
file_id references are used in content blocks.
Args:
messages: List of message dicts to check.
Returns:
True if any content block contains a file_id reference.
"""
for message in messages:
content = message.get("content")
if isinstance(content, list):
for block in content:
if isinstance(block, dict):
source = block.get("source", {})
if isinstance(source, dict) and source.get("type") == "file":
return True
return False
class AnthropicThinkingConfig(BaseModel):
    """Extended-thinking configuration forwarded to the Anthropic API."""

    type: Literal["enabled", "disabled"]
    budget_tokens: int | None = None  # passed through verbatim to the API
class AnthropicCompletion(BaseLLM):
"""Anthropic native completion implementation.
This class provides direct integration with the Anthropic Python SDK,
offering native tool use, streaming support, and proper message formatting.
"""
    def __init__(
        self,
        model: str = "claude-3-5-sonnet-20241022",
        api_key: str | None = None,
        base_url: str | None = None,
        timeout: float | None = None,
        max_retries: int = 2,
        temperature: float | None = None,
        max_tokens: int = 4096,  # Required for Anthropic
        top_p: float | None = None,
        stop_sequences: list[str] | None = None,
        stream: bool = False,
        client_params: dict[str, Any] | None = None,
        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
        thinking: AnthropicThinkingConfig | None = None,
        response_format: type[BaseModel] | None = None,
        **kwargs: Any,
    ):
        """Initialize Anthropic chat completion client.

        Args:
            model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')
            api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var)
            base_url: Custom base URL for Anthropic API
            timeout: Request timeout in seconds
            max_retries: Maximum number of retries
            temperature: Sampling temperature (0-1)
            max_tokens: Maximum tokens in response (required for Anthropic)
            top_p: Nucleus sampling parameter
            stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
            stream: Enable streaming responses
            client_params: Additional parameters for the Anthropic client
            interceptor: HTTP interceptor for modifying requests/responses at transport level.
            thinking: Extended-thinking configuration forwarded to the API.
            response_format: Pydantic model for structured output. When provided, responses
                will be validated against this model schema.
            **kwargs: Additional parameters
        """
        super().__init__(
            model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
        )
        # Client params
        self.interceptor = interceptor
        self.client_params = client_params
        self.base_url = base_url
        self.timeout = timeout
        self.max_retries = max_retries
        # Sync client; _get_client_params wires the sync interceptor transport.
        self.client = Anthropic(**self._get_client_params())
        # The async client needs its own async interceptor-aware transport.
        async_client_params = self._get_client_params()
        if self.interceptor:
            async_transport = AsyncHTTPTransport(interceptor=self.interceptor)
            async_http_client = httpx.AsyncClient(transport=async_transport)
            async_client_params["http_client"] = async_http_client
        self.async_client = AsyncAnthropic(**async_client_params)
        # Store completion parameters
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.stream = stream
        self.stop_sequences = stop_sequences or []
        self.thinking = thinking
        # Thinking blocks captured from the previous response; replayed into the
        # next assistant message when thinking is enabled.
        self.previous_thinking_blocks: list[ThinkingBlock] = []
        self.response_format = response_format
        # Model-specific settings
        self.is_claude_3 = "claude-3" in model.lower()
        self.supports_tools = True
    @property
    def stop(self) -> list[str]:
        """Get stop sequences sent to the API."""
        # Exposes stop_sequences under the generic "stop" name used by BaseLLM.
        return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Anthropic API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
    def _get_client_params(self) -> dict[str, Any]:
        """Get client parameters.

        Resolves the API key from the ANTHROPIC_API_KEY env var when not set
        explicitly (caching it on self.api_key) and wires the HTTP interceptor
        into a custom httpx client when one is configured.

        Returns:
            Keyword arguments for constructing an Anthropic client.

        Raises:
            ValueError: If no API key is available.
        """
        if self.api_key is None:
            self.api_key = os.getenv("ANTHROPIC_API_KEY")
        if self.api_key is None:
            raise ValueError("ANTHROPIC_API_KEY is required")
        client_params = {
            "api_key": self.api_key,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
        }
        if self.interceptor:
            transport = HTTPTransport(interceptor=self.interceptor)
            http_client = httpx.Client(transport=transport)
            client_params["http_client"] = http_client  # type: ignore[assignment]
        # Caller-supplied extras override the defaults above.
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call Anthropic messages API.

        Args:
            messages: Input messages for the chat completion
            tools: List of tool/function definitions
            callbacks: Callback functions (not used in native implementation)
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Per-call structured-output model; falls back to
                self.response_format when None.

        Returns:
            Chat completion response or tool call result

        Raises:
            ValueError: If a before_llm_call hook blocks the call.
        """
        with llm_call_context():
            try:
                # Emit call started event
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                # Format messages for Anthropic
                formatted_messages, system_message = (
                    self._format_messages_for_anthropic(messages)
                )
                # Hooks may veto the call entirely.
                if not self._invoke_before_llm_call_hooks(
                    formatted_messages, from_agent
                ):
                    raise ValueError("LLM call blocked by before_llm_call hook")
                # Prepare completion parameters
                completion_params = self._prepare_completion_params(
                    formatted_messages, system_message, tools, available_functions
                )
                effective_response_model = response_model or self.response_format
                # Handle streaming vs non-streaming
                if self.stream:
                    return self._handle_streaming_completion(
                        completion_params,
                        available_functions,
                        from_task,
                        from_agent,
                        effective_response_model,
                    )
                return self._handle_completion(
                    completion_params,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )
            except Exception as e:
                error_msg = f"Anthropic API call failed: {e!s}"
                logging.error(error_msg)
                # Report the failure before propagating to the caller.
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
    async def acall(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Async call to Anthropic messages API.

        Args:
            messages: Input messages for the chat completion
            tools: List of tool/function definitions
            callbacks: Callback functions (not used in native implementation)
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Optional response model.

        Returns:
            Chat completion response or tool call result
        """
        # NOTE(review): unlike call(), this path does not invoke the
        # before_llm_call hooks — confirm whether that is intentional.
        with llm_call_context():
            try:
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                formatted_messages, system_message = (
                    self._format_messages_for_anthropic(messages)
                )
                completion_params = self._prepare_completion_params(
                    formatted_messages, system_message, tools, available_functions
                )
                # Per-call model overrides the instance-level response_format.
                effective_response_model = response_model or self.response_format
                if self.stream:
                    return await self._ahandle_streaming_completion(
                        completion_params,
                        available_functions,
                        from_task,
                        from_agent,
                        effective_response_model,
                    )
                return await self._ahandle_completion(
                    completion_params,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )
            except Exception as e:
                error_msg = f"Anthropic API call failed: {e!s}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
    def _prepare_completion_params(
        self,
        messages: list[LLMMessage],
        system_message: str | None = None,
        tools: list[dict[str, Any]] | None = None,
        available_functions: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Prepare parameters for Anthropic messages API.

        Args:
            messages: Formatted messages for Anthropic
            system_message: Extracted system message
            tools: Tool definitions
            available_functions: Available functions for tool calling. When provided
                with a single tool, tool_choice is automatically set to force tool use.

        Returns:
            Parameters dictionary for Anthropic API
        """
        params = {
            "model": self.model,
            "messages": messages,
            "max_tokens": self.max_tokens,
            "stream": self.stream,
        }
        # Add system message if present
        if system_message:
            params["system"] = system_message
        # Add optional parameters if set
        if self.temperature is not None:
            params["temperature"] = self.temperature
        if self.top_p is not None:
            params["top_p"] = self.top_p
        if self.stop_sequences:
            params["stop_sequences"] = self.stop_sequences
        # Handle tools for Claude 3+
        if tools and self.supports_tools:
            converted_tools = self._convert_tools_for_interference(tools)
            params["tools"] = converted_tools
            # With exactly one known tool, force the model to use it.
            if available_functions and len(converted_tools) == 1:
                tool_name = converted_tools[0].get("name")
                if tool_name and tool_name in available_functions:
                    params["tool_choice"] = {"type": "tool", "name": tool_name}
        if self.thinking:
            # thinking may be a validated config object or a raw dict passthrough.
            if isinstance(self.thinking, AnthropicThinkingConfig):
                params["thinking"] = self.thinking.model_dump()
            else:
                params["thinking"] = self.thinking
        return params
    # NOTE(review): "interference" is likely a typo for "inference"; renaming
    # would break callers, so the name is kept as-is.
    def _convert_tools_for_interference(
        self, tools: list[dict[str, Any]]
    ) -> list[dict[str, Any]]:
        """Convert CrewAI tool format to Anthropic tool use format.

        Tools already in Anthropic shape (name/description/input_schema) are
        passed through unchanged; everything else goes through the shared
        safe_tool_conversion helper.

        Args:
            tools: Tool definitions in CrewAI or Anthropic format.

        Returns:
            Tool definitions in Anthropic's tool-use format.

        Raises:
            ImportError, KeyError, ValueError: If a tool cannot be converted.
        """
        anthropic_tools = []
        for tool in tools:
            if "input_schema" in tool and "name" in tool and "description" in tool:
                anthropic_tools.append(tool)
                continue
            try:
                from crewai.llms.providers.utils.common import safe_tool_conversion

                name, description, parameters = safe_tool_conversion(tool, "Anthropic")
            except (ImportError, KeyError, ValueError) as e:
                logging.error(f"Error converting tool to Anthropic format: {e}")
                raise e
            anthropic_tool = {
                "name": name,
                "description": description,
            }
            if parameters and isinstance(parameters, dict):
                anthropic_tool["input_schema"] = parameters  # type: ignore[assignment]
            else:
                # No usable schema: declare an empty object schema.
                anthropic_tool["input_schema"] = {  # type: ignore[assignment]
                    "type": "object",
                    "properties": {},
                    "required": [],
                }
            anthropic_tools.append(anthropic_tool)
        return anthropic_tools
    def _extract_thinking_block(
        self, content_block: Any
    ) -> ThinkingBlock | dict[str, Any] | None:
        """Extract and format thinking block from content block.

        Args:
            content_block: Content block from Anthropic response

        Returns:
            Dictionary with thinking block data including signature, or None if not a thinking block
        """
        if content_block.type == "thinking":
            thinking_block = {
                "type": "thinking",
                "thinking": content_block.thinking,
            }
            # Signature is required to replay the block in later requests.
            if hasattr(content_block, "signature"):
                thinking_block["signature"] = content_block.signature
            return thinking_block
        if content_block.type == "redacted_thinking":
            redacted_block = {"type": "redacted_thinking"}
            if hasattr(content_block, "thinking"):
                redacted_block["thinking"] = content_block.thinking
            if hasattr(content_block, "signature"):
                redacted_block["signature"] = content_block.signature
            return redacted_block
        # Any other block type (text, tool_use, ...) is not a thinking block.
        return None
    def _format_messages_for_anthropic(
        self, messages: str | list[LLMMessage]
    ) -> tuple[list[LLMMessage], str | None]:
        """Format messages for Anthropic API.

        Anthropic has specific requirements:
        - System messages are separate from conversation messages
        - Messages must alternate between user and assistant
        - First message must be from user
        - Tool results must be in user messages with tool_result content blocks
        - When thinking is enabled, assistant messages must start with thinking blocks

        Args:
            messages: Input messages

        Returns:
            Tuple of (formatted_messages, system_message)

        Raises:
            ValueError: If a tool message is missing its tool_call_id.
        """
        # Use base class formatting first
        base_formatted = super()._format_messages(messages)
        formatted_messages: list[LLMMessage] = []
        system_message: str | None = None
        # Tool results buffered until the next non-tool message, then emitted
        # together as a single user message of tool_result blocks.
        pending_tool_results: list[dict[str, Any]] = []
        for message in base_formatted:
            role = message.get("role")
            content = message.get("content", "")
            if role == "system":
                # Multiple system messages are concatenated with a blank line.
                if system_message:
                    system_message += f"\n\n{content}"
                else:
                    system_message = cast(str, content)
            elif role == "tool":
                tool_call_id = message.get("tool_call_id", "")
                if not tool_call_id:
                    raise ValueError("Tool message missing required tool_call_id")
                tool_result = {
                    "type": "tool_result",
                    "tool_use_id": tool_call_id,
                    "content": content if content else "",
                }
                pending_tool_results.append(tool_result)
            elif role == "assistant":
                # First, flush any pending tool results as a user message
                if pending_tool_results:
                    formatted_messages.append(
                        {"role": "user", "content": pending_tool_results}
                    )
                    pending_tool_results = []
                # Handle assistant message with tool_calls (convert to Anthropic format)
                tool_calls = message.get("tool_calls", [])
                if tool_calls:
                    assistant_content: list[dict[str, Any]] = []
                    for tc in tool_calls:
                        if isinstance(tc, dict):
                            func = tc.get("function", {})
                            tool_use = {
                                "type": "tool_use",
                                "id": tc.get("id", ""),
                                "name": func.get("name", ""),
                                # OpenAI-style arguments arrive as a JSON string.
                                "input": json.loads(func.get("arguments", "{}"))
                                if isinstance(func.get("arguments"), str)
                                else func.get("arguments", {}),
                            }
                            assistant_content.append(tool_use)
                    if assistant_content:
                        formatted_messages.append(
                            {"role": "assistant", "content": assistant_content}
                        )
                elif isinstance(content, list):
                    # Already structured content blocks; pass through.
                    formatted_messages.append({"role": "assistant", "content": content})
                elif self.thinking and self.previous_thinking_blocks:
                    # Replay prior thinking blocks ahead of the text, as required
                    # when extended thinking is enabled.
                    structured_content = cast(
                        list[dict[str, Any]],
                        [
                            *self.previous_thinking_blocks,
                            {"type": "text", "text": content if content else ""},
                        ],
                    )
                    formatted_messages.append(
                        LLMMessage(role="assistant", content=structured_content)
                    )
                else:
                    content_str = content if content is not None else ""
                    formatted_messages.append(
                        LLMMessage(role="assistant", content=content_str)
                    )
            else:
                # User message - first flush any pending tool results
                if pending_tool_results:
                    formatted_messages.append(
                        {"role": "user", "content": pending_tool_results}
                    )
                    pending_tool_results = []
                role_str = role if role is not None else "user"
                if isinstance(content, list):
                    formatted_messages.append({"role": role_str, "content": content})
                else:
                    content_str = content if content is not None else ""
                    formatted_messages.append(
                        LLMMessage(role=role_str, content=content_str)
                    )
        # Flush any remaining pending tool results
        if pending_tool_results:
            formatted_messages.append({"role": "user", "content": pending_tool_results})
        # Ensure first message is from user (Anthropic requirement)
        if not formatted_messages:
            # If no messages, add a default user message
            formatted_messages.append({"role": "user", "content": "Hello"})
        elif formatted_messages[0]["role"] != "user":
            # If first message is not from user, insert a user message at the beginning
            formatted_messages.insert(0, {"role": "user", "content": "Hello"})
        return formatted_messages, system_message
    def _handle_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming message completion.

        Selects the beta endpoint when the Files API or native structured
        outputs are needed; otherwise uses the standard messages endpoint.
        Structured outputs use the native JSON-schema path on supported models
        and a forced "structured_output" tool on others.

        Args:
            params: Prepared request parameters (mutated to add betas/tools).
            available_functions: Functions the model may call via tools.
            from_task: Task that initiated the call.
            from_agent: Agent that initiated the call.
            response_model: Pydantic model for structured output, if any.

        Returns:
            Response text, a validated response_model instance, a list of tool
            uses for the executor, or a tool execution result.

        Raises:
            LLMContextLengthExceededError: When the context window is exceeded.
        """
        uses_file_api = _contains_file_id_reference(params.get("messages", []))
        betas: list[str] = []
        use_native_structured_output = False
        if uses_file_api:
            betas.append(ANTHROPIC_FILES_API_BETA)
        extra_body: dict[str, Any] | None = None
        if _is_pydantic_model_class(response_model):
            schema = transform_schema(response_model.model_json_schema())
            if _supports_native_structured_outputs(self.model):
                use_native_structured_output = True
                betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
                extra_body = {
                    "output_format": {
                        "type": "json_schema",
                        "schema": schema,
                    }
                }
            else:
                # Fallback: force a synthetic tool whose input is the schema.
                structured_tool = {
                    "name": "structured_output",
                    "description": "Output the structured response",
                    "input_schema": schema,
                }
                params["tools"] = [structured_tool]
                params["tool_choice"] = {"type": "tool", "name": "structured_output"}
        try:
            if betas:
                params["betas"] = betas
                response = self.client.beta.messages.create(
                    **params, extra_body=extra_body
                )
            else:
                response = self.client.messages.create(**params)
        except Exception as e:
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            raise e from e
        usage = self._extract_anthropic_token_usage(response)
        self._track_token_usage_internal(usage)
        # Structured output: validate and return the first matching block.
        if _is_pydantic_model_class(response_model) and response.content:
            if use_native_structured_output:
                for block in response.content:
                    if isinstance(block, (TextBlock, BetaTextBlock)):
                        structured_data = response_model.model_validate_json(block.text)
                        self._emit_call_completed_event(
                            response=structured_data.model_dump_json(),
                            call_type=LLMCallType.LLM_CALL,
                            from_task=from_task,
                            from_agent=from_agent,
                            messages=params["messages"],
                        )
                        return structured_data
            else:
                for block in response.content:
                    if (
                        isinstance(block, (ToolUseBlock, BetaToolUseBlock))
                        and block.name == "structured_output"
                    ):
                        structured_data = response_model.model_validate(block.input)
                        self._emit_call_completed_event(
                            response=structured_data.model_dump_json(),
                            call_type=LLMCallType.LLM_CALL,
                            from_task=from_task,
                            from_agent=from_agent,
                            messages=params["messages"],
                        )
                        return structured_data
        # Check if Claude wants to use tools
        if response.content:
            tool_uses = [
                block
                for block in response.content
                if isinstance(block, (ToolUseBlock, BetaToolUseBlock))
            ]
            if tool_uses:
                # If no available_functions, return tool calls for executor to handle
                # This allows the executor to manage tool execution with proper
                # message history and post-tool reasoning prompts
                if not available_functions:
                    self._emit_call_completed_event(
                        response=list(tool_uses),
                        call_type=LLMCallType.TOOL_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return list(tool_uses)
                result = self._execute_first_tool(
                    tool_uses, available_functions, from_task, from_agent
                )
                if result is not None:
                    return result
        # Plain text path: concatenate text blocks, keep thinking blocks aside.
        content = ""
        thinking_blocks: list[ThinkingBlock] = []
        if response.content:
            for content_block in response.content:
                if hasattr(content_block, "text"):
                    content += content_block.text
                else:
                    thinking_block = self._extract_thinking_block(content_block)
                    if thinking_block:
                        thinking_blocks.append(cast(ThinkingBlock, thinking_block))
        if thinking_blocks:
            # Saved for replay into the next assistant turn.
            self.previous_thinking_blocks = thinking_blocks
        content = self._apply_stop_words(content)
        self._emit_call_completed_event(
            response=content,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        if usage.get("total_tokens", 0) > 0:
            logging.info(f"Anthropic API usage: {usage}")
        return self._invoke_after_llm_call_hooks(
            params["messages"], content, from_agent
        )
    def _handle_streaming_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle streaming message completion.

        Streams the response, emitting a chunk event for every text delta and
        tool-call fragment, then resolves the final result from the completed
        message: structured output (native beta or forced-tool fallback),
        tool-use blocks, or plain text.

        Args:
            params: Prepared Anthropic completion parameters.
            available_functions: Callables for executing requested tools; when
                absent, raw tool-use blocks are returned for the executor.
            from_task: Task that initiated the call (event attribution).
            from_agent: Agent that initiated the call (event attribution).
            response_model: Pydantic model for structured output, if any.

        Returns:
            The final text, a validated ``response_model`` instance, the first
            tool's result, or the list of tool-use blocks.
        """
        betas: list[str] = []
        use_native_structured_output = False
        extra_body: dict[str, Any] | None = None
        if _is_pydantic_model_class(response_model):
            schema = transform_schema(response_model.model_json_schema())
            if _supports_native_structured_outputs(self.model):
                # Model supports the structured-outputs beta: request JSON
                # conforming to the schema directly via output_format.
                use_native_structured_output = True
                betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
                extra_body = {
                    "output_format": {
                        "type": "json_schema",
                        "schema": schema,
                    }
                }
            else:
                # Fallback: force a synthetic "structured_output" tool call
                # whose input carries the structured payload.
                structured_tool = {
                    "name": "structured_output",
                    "description": "Output the structured response",
                    "input_schema": schema,
                }
                params["tools"] = [structured_tool]
                params["tool_choice"] = {"type": "tool", "name": "structured_output"}
        full_response = ""
        # Remove 'stream' parameter as messages.stream() doesn't accept it
        # (the SDK sets it internally)
        stream_params = {k: v for k, v in params.items() if k != "stream"}
        if betas:
            stream_params["betas"] = betas
        # Tracks in-progress tool calls keyed by content-block index while
        # their JSON arguments arrive incrementally.
        current_tool_calls: dict[int, dict[str, Any]] = {}
        # Beta features require the beta client; extra_body only applies there.
        stream_context = (
            self.client.beta.messages.stream(**stream_params, extra_body=extra_body)
            if betas
            else self.client.messages.stream(**stream_params)
        )
        with stream_context as stream:
            response_id = None
            for event in stream:
                # Events carrying a message expose the response id used to
                # correlate all subsequent chunk events.
                if hasattr(event, "message") and hasattr(event.message, "id"):
                    response_id = event.message.id
                if hasattr(event, "delta") and hasattr(event.delta, "text"):
                    text_delta = event.delta.text
                    full_response += text_delta
                    self._emit_stream_chunk_event(
                        chunk=text_delta,
                        from_task=from_task,
                        from_agent=from_agent,
                        response_id=response_id,
                    )
                if event.type == "content_block_start":
                    block = event.content_block
                    if block.type == "tool_use":
                        # New tool call: start tracking it and emit an empty
                        # chunk announcing the call.
                        block_index = event.index
                        current_tool_calls[block_index] = {
                            "id": block.id,
                            "name": block.name,
                            "arguments": "",
                            "index": block_index,
                        }
                        self._emit_stream_chunk_event(
                            chunk="",
                            from_task=from_task,
                            from_agent=from_agent,
                            tool_call={
                                "id": block.id,
                                "function": {
                                    "name": block.name,
                                    "arguments": "",
                                },
                                "type": "function",
                                "index": block_index,
                            },
                            call_type=LLMCallType.TOOL_CALL,
                            response_id=response_id,
                        )
                elif event.type == "content_block_delta":
                    if event.delta.type == "input_json_delta":
                        # Partial JSON for a tool call's arguments: append it
                        # and re-emit the accumulated arguments so far.
                        block_index = event.index
                        partial_json = event.delta.partial_json
                        if block_index in current_tool_calls and partial_json:
                            current_tool_calls[block_index]["arguments"] += partial_json
                            self._emit_stream_chunk_event(
                                chunk=partial_json,
                                from_task=from_task,
                                from_agent=from_agent,
                                tool_call={
                                    "id": current_tool_calls[block_index]["id"],
                                    "function": {
                                        "name": current_tool_calls[block_index]["name"],
                                        "arguments": current_tool_calls[block_index][
                                            "arguments"
                                        ],
                                    },
                                    "type": "function",
                                    "index": block_index,
                                },
                                call_type=LLMCallType.TOOL_CALL,
                                response_id=response_id,
                            )
            final_message = stream.get_final_message()
        # Preserve thinking blocks so they can be replayed on the next turn.
        thinking_blocks: list[ThinkingBlock] = []
        if final_message.content:
            for content_block in final_message.content:
                thinking_block = self._extract_thinking_block(content_block)
                if thinking_block:
                    thinking_blocks.append(cast(ThinkingBlock, thinking_block))
        if thinking_blocks:
            self.previous_thinking_blocks = thinking_blocks
        usage = self._extract_anthropic_token_usage(final_message)
        self._track_token_usage_internal(usage)
        if _is_pydantic_model_class(response_model):
            if use_native_structured_output:
                # Native path: the streamed text is the JSON document itself.
                structured_data = response_model.model_validate_json(full_response)
                self._emit_call_completed_event(
                    response=structured_data.model_dump_json(),
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return structured_data
            # Fallback path: the payload arrives as the forced tool's input.
            for block in final_message.content:
                if (
                    isinstance(block, ToolUseBlock)
                    and block.name == "structured_output"
                ):
                    structured_data = response_model.model_validate(block.input)
                    self._emit_call_completed_event(
                        response=structured_data.model_dump_json(),
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return structured_data
        if final_message.content:
            tool_uses = [
                block
                for block in final_message.content
                if isinstance(block, (ToolUseBlock, BetaToolUseBlock))
            ]
            if tool_uses:
                # NOTE(review): unlike the non-streaming handler, this path
                # returns tool calls without emitting a completion event —
                # confirm whether that asymmetry is intentional.
                if not available_functions:
                    return list(tool_uses)
                # Execute first tool and return result directly
                result = self._execute_first_tool(
                    tool_uses, available_functions, from_task, from_agent
                )
                if result is not None:
                    return result
        full_response = self._apply_stop_words(full_response)
        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        return self._invoke_after_llm_call_hooks(
            params["messages"], full_response, from_agent
        )
def _execute_tools_and_collect_results(
self,
tool_uses: list[ToolUseBlock | BetaToolUseBlock],
available_functions: dict[str, Any],
from_task: Any | None = None,
from_agent: Any | None = None,
) -> list[dict[str, Any]]:
"""Execute tools and collect results in Anthropic format.
Args:
tool_uses: List of tool use blocks from Claude's response (regular or beta API)
available_functions: Available functions for tool calling
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
List of tool result dictionaries in Anthropic format
"""
tool_results = []
for tool_use in tool_uses:
function_name = tool_use.name
function_args = tool_use.input
result = self._handle_tool_execution(
function_name=function_name,
function_args=cast(dict[str, Any], function_args),
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
tool_result = {
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": str(result)
if result is not None
else "Tool execution completed",
}
tool_results.append(tool_result)
return tool_results
def _execute_first_tool(
self,
tool_uses: list[ToolUseBlock | BetaToolUseBlock],
available_functions: dict[str, Any],
from_task: Any | None = None,
from_agent: Any | None = None,
) -> Any | None:
"""Execute the first tool from the tool_uses list and return its result.
This is used when available_functions is provided, to directly execute
the tool and return its result (matching OpenAI behavior for use cases
like reasoning_handler).
Args:
tool_uses: List of tool use blocks from Claude's response
available_functions: Available functions for tool calling
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
The result of the first tool execution, or None if execution failed
"""
tool_use = tool_uses[0]
function_name = tool_use.name
function_args = cast(dict[str, Any], tool_use.input)
return self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
    # TODO: we drop this
    def _handle_tool_use_conversation(
        self,
        initial_response: Message | BetaMessage,
        tool_uses: list[ToolUseBlock | BetaToolUseBlock],
        params: dict[str, Any],
        available_functions: dict[str, Any],
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> str:
        """Handle the complete tool use conversation flow.

        This implements the proper Anthropic tool use pattern:
        1. Claude requests tool use
        2. We execute the tools
        3. We send tool results back to Claude
        4. Claude processes results and generates final response

        Args:
            initial_response: Assistant message that requested the tool calls.
            tool_uses: Tool-use blocks extracted from ``initial_response``.
            params: Original completion parameters; a copy receives the
                extended message history for the follow-up call.
            available_functions: Callables used to execute the tools.
            from_task: Task that initiated the call (event attribution).
            from_agent: Agent that initiated the call (event attribution).

        Returns:
            Claude's final text after consuming the tool results, or the
            first tool result as a fallback if the follow-up call fails.

        Raises:
            LLMContextLengthExceededError: If the follow-up exceeds the
                model's context window.
        """
        tool_results = self._execute_tools_and_collect_results(
            tool_uses, available_functions, from_task, from_agent
        )
        follow_up_params = params.copy()
        # Add Claude's tool use response to conversation
        assistant_content: list[
            ThinkingBlock | ToolUseBlock | TextBlock | dict[str, Any]
        ] = []
        for block in initial_response.content:
            thinking_block = self._extract_thinking_block(block)
            if thinking_block:
                assistant_content.append(thinking_block)
            elif block.type == "tool_use":
                # Re-serialize tool_use blocks as plain dicts for the API.
                assistant_content.append(
                    {
                        "type": "tool_use",
                        "id": block.id,
                        "name": block.name,
                        "input": block.input,
                    }
                )
            elif hasattr(block, "text"):
                assistant_content.append({"type": "text", "text": block.text})
        assistant_message = {"role": "assistant", "content": assistant_content}
        # Add user message with tool results
        user_message = {"role": "user", "content": tool_results}
        # Update messages for follow-up call
        follow_up_params["messages"] = params["messages"] + [
            assistant_message,
            user_message,
        ]
        try:
            # Send tool results back to Claude for final response
            final_response: Message = self.client.messages.create(**follow_up_params)
            # Track token usage for follow-up call
            follow_up_usage = self._extract_anthropic_token_usage(final_response)
            self._track_token_usage_internal(follow_up_usage)
            final_content = ""
            thinking_blocks: list[ThinkingBlock] = []
            if final_response.content:
                for content_block in final_response.content:
                    if hasattr(content_block, "text"):
                        final_content += content_block.text
                    else:
                        thinking_block = self._extract_thinking_block(content_block)
                        if thinking_block:
                            thinking_blocks.append(cast(ThinkingBlock, thinking_block))
            if thinking_blocks:
                self.previous_thinking_blocks = thinking_blocks
            final_content = self._apply_stop_words(final_content)
            # Emit completion event for the final response
            self._emit_call_completed_event(
                response=final_content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=follow_up_params["messages"],
            )
            # Log token usage. NOTE(review): only the follow-up call's usage
            # is included here, not the initial request's — confirm whether
            # combining both was intended.
            total_usage = {
                "input_tokens": follow_up_usage.get("input_tokens", 0),
                "output_tokens": follow_up_usage.get("output_tokens", 0),
                "total_tokens": follow_up_usage.get("total_tokens", 0),
            }
            if total_usage.get("total_tokens", 0) > 0:
                logging.info(f"Anthropic API tool conversation usage: {total_usage}")
            return final_content
        except Exception as e:
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded in tool follow-up: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            logging.error(f"Tool follow-up conversation failed: {e}")
            # Fallback: return the first tool result if follow-up fails
            if tool_results:
                return cast(str, tool_results[0]["content"])
            raise e
    async def _ahandle_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming async message completion.

        Async counterpart of the sync completion handler: resolves structured
        output (native beta or forced-tool fallback), tool-use blocks, or
        plain text from a single awaited API call.

        Args:
            params: Prepared Anthropic completion parameters.
            available_functions: Callables for executing requested tools; when
                absent, raw tool-use blocks are returned for the executor.
            from_task: Task that initiated the call (event attribution).
            from_agent: Agent that initiated the call (event attribution).
            response_model: Pydantic model for structured output, if any.

        Returns:
            The response text, a validated ``response_model`` instance, the
            first tool's result, or the list of tool-use blocks.

        Raises:
            LLMContextLengthExceededError: If the request exceeds the model's
                context window.

        NOTE(review): unlike the sync handler, this path neither collects
        thinking blocks nor invokes the after-LLM-call hooks — confirm
        whether that asymmetry is intentional.
        """
        # The Files API needs a beta header when messages reference file ids.
        uses_file_api = _contains_file_id_reference(params.get("messages", []))
        betas: list[str] = []
        use_native_structured_output = False
        if uses_file_api:
            betas.append(ANTHROPIC_FILES_API_BETA)
        extra_body: dict[str, Any] | None = None
        if _is_pydantic_model_class(response_model):
            schema = transform_schema(response_model.model_json_schema())
            if _supports_native_structured_outputs(self.model):
                # Native structured outputs: request schema-conforming JSON.
                use_native_structured_output = True
                betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
                extra_body = {
                    "output_format": {
                        "type": "json_schema",
                        "schema": schema,
                    }
                }
            else:
                # Fallback: force a synthetic "structured_output" tool call.
                structured_tool = {
                    "name": "structured_output",
                    "description": "Output the structured response",
                    "input_schema": schema,
                }
                params["tools"] = [structured_tool]
                params["tool_choice"] = {"type": "tool", "name": "structured_output"}
        try:
            if betas:
                params["betas"] = betas
                response = await self.async_client.beta.messages.create(
                    **params, extra_body=extra_body
                )
            else:
                response = await self.async_client.messages.create(**params)
        except Exception as e:
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            raise e from e
        usage = self._extract_anthropic_token_usage(response)
        self._track_token_usage_internal(usage)
        if _is_pydantic_model_class(response_model) and response.content:
            if use_native_structured_output:
                # Native path: the first text block is the JSON document.
                for block in response.content:
                    if isinstance(block, (TextBlock, BetaTextBlock)):
                        structured_data = response_model.model_validate_json(block.text)
                        self._emit_call_completed_event(
                            response=structured_data.model_dump_json(),
                            call_type=LLMCallType.LLM_CALL,
                            from_task=from_task,
                            from_agent=from_agent,
                            messages=params["messages"],
                        )
                        return structured_data
            else:
                # Fallback path: payload arrives as the forced tool's input.
                for block in response.content:
                    if (
                        isinstance(block, ToolUseBlock)
                        and block.name == "structured_output"
                    ):
                        structured_data = response_model.model_validate(block.input)
                        self._emit_call_completed_event(
                            response=structured_data.model_dump_json(),
                            call_type=LLMCallType.LLM_CALL,
                            from_task=from_task,
                            from_agent=from_agent,
                            messages=params["messages"],
                        )
                        return structured_data
        # Handle both ToolUseBlock (regular API) and BetaToolUseBlock (beta API features)
        if response.content:
            tool_uses = [
                block
                for block in response.content
                if isinstance(block, (ToolUseBlock, BetaToolUseBlock))
            ]
            if tool_uses:
                # If no available_functions, return tool calls for executor to handle
                if not available_functions:
                    self._emit_call_completed_event(
                        response=list(tool_uses),
                        call_type=LLMCallType.TOOL_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return list(tool_uses)
                result = self._execute_first_tool(
                    tool_uses, available_functions, from_task, from_agent
                )
                if result is not None:
                    return result
        # Plain text response: concatenate all text blocks.
        content = ""
        if response.content:
            for content_block in response.content:
                if hasattr(content_block, "text"):
                    content += content_block.text
        content = self._apply_stop_words(content)
        self._emit_call_completed_event(
            response=content,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        if usage.get("total_tokens", 0) > 0:
            logging.info(f"Anthropic API usage: {usage}")
        return content
    async def _ahandle_streaming_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle async streaming message completion.

        Async counterpart of the sync streaming handler: emits a chunk event
        per text delta and tool-call fragment, then resolves the final result
        (structured output, tool use, or plain text) from the completed
        message.

        Args:
            params: Prepared Anthropic completion parameters.
            available_functions: Callables for executing requested tools; when
                absent, raw tool-use blocks are returned for the executor.
            from_task: Task that initiated the call (event attribution).
            from_agent: Agent that initiated the call (event attribution).
            response_model: Pydantic model for structured output, if any.

        Returns:
            The final text, a validated ``response_model`` instance, the first
            tool's result, or the list of tool-use blocks.

        NOTE(review): unlike the sync streaming handler, this path neither
        collects thinking blocks from the final message nor invokes the
        after-LLM-call hooks — confirm whether that is intentional.
        """
        betas: list[str] = []
        use_native_structured_output = False
        extra_body: dict[str, Any] | None = None
        if _is_pydantic_model_class(response_model):
            schema = transform_schema(response_model.model_json_schema())
            if _supports_native_structured_outputs(self.model):
                # Native structured outputs: request schema-conforming JSON.
                use_native_structured_output = True
                betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
                extra_body = {
                    "output_format": {
                        "type": "json_schema",
                        "schema": schema,
                    }
                }
            else:
                # Fallback: force a synthetic "structured_output" tool call.
                structured_tool = {
                    "name": "structured_output",
                    "description": "Output the structured response",
                    "input_schema": schema,
                }
                params["tools"] = [structured_tool]
                params["tool_choice"] = {"type": "tool", "name": "structured_output"}
        full_response = ""
        # messages.stream() sets 'stream' internally and rejects it as an arg.
        stream_params = {k: v for k, v in params.items() if k != "stream"}
        if betas:
            stream_params["betas"] = betas
        # Tracks in-progress tool calls keyed by content-block index.
        current_tool_calls: dict[int, dict[str, Any]] = {}
        stream_context = (
            self.async_client.beta.messages.stream(
                **stream_params, extra_body=extra_body
            )
            if betas
            else self.async_client.messages.stream(**stream_params)
        )
        async with stream_context as stream:
            response_id = None
            async for event in stream:
                # Events carrying a message expose the response id used to
                # correlate all subsequent chunk events.
                if hasattr(event, "message") and hasattr(event.message, "id"):
                    response_id = event.message.id
                if hasattr(event, "delta") and hasattr(event.delta, "text"):
                    text_delta = event.delta.text
                    full_response += text_delta
                    self._emit_stream_chunk_event(
                        chunk=text_delta,
                        from_task=from_task,
                        from_agent=from_agent,
                        response_id=response_id,
                    )
                if event.type == "content_block_start":
                    block = event.content_block
                    if block.type == "tool_use":
                        # New tool call: start tracking it and emit an empty
                        # chunk announcing the call.
                        block_index = event.index
                        current_tool_calls[block_index] = {
                            "id": block.id,
                            "name": block.name,
                            "arguments": "",
                            "index": block_index,
                        }
                        self._emit_stream_chunk_event(
                            chunk="",
                            from_task=from_task,
                            from_agent=from_agent,
                            tool_call={
                                "id": block.id,
                                "function": {
                                    "name": block.name,
                                    "arguments": "",
                                },
                                "type": "function",
                                "index": block_index,
                            },
                            call_type=LLMCallType.TOOL_CALL,
                            response_id=response_id,
                        )
                elif event.type == "content_block_delta":
                    if event.delta.type == "input_json_delta":
                        # Partial JSON for a tool call's arguments: append it
                        # and re-emit the accumulated arguments so far.
                        block_index = event.index
                        partial_json = event.delta.partial_json
                        if block_index in current_tool_calls and partial_json:
                            current_tool_calls[block_index]["arguments"] += partial_json
                            self._emit_stream_chunk_event(
                                chunk=partial_json,
                                from_task=from_task,
                                from_agent=from_agent,
                                tool_call={
                                    "id": current_tool_calls[block_index]["id"],
                                    "function": {
                                        "name": current_tool_calls[block_index]["name"],
                                        "arguments": current_tool_calls[block_index][
                                            "arguments"
                                        ],
                                    },
                                    "type": "function",
                                    "index": block_index,
                                },
                                call_type=LLMCallType.TOOL_CALL,
                                response_id=response_id,
                            )
            final_message = await stream.get_final_message()
        usage = self._extract_anthropic_token_usage(final_message)
        self._track_token_usage_internal(usage)
        if _is_pydantic_model_class(response_model):
            if use_native_structured_output:
                # Native path: the streamed text is the JSON document itself.
                structured_data = response_model.model_validate_json(full_response)
                self._emit_call_completed_event(
                    response=structured_data.model_dump_json(),
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return structured_data
            # Fallback path: the payload arrives as the forced tool's input.
            for block in final_message.content:
                if (
                    isinstance(block, ToolUseBlock)
                    and block.name == "structured_output"
                ):
                    structured_data = response_model.model_validate(block.input)
                    self._emit_call_completed_event(
                        response=structured_data.model_dump_json(),
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return structured_data
        if final_message.content:
            tool_uses = [
                block
                for block in final_message.content
                if isinstance(block, (ToolUseBlock, BetaToolUseBlock))
            ]
            if tool_uses:
                # Without available_functions, hand raw tool calls back to the
                # executor; otherwise execute the first tool directly.
                if not available_functions:
                    return list(tool_uses)
                result = self._execute_first_tool(
                    tool_uses, available_functions, from_task, from_agent
                )
                if result is not None:
                    return result
        full_response = self._apply_stop_words(full_response)
        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        return full_response
    async def _ahandle_tool_use_conversation(
        self,
        initial_response: Message | BetaMessage,
        tool_uses: list[ToolUseBlock | BetaToolUseBlock],
        params: dict[str, Any],
        available_functions: dict[str, Any],
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> str:
        """Handle the complete async tool use conversation flow.

        This implements the proper Anthropic tool use pattern:
        1. Claude requests tool use
        2. We execute the tools
        3. We send tool results back to Claude
        4. Claude processes results and generates final response

        Args:
            initial_response: Assistant message that requested the tool calls.
            tool_uses: Tool-use blocks extracted from ``initial_response``.
            params: Original completion parameters; a copy receives the
                extended message history for the follow-up call.
            available_functions: Callables used to execute the tools.
            from_task: Task that initiated the call (event attribution).
            from_agent: Agent that initiated the call (event attribution).

        Returns:
            Claude's final text after consuming the tool results, or the
            first tool result as a fallback if the follow-up call fails.

        Raises:
            LLMContextLengthExceededError: If the follow-up exceeds the
                model's context window.

        NOTE(review): unlike the sync variant, the assistant turn reuses
        ``initial_response.content`` as-is (no re-serialization to dicts)
        and thinking blocks are not preserved — confirm intentional.
        """
        tool_results = self._execute_tools_and_collect_results(
            tool_uses, available_functions, from_task, from_agent
        )
        follow_up_params = params.copy()
        assistant_message = {"role": "assistant", "content": initial_response.content}
        user_message = {"role": "user", "content": tool_results}
        follow_up_params["messages"] = params["messages"] + [
            assistant_message,
            user_message,
        ]
        try:
            final_response: Message = await self.async_client.messages.create(
                **follow_up_params
            )
            follow_up_usage = self._extract_anthropic_token_usage(final_response)
            self._track_token_usage_internal(follow_up_usage)
            # Concatenate all text blocks from the final response.
            final_content = ""
            if final_response.content:
                for content_block in final_response.content:
                    if hasattr(content_block, "text"):
                        final_content += content_block.text
            final_content = self._apply_stop_words(final_content)
            self._emit_call_completed_event(
                response=final_content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=follow_up_params["messages"],
            )
            # NOTE(review): only the follow-up call's usage is logged here,
            # not the initial request's.
            total_usage = {
                "input_tokens": follow_up_usage.get("input_tokens", 0),
                "output_tokens": follow_up_usage.get("output_tokens", 0),
                "total_tokens": follow_up_usage.get("total_tokens", 0),
            }
            if total_usage.get("total_tokens", 0) > 0:
                logging.info(f"Anthropic API tool conversation usage: {total_usage}")
            return final_content
        except Exception as e:
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded in tool follow-up: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            logging.error(f"Tool follow-up conversation failed: {e}")
            # Fall back to the first tool result if the follow-up fails.
            if tool_results:
                return cast(str, tool_results[0]["content"])
            raise e
def supports_function_calling(self) -> bool:
"""Check if the model supports function calling."""
return self.supports_tools
def supports_stop_words(self) -> bool:
"""Check if the model supports stop words."""
return True # All Claude models support stop sequences
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
# Context window sizes for Anthropic models
context_windows = {
"claude-3-5-sonnet": 200000,
"claude-3-5-haiku": 200000,
"claude-3-opus": 200000,
"claude-3-sonnet": 200000,
"claude-3-haiku": 200000,
"claude-3-7-sonnet": 200000,
"claude-2.1": 200000,
"claude-2": 100000,
"claude-instant": 100000,
}
# Find the best match for the model name
for model_prefix, size in context_windows.items():
if self.model.startswith(model_prefix):
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
# Default context window size for Claude models
return int(200000 * CONTEXT_WINDOW_USAGE_RATIO)
@staticmethod
def _extract_anthropic_token_usage(
response: Message | BetaMessage,
) -> dict[str, Any]:
"""Extract token usage from Anthropic response."""
if hasattr(response, "usage") and response.usage:
usage = response.usage
input_tokens = getattr(usage, "input_tokens", 0)
output_tokens = getattr(usage, "output_tokens", 0)
cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
return {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"total_tokens": input_tokens + output_tokens,
"cached_prompt_tokens": cache_read_tokens,
}
return {"total_tokens": 0}
def supports_multimodal(self) -> bool:
"""Check if the model supports multimodal inputs.
All Claude 3+ models support vision and PDFs.
Returns:
True if the model supports images and PDFs.
"""
return "claude-3" in self.model.lower() or "claude-4" in self.model.lower()
    def get_file_uploader(self) -> Any:
        """Get an Anthropic file uploader using this LLM's clients.

        Returns:
            AnthropicFileUploader instance with pre-configured sync and async
            clients, or None when the optional ``crewai_files`` package is
            not installed.
        """
        try:
            from crewai_files.uploaders.anthropic import AnthropicFileUploader

            return AnthropicFileUploader(
                client=self.client,
                async_client=self.async_client,
            )
        except ImportError:
            # Optional dependency missing; file uploads are unavailable.
            return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/providers/anthropic/completion.py",
"license": "MIT License",
"lines": 1390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/azure/completion.py | from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING, Any, TypedDict
from pydantic import BaseModel
from typing_extensions import Self
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
try:
from azure.ai.inference import (
ChatCompletionsClient,
)
from azure.ai.inference.aio import (
ChatCompletionsClient as AsyncChatCompletionsClient,
)
from azure.ai.inference.models import (
ChatCompletions,
ChatCompletionsToolCall,
ChatCompletionsToolDefinition,
FunctionDefinition,
JsonSchemaFormat,
StreamingChatCompletionsUpdate,
)
from azure.core.credentials import (
AzureKeyCredential,
)
from azure.core.exceptions import (
HttpResponseError,
)
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM, llm_call_context
except ImportError:
raise ImportError(
'Azure AI Inference native provider not available, to install: uv add "crewai[azure-ai-inference]"'
) from None
class AzureCompletionParams(TypedDict, total=False):
"""Type definition for Azure chat completion parameters."""
messages: list[LLMMessage]
stream: bool
model_extras: dict[str, Any]
response_format: JsonSchemaFormat
model: str
temperature: float
top_p: float
frequency_penalty: float
presence_penalty: float
max_tokens: int
stop: list[str]
tools: list[ChatCompletionsToolDefinition]
tool_choice: str
class AzureCompletion(BaseLLM):
"""Azure AI Inference native completion implementation.
This class provides direct integration with the Azure AI Inference Python SDK,
offering native function calling, streaming support, and proper Azure authentication.
"""
def __init__(
self,
model: str,
api_key: str | None = None,
endpoint: str | None = None,
api_version: str | None = None,
timeout: float | None = None,
max_retries: int = 2,
temperature: float | None = None,
top_p: float | None = None,
frequency_penalty: float | None = None,
presence_penalty: float | None = None,
max_tokens: int | None = None,
stop: list[str] | None = None,
stream: bool = False,
interceptor: BaseInterceptor[Any, Any] | None = None,
response_format: type[BaseModel] | None = None,
**kwargs: Any,
):
"""Initialize Azure AI Inference chat completion client.
Args:
model: Azure deployment name or model name
api_key: Azure API key (defaults to AZURE_API_KEY env var)
endpoint: Azure endpoint URL (defaults to AZURE_ENDPOINT env var)
api_version: Azure API version (defaults to AZURE_API_VERSION env var)
timeout: Request timeout in seconds
max_retries: Maximum number of retries
temperature: Sampling temperature (0-2)
top_p: Nucleus sampling parameter
frequency_penalty: Frequency penalty (-2 to 2)
presence_penalty: Presence penalty (-2 to 2)
max_tokens: Maximum tokens in response
stop: Stop sequences
stream: Enable streaming responses
interceptor: HTTP interceptor (not yet supported for Azure).
response_format: Pydantic model for structured output. Used as default when
response_model is not passed to call()/acall() methods.
Only works with OpenAI models deployed on Azure.
**kwargs: Additional parameters
"""
if interceptor is not None:
raise NotImplementedError(
"HTTP interceptors are not yet supported for Azure AI Inference provider. "
"Interceptors are currently supported for OpenAI and Anthropic providers only."
)
super().__init__(
model=model, temperature=temperature, stop=stop or [], **kwargs
)
self.api_key = api_key or os.getenv("AZURE_API_KEY")
self.endpoint = (
endpoint
or os.getenv("AZURE_ENDPOINT")
or os.getenv("AZURE_OPENAI_ENDPOINT")
or os.getenv("AZURE_API_BASE")
)
self.api_version = api_version or os.getenv("AZURE_API_VERSION") or "2024-06-01"
self.timeout = timeout
self.max_retries = max_retries
if not self.api_key:
raise ValueError(
"Azure API key is required. Set AZURE_API_KEY environment variable or pass api_key parameter."
)
if not self.endpoint:
raise ValueError(
"Azure endpoint is required. Set AZURE_ENDPOINT environment variable or pass endpoint parameter."
)
# Validate and potentially fix Azure OpenAI endpoint URL
self.endpoint = self._validate_and_fix_endpoint(self.endpoint, model)
# Build client kwargs
client_kwargs = {
"endpoint": self.endpoint,
"credential": AzureKeyCredential(self.api_key),
}
# Add api_version if specified (primarily for Azure OpenAI endpoints)
if self.api_version:
client_kwargs["api_version"] = self.api_version
self.client = ChatCompletionsClient(**client_kwargs) # type: ignore[arg-type]
self.async_client = AsyncChatCompletionsClient(**client_kwargs) # type: ignore[arg-type]
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self.max_tokens = max_tokens
self.stream = stream
self.response_format = response_format
self.is_openai_model = any(
prefix in model.lower() for prefix in ["gpt-", "o1-", "text-"]
)
self.is_azure_openai_endpoint = (
"openai.azure.com" in self.endpoint
and "/openai/deployments/" in self.endpoint
)
@staticmethod
def _validate_and_fix_endpoint(endpoint: str, model: str) -> str:
"""Validate and fix Azure endpoint URL format.
Azure OpenAI endpoints should be in the format:
https://<resource-name>.openai.azure.com/openai/deployments/<deployment-name>
Args:
endpoint: The endpoint URL
model: The model/deployment name
Returns:
Validated and potentially corrected endpoint URL
"""
if "openai.azure.com" in endpoint and "/openai/deployments/" not in endpoint:
endpoint = endpoint.rstrip("/")
if not endpoint.endswith("/openai/deployments"):
deployment_name = model.replace("azure/", "")
endpoint = f"{endpoint}/openai/deployments/{deployment_name}"
logging.info(f"Constructed Azure OpenAI endpoint URL: {endpoint}")
return endpoint
def _handle_api_error(
self,
error: Exception,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> None:
"""Handle API errors with appropriate logging and events.
Args:
error: The exception that occurred
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Raises:
The original exception after logging and emitting events
"""
if isinstance(error, HttpResponseError):
if error.status_code == 401:
error_msg = "Azure authentication failed. Check your API key."
elif error.status_code == 404:
error_msg = (
f"Azure endpoint not found. Check endpoint URL: {self.endpoint}"
)
elif error.status_code == 429:
error_msg = "Azure API rate limit exceeded. Please retry later."
else:
error_msg = (
f"Azure API HTTP error: {error.status_code} - {error.message}"
)
else:
error_msg = f"Azure API call failed: {error!s}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise error
def _handle_completion_error(
self,
error: Exception,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> None:
"""Handle completion-specific errors including context length checks.
Args:
error: The exception that occurred
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Raises:
LLMContextLengthExceededError if context window exceeded, otherwise the original exception
"""
if is_context_length_exceeded(error):
logging.error(f"Context window exceeded: {error}")
raise LLMContextLengthExceededError(str(error)) from error
error_msg = f"Azure API call failed: {error!s}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise error
def call(
self,
messages: str | list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Call Azure AI Inference chat completions API.
Args:
messages: Input messages for the chat completion
tools: List of tool/function definitions
callbacks: Callback functions (not used in native implementation)
available_functions: Available functions for tool calling
from_task: Task that initiated the call
from_agent: Agent that initiated the call
response_model: Response model
Returns:
Chat completion response or tool call result
"""
with llm_call_context():
try:
# Emit call started event
self._emit_call_started_event(
messages=messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
effective_response_model = response_model or self.response_format
# Format messages for Azure
formatted_messages = self._format_messages_for_azure(messages)
if not self._invoke_before_llm_call_hooks(
formatted_messages, from_agent
):
raise ValueError("LLM call blocked by before_llm_call hook")
# Prepare completion parameters
completion_params = self._prepare_completion_params(
formatted_messages, tools, effective_response_model
)
# Handle streaming vs non-streaming
if self.stream:
return self._handle_streaming_completion(
completion_params,
available_functions,
from_task,
from_agent,
effective_response_model,
)
return self._handle_completion(
completion_params,
available_functions,
from_task,
from_agent,
effective_response_model,
)
except Exception as e:
return self._handle_api_error(e, from_task, from_agent) # type: ignore[func-returns-value]
async def acall( # type: ignore[return]
self,
messages: str | list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Call Azure AI Inference chat completions API asynchronously.
Args:
messages: Input messages for the chat completion
tools: List of tool/function definitions
callbacks: Callback functions (not used in native implementation)
available_functions: Available functions for tool calling
from_task: Task that initiated the call
from_agent: Agent that initiated the call
response_model: Pydantic model for structured output
Returns:
Chat completion response or tool call result
"""
with llm_call_context():
try:
self._emit_call_started_event(
messages=messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
effective_response_model = response_model or self.response_format
formatted_messages = self._format_messages_for_azure(messages)
completion_params = self._prepare_completion_params(
formatted_messages, tools, effective_response_model
)
if self.stream:
return await self._ahandle_streaming_completion(
completion_params,
available_functions,
from_task,
from_agent,
effective_response_model,
)
return await self._ahandle_completion(
completion_params,
available_functions,
from_task,
from_agent,
effective_response_model,
)
except Exception as e:
self._handle_api_error(e, from_task, from_agent)
def _prepare_completion_params(
self,
messages: list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
response_model: type[BaseModel] | None = None,
) -> AzureCompletionParams:
"""Prepare parameters for Azure AI Inference chat completion.
Args:
messages: Formatted messages for Azure
tools: Tool definitions
response_model: Pydantic model for structured output
Returns:
Parameters dictionary for Azure API
"""
params: AzureCompletionParams = {
"messages": messages,
"stream": self.stream,
}
model_extras: dict[str, Any] = {}
if self.stream:
model_extras["stream_options"] = {"include_usage": True}
if response_model and self.is_openai_model:
model_description = generate_model_description(response_model)
json_schema_info = model_description["json_schema"]
json_schema_name = json_schema_info["name"]
params["response_format"] = JsonSchemaFormat(
name=json_schema_name,
schema=json_schema_info["schema"],
description=f"Schema for {json_schema_name}",
strict=json_schema_info["strict"],
)
# Only include model parameter for non-Azure OpenAI endpoints
# Azure OpenAI endpoints have the deployment name in the URL
if not self.is_azure_openai_endpoint:
params["model"] = self.model
# Add optional parameters if set
if self.temperature is not None:
params["temperature"] = self.temperature
if self.top_p is not None:
params["top_p"] = self.top_p
if self.frequency_penalty is not None:
params["frequency_penalty"] = self.frequency_penalty
if self.presence_penalty is not None:
params["presence_penalty"] = self.presence_penalty
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
if self.stop and self.supports_stop_words():
params["stop"] = self.stop
# Handle tools/functions for Azure OpenAI models
if tools and self.is_openai_model:
params["tools"] = self._convert_tools_for_interference(tools)
params["tool_choice"] = "auto"
prompt_cache_key = self.additional_params.get("prompt_cache_key")
if prompt_cache_key:
model_extras["prompt_cache_key"] = prompt_cache_key
if model_extras:
params["model_extras"] = model_extras
additional_params = self.additional_params
additional_drop_params = additional_params.get("additional_drop_params")
drop_params = additional_params.get("drop_params")
if drop_params and isinstance(additional_drop_params, list):
for drop_param in additional_drop_params:
if isinstance(drop_param, str):
params.pop(drop_param, None) # type: ignore[misc]
return params
def _convert_tools_for_interference( # type: ignore[override]
self, tools: list[dict[str, Any]]
) -> list[ChatCompletionsToolDefinition]:
"""Convert CrewAI tool format to Azure OpenAI function calling format.
Args:
tools: List of CrewAI tool definitions
Returns:
List of Azure ChatCompletionsToolDefinition objects
"""
from crewai.llms.providers.utils.common import safe_tool_conversion
azure_tools: list[ChatCompletionsToolDefinition] = []
for tool in tools:
name, description, parameters = safe_tool_conversion(tool, "Azure")
function_def = FunctionDefinition(
name=name,
description=description,
parameters=parameters
if isinstance(parameters, dict)
else dict(parameters)
if parameters
else None,
)
tool_def = ChatCompletionsToolDefinition(function=function_def)
azure_tools.append(tool_def)
return azure_tools
def _format_messages_for_azure(
self, messages: str | list[LLMMessage]
) -> list[LLMMessage]:
"""Format messages for Azure AI Inference API.
Args:
messages: Input messages
Returns:
List of dict objects with 'role' and 'content' keys
"""
# Use base class formatting first
base_formatted = super()._format_messages(messages)
azure_messages: list[LLMMessage] = []
for message in base_formatted:
role = message.get("role", "user") # Default to user if no role
# Handle None content - Azure requires string content
content = message.get("content") or ""
if role == "tool":
tool_call_id = message.get("tool_call_id", "")
if not tool_call_id:
raise ValueError("Tool message missing required tool_call_id")
azure_messages.append(
{
"role": "tool",
"tool_call_id": tool_call_id,
"content": content,
}
)
# Handle assistant messages with tool_calls
elif role == "assistant" and message.get("tool_calls"):
tool_calls = message.get("tool_calls", [])
azure_msg: LLMMessage = {
"role": "assistant",
"content": content, # Already defaulted to "" above
"tool_calls": tool_calls,
}
azure_messages.append(azure_msg)
else:
# Azure AI Inference requires both 'role' and 'content'
azure_messages.append({"role": role, "content": content})
return azure_messages
def _validate_and_emit_structured_output(
self,
content: str,
response_model: type[BaseModel],
params: AzureCompletionParams,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> BaseModel:
"""Validate content against response model and emit completion event.
Args:
content: Response content to validate
response_model: Pydantic model for validation
params: Completion parameters containing messages
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
Validated Pydantic model instance
Raises:
ValueError: If validation fails
"""
try:
structured_data = response_model.model_validate_json(content)
self._emit_call_completed_event(
response=structured_data.model_dump_json(),
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return structured_data
except Exception as e:
error_msg = f"Failed to validate structured output with model {response_model.__name__}: {e}"
logging.error(error_msg)
raise ValueError(error_msg) from e
    def _process_completion_response(
        self,
        response: ChatCompletions,
        params: AzureCompletionParams,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Process completion response with usage tracking, tool execution, and events.

        Args:
            response: Chat completion response from Azure API
            params: Completion parameters containing messages
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Pydantic model for structured output

        Returns:
            Response content or structured output

        Raises:
            ValueError: If the API returned no choices, or if structured
                output validation fails downstream.
        """
        if not response.choices:
            raise ValueError("No choices returned from Azure API")

        # Only the first choice is ever consumed.
        choice = response.choices[0]
        message = choice.message

        # Extract and track token usage
        usage = self._extract_azure_token_usage(response)
        self._track_token_usage_internal(usage)

        # If there are tool_calls but no available_functions, return the tool_calls
        # This allows the caller (e.g., executor) to handle tool execution
        if message.tool_calls and not available_functions:
            self._emit_call_completed_event(
                response=list(message.tool_calls),
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )
            return list(message.tool_calls)

        # Handle tool calls
        if message.tool_calls and available_functions:
            tool_call = message.tool_calls[0]  # Handle first tool call
            if isinstance(tool_call, ChatCompletionsToolCall):
                function_name = tool_call.function.name

                try:
                    function_args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError as e:
                    # Malformed arguments degrade to empty kwargs instead of
                    # aborting the whole call.
                    logging.error(f"Failed to parse tool arguments: {e}")
                    function_args = {}

                # Execute tool
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                # A None result falls through to plain-content handling below.
                if result is not None:
                    return result

        # Extract content
        content = message.content or ""

        # Structured output is validated only for Azure OpenAI models.
        if response_model and self.is_openai_model:
            return self._validate_and_emit_structured_output(
                content=content,
                response_model=response_model,
                params=params,
                from_task=from_task,
                from_agent=from_agent,
            )

        content = self._apply_stop_words(content)

        # Emit completion event and return content
        self._emit_call_completed_event(
            response=content,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        # After-call hooks get the final say on the returned content.
        return self._invoke_after_llm_call_hooks(
            params["messages"], content, from_agent
        )
def _handle_completion(
self,
params: AzureCompletionParams,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle non-streaming chat completion."""
try:
# Cast params to Any to avoid type checking issues with TypedDict unpacking
response: ChatCompletions = self.client.complete(**params) # type: ignore[assignment,arg-type]
return self._process_completion_response(
response=response,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
except Exception as e:
return self._handle_completion_error(e, from_task, from_agent) # type: ignore[func-returns-value]
    def _process_streaming_update(
        self,
        update: StreamingChatCompletionsUpdate,
        full_response: str,
        tool_calls: dict[int, dict[str, Any]],
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> str:
        """Process a single streaming update chunk.

        Accumulates text deltas into ``full_response`` and partial tool-call
        fragments into ``tool_calls`` (mutated in place), emitting a stream
        chunk event for each delta.

        Args:
            update: Streaming update from Azure API
            full_response: Accumulated response content
            tool_calls: Dictionary of accumulated tool calls, keyed by index
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call

        Returns:
            Updated full_response string
        """
        if update.choices:
            # Only the first choice is inspected per update.
            choice = update.choices[0]
            response_id = update.id if hasattr(update, "id") else None

            if choice.delta and choice.delta.content:
                content_delta = choice.delta.content
                full_response += content_delta
                self._emit_stream_chunk_event(
                    chunk=content_delta,
                    from_task=from_task,
                    from_agent=from_agent,
                    response_id=response_id,
                )

            if choice.delta and choice.delta.tool_calls:
                for idx, tool_call in enumerate(choice.delta.tool_calls):
                    # First fragment for this index: start a fresh accumulator.
                    if idx not in tool_calls:
                        tool_calls[idx] = {
                            "id": tool_call.id,
                            "name": "",
                            "arguments": "",
                        }
                    # Later fragments may carry an id the first one lacked.
                    elif tool_call.id and not tool_calls[idx]["id"]:
                        tool_calls[idx]["id"] = tool_call.id

                    if tool_call.function and tool_call.function.name:
                        tool_calls[idx]["name"] = tool_call.function.name
                    if tool_call.function and tool_call.function.arguments:
                        tool_calls[idx]["arguments"] += tool_call.function.arguments

                    # Emit the accumulated partial call in OpenAI-compatible shape.
                    self._emit_stream_chunk_event(
                        chunk=tool_call.function.arguments
                        if tool_call.function and tool_call.function.arguments
                        else "",
                        from_task=from_task,
                        from_agent=from_agent,
                        tool_call={
                            "id": tool_calls[idx]["id"],
                            "function": {
                                "name": tool_calls[idx]["name"],
                                "arguments": tool_calls[idx]["arguments"],
                            },
                            "type": "function",
                            "index": idx,
                        },
                        call_type=LLMCallType.TOOL_CALL,
                        response_id=response_id,
                    )
        return full_response
    def _finalize_streaming_response(
        self,
        full_response: str,
        tool_calls: dict[int, dict[str, Any]],
        usage_data: dict[str, int],
        params: AzureCompletionParams,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Finalize streaming response with usage tracking, tool execution, and events.

        Args:
            full_response: The complete streamed response content
            tool_calls: Dictionary of tool calls accumulated during streaming
            usage_data: Token usage data from the stream
            params: Completion parameters containing messages
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Pydantic model for structured output validation

        Returns:
            Final response content after processing, or structured output
        """
        self._track_token_usage_internal(usage_data)

        # Handle structured output validation
        if response_model and self.is_openai_model:
            return self._validate_and_emit_structured_output(
                content=full_response,
                response_model=response_model,
                params=params,
                from_task=from_task,
                from_agent=from_agent,
            )

        # If there are tool_calls but no available_functions, return them
        # in OpenAI-compatible format for executor to handle
        if tool_calls and not available_functions:
            formatted_tool_calls = [
                {
                    "id": call_data.get("id", f"call_{idx}"),
                    "type": "function",
                    "function": {
                        "name": call_data["name"],
                        "arguments": call_data["arguments"],
                    },
                }
                for idx, call_data in tool_calls.items()
            ]
            self._emit_call_completed_event(
                response=formatted_tool_calls,
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )
            return formatted_tool_calls

        # Handle completed tool calls
        if tool_calls and available_functions:
            for call_data in tool_calls.values():
                function_name = call_data["name"]

                try:
                    function_args = json.loads(call_data["arguments"])
                except json.JSONDecodeError as e:
                    # Skip calls whose streamed arguments never formed valid JSON.
                    logging.error(f"Failed to parse streamed tool arguments: {e}")
                    continue

                # Execute tool
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                # The first tool that produces a result short-circuits the loop.
                if result is not None:
                    return result

        # Apply stop words to full response
        full_response = self._apply_stop_words(full_response)

        # Emit completion event and return full response
        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )
        # After-call hooks may rewrite the final content.
        return self._invoke_after_llm_call_hooks(
            params["messages"], full_response, from_agent
        )
def _handle_streaming_completion(
self,
params: AzureCompletionParams,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle streaming chat completion."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
usage_data = {"total_tokens": 0}
for update in self.client.complete(**params): # type: ignore[arg-type]
if isinstance(update, StreamingChatCompletionsUpdate):
if update.usage:
usage = update.usage
usage_data = {
"prompt_tokens": usage.prompt_tokens,
"completion_tokens": usage.completion_tokens,
"total_tokens": usage.total_tokens,
}
continue
full_response = self._process_streaming_update(
update=update,
full_response=full_response,
tool_calls=tool_calls,
from_task=from_task,
from_agent=from_agent,
)
return self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
async def _ahandle_completion(
self,
params: AzureCompletionParams,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle non-streaming chat completion asynchronously."""
try:
# Cast params to Any to avoid type checking issues with TypedDict unpacking
response: ChatCompletions = await self.async_client.complete(**params) # type: ignore[assignment,arg-type]
return self._process_completion_response(
response=response,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
except Exception as e:
return self._handle_completion_error(e, from_task, from_agent) # type: ignore[func-returns-value]
async def _ahandle_streaming_completion(
self,
params: AzureCompletionParams,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle streaming chat completion asynchronously."""
full_response = ""
tool_calls: dict[int, dict[str, Any]] = {}
usage_data = {"total_tokens": 0}
stream = await self.async_client.complete(**params) # type: ignore[arg-type]
async for update in stream: # type: ignore[union-attr]
if isinstance(update, StreamingChatCompletionsUpdate):
if hasattr(update, "usage") and update.usage:
usage = update.usage
usage_data = {
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
"completion_tokens": getattr(usage, "completion_tokens", 0),
"total_tokens": getattr(usage, "total_tokens", 0),
}
continue
full_response = self._process_streaming_update(
update=update,
full_response=full_response,
tool_calls=tool_calls,
from_task=from_task,
from_agent=from_agent,
)
return self._finalize_streaming_response(
full_response=full_response,
tool_calls=tool_calls,
usage_data=usage_data,
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
    def supports_function_calling(self) -> bool:
        """Check if the model supports function calling.

        Returns:
            True when the configured deployment is an Azure OpenAI model;
            tool calling is only wired up for those models in this provider.
        """
        # Azure OpenAI models support function calling
        return self.is_openai_model
def supports_stop_words(self) -> bool:
"""Check if the model supports stop words.
Models using the Responses API (GPT-5 family, o-series reasoning models,
computer-use-preview) do not support stop sequences.
See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
"""
model_lower = self.model.lower() if self.model else ""
if "gpt-5" in model_lower:
return False
o_series_models = ["o1", "o3", "o4", "o1-mini", "o3-mini", "o4-mini"]
responses_api_models = ["computer-use-preview"]
unsupported_stop_models = o_series_models + responses_api_models
for unsupported in unsupported_stop_models:
if unsupported in model_lower:
return False
return True
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
if value < min_context or value > max_context:
raise ValueError(
f"Context window for {key} must be between {min_context} and {max_context}"
)
# Context window sizes for common Azure models
context_windows = {
"gpt-4": 8192,
"gpt-4o": 128000,
"gpt-4o-mini": 200000,
"gpt-4-turbo": 128000,
"gpt-35-turbo": 16385,
"gpt-3.5-turbo": 16385,
"text-embedding": 8191,
}
# Find the best match for the model name
for model_prefix, size in sorted(
context_windows.items(), key=lambda x: len(x[0]), reverse=True
):
if self.model.startswith(model_prefix):
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
# Default context window size
return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)
@staticmethod
def _extract_azure_token_usage(response: ChatCompletions) -> dict[str, Any]:
"""Extract token usage from Azure response."""
if hasattr(response, "usage") and response.usage:
usage = response.usage
cached_tokens = 0
prompt_details = getattr(usage, "prompt_tokens_details", None)
if prompt_details:
cached_tokens = getattr(prompt_details, "cached_tokens", 0) or 0
return {
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
"completion_tokens": getattr(usage, "completion_tokens", 0),
"total_tokens": getattr(usage, "total_tokens", 0),
"cached_prompt_tokens": cached_tokens,
}
return {"total_tokens": 0}
    async def aclose(self) -> None:
        """Close the async client and clean up resources.

        This ensures proper cleanup of the underlying aiohttp session
        to avoid unclosed connector warnings.
        """
        # Not every client implementation exposes close(); guard with hasattr.
        if hasattr(self.async_client, "close"):
            await self.async_client.close()
    async def __aenter__(self) -> Self:
        """Async context manager entry.

        Returns:
            The client itself, enabling ``async with client:`` usage.
        """
        return self
    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        """Async context manager exit; always closes the async client."""
        await self.aclose()
def supports_multimodal(self) -> bool:
"""Check if the model supports multimodal inputs.
Azure OpenAI vision-enabled models include GPT-4o and GPT-4 Turbo with Vision.
Returns:
True if the model supports images.
"""
vision_models = ("gpt-4o", "gpt-4-turbo", "gpt-4-vision", "gpt-4v")
return any(self.model.lower().startswith(m) for m in vision_models)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/providers/azure/completion.py",
"license": "MIT License",
"lines": 947,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/bedrock/completion.py | from __future__ import annotations
from collections.abc import Mapping, Sequence
from contextlib import AsyncExitStack
import json
import logging
import os
from typing import TYPE_CHECKING, Any, TypedDict, cast
from pydantic import BaseModel
from typing_extensions import Required
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM, llm_call_context
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from mypy_boto3_bedrock_runtime.type_defs import (
GuardrailConfigurationTypeDef,
GuardrailStreamConfigurationTypeDef,
InferenceConfigurationTypeDef,
MessageOutputTypeDef,
MessageTypeDef,
SystemContentBlockTypeDef,
TokenUsageTypeDef,
ToolConfigurationTypeDef,
ToolTypeDef,
)
from crewai.llms.hooks.base import BaseInterceptor
try:
from boto3.session import Session
from botocore.config import Config
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
raise ImportError(
'AWS Bedrock native provider not available, to install: uv add "crewai[bedrock]"'
) from None
# NOTE(review): presumably the reserved name of the synthetic tool used to
# coerce models into emitting structured output — confirm against call sites.
STRUCTURED_OUTPUT_TOOL_NAME = "structured_output"
def _preprocess_structured_data(
data: dict[str, Any], response_model: type[BaseModel]
) -> dict[str, Any]:
"""Preprocess structured data to handle common LLM output format issues.
Some models (especially Claude on Bedrock) may return array fields as
markdown-formatted strings instead of proper JSON arrays. This function
attempts to convert such strings to arrays before validation.
Args:
data: The raw structured data from the tool response
response_model: The Pydantic model class to validate against
Returns:
Preprocessed data with string-to-array conversions where needed
"""
import re
from typing import get_origin
# Get model field annotations
model_fields = response_model.model_fields
processed_data = dict(data)
for field_name, field_info in model_fields.items():
if field_name not in processed_data:
continue
value = processed_data[field_name]
# Check if the field expects a list type
annotation = field_info.annotation
origin = get_origin(annotation)
# Handle list[X] or List[X] types
is_list_type = origin is list or (
origin is not None and str(origin).startswith("list")
)
if is_list_type and isinstance(value, str):
# Try to parse markdown-style bullet points or numbered lists
lines = value.strip().split("\n")
parsed_items = []
for line in lines:
line = line.strip()
if not line:
continue
# Remove common bullet point prefixes
# Matches: "- item", "* item", "• item", "1. item", "1) item"
cleaned = re.sub(r"^[-*•]\s*", "", line)
cleaned = re.sub(r"^\d+[.)]\s*", "", cleaned)
cleaned = cleaned.strip()
if cleaned:
parsed_items.append(cleaned)
if parsed_items:
processed_data[field_name] = parsed_items
logging.debug(
f"Converted markdown-formatted string to list for field '{field_name}': "
f"{len(parsed_items)} items"
)
return processed_data
try:
from aiobotocore.session import ( # type: ignore[import-untyped]
get_session as get_aiobotocore_session,
)
AIOBOTOCORE_AVAILABLE = True
except ImportError:
AIOBOTOCORE_AVAILABLE = False
get_aiobotocore_session = None
if TYPE_CHECKING:

    class EnhancedInferenceConfigurationTypeDef(
        InferenceConfigurationTypeDef, total=False
    ):
        """Extended InferenceConfigurationTypeDef with topK support.

        AWS Bedrock supports topK for Claude models, but it's not in the boto3 type stubs.
        This extends the base type to include topK while maintaining all other fields.
        """

        topK: int  # noqa: N815 - AWS API uses topK naming

else:
    # Runtime fallback: the boto3 stub type is unavailable outside type
    # checking, so redeclare the full shape as a plain TypedDict.

    class EnhancedInferenceConfigurationTypeDef(TypedDict, total=False):
        """Extended InferenceConfigurationTypeDef with topK support.

        AWS Bedrock supports topK for Claude models, but it's not in the boto3 type stubs.
        This extends the base type to include topK while maintaining all other fields.
        """

        maxTokens: int  # Upper bound on generated tokens
        temperature: float  # Sampling temperature
        topP: float  # Nucleus sampling parameter
        stopSequences: list[str]  # Sequences that halt generation
        topK: int  # Top-k sampling cutoff (Claude models only)
class ToolInputSchema(TypedDict):
    """Type definition for tool input schema in Converse API."""

    json: dict[str, Any]  # JSON Schema describing the tool's input parameters
class ToolSpec(TypedDict, total=False):
    """Type definition for tool specification in Converse API."""

    name: Required[str]  # Tool name (always required)
    description: Required[str]  # Human-readable description (always required)
    inputSchema: ToolInputSchema  # Optional JSON-schema wrapper for the inputs
class ConverseToolTypeDef(TypedDict):
    """Type definition for a Converse API tool."""

    toolSpec: ToolSpec  # The single tool specification carried by this entry
class BedrockConverseRequestBody(TypedDict, total=False):
    """Type definition for AWS Bedrock Converse API request body.

    Based on AWS Bedrock Converse API specification.
    """

    inferenceConfig: Required[EnhancedInferenceConfigurationTypeDef]  # Always present
    system: list[SystemContentBlockTypeDef]  # Optional system prompt blocks
    toolConfig: ToolConfigurationTypeDef  # Tool definitions / tool choice
    guardrailConfig: GuardrailConfigurationTypeDef  # Content-filtering guardrails
    additionalModelRequestFields: dict[str, Any]  # Model-specific request extras
    additionalModelResponseFieldPaths: list[str]  # Extra response paths to extract
class BedrockConverseStreamRequestBody(TypedDict, total=False):
    """Type definition for AWS Bedrock Converse Stream API request body.

    Based on AWS Bedrock Converse Stream API specification.
    """

    inferenceConfig: Required[EnhancedInferenceConfigurationTypeDef]  # Always present
    system: list[SystemContentBlockTypeDef]  # Optional system prompt blocks
    toolConfig: ToolConfigurationTypeDef  # Tool definitions / tool choice
    guardrailConfig: GuardrailStreamConfigurationTypeDef  # Streaming guardrails
    additionalModelRequestFields: dict[str, Any]  # Model-specific request extras
    additionalModelResponseFieldPaths: list[str]  # Extra response paths to extract
class BedrockCompletion(BaseLLM):
"""AWS Bedrock native completion implementation using the Converse API.
This class provides direct integration with AWS Bedrock using the modern
Converse API, which provides a unified interface across all Bedrock models.
Features:
- Full tool calling support with proper conversation continuation
- Streaming and non-streaming responses with comprehensive event handling
- Guardrail configuration for content filtering
- Model-specific parameters via additionalModelRequestFields
- Custom response field extraction
- Proper error handling for all AWS exception types
- Token usage tracking and stop reason logging
- Support for both text and tool use content blocks
The implementation follows AWS Bedrock Converse API best practices including:
- Proper tool use ID tracking for multi-turn tool conversations
- Complete streaming event handling (messageStart, contentBlockStart, etc.)
- Response metadata and trace information capture
- Model-specific conversation format handling (e.g., Cohere requirements)
"""
    def __init__(
        self,
        model: str = "anthropic.claude-3-5-sonnet-20241022-v2:0",
        aws_access_key_id: str | None = None,
        aws_secret_access_key: str | None = None,
        aws_session_token: str | None = None,
        region_name: str | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        top_p: float | None = None,
        top_k: int | None = None,
        stop_sequences: Sequence[str] | None = None,
        stream: bool = False,
        guardrail_config: dict[str, Any] | None = None,
        additional_model_request_fields: dict[str, Any] | None = None,
        additional_model_response_field_paths: list[str] | None = None,
        interceptor: BaseInterceptor[Any, Any] | None = None,
        response_format: type[BaseModel] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize AWS Bedrock completion client.

        Args:
            model: The Bedrock model ID to use
            aws_access_key_id: AWS access key (defaults to environment variable)
            aws_secret_access_key: AWS secret key (defaults to environment variable)
            aws_session_token: AWS session token for temporary credentials
            region_name: AWS region name
            temperature: Sampling temperature for response generation
            max_tokens: Maximum tokens to generate
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter (Claude models only)
            stop_sequences: List of sequences that stop generation
            stream: Whether to use streaming responses
            guardrail_config: Guardrail configuration for content filtering
            additional_model_request_fields: Model-specific request parameters
            additional_model_response_field_paths: Custom response field paths
            interceptor: HTTP interceptor (not yet supported for Bedrock).
            response_format: Pydantic model for structured output. Used as default when
                response_model is not passed to call()/acall() methods.
            **kwargs: Additional parameters

        Raises:
            NotImplementedError: If an HTTP interceptor is supplied.
        """
        # Interceptors are unimplemented for Bedrock: fail fast and loudly.
        if interceptor is not None:
            raise NotImplementedError(
                "HTTP interceptors are not yet supported for AWS Bedrock provider. "
                "Interceptors are currently supported for OpenAI and Anthropic providers only."
            )
        # Extract provider from kwargs to avoid duplicate argument
        kwargs.pop("provider", None)
        super().__init__(
            model=model,
            temperature=temperature,
            stop=stop_sequences or [],
            provider="bedrock",
            **kwargs,
        )
        # Configure client with timeouts and retries following AWS best practices
        config = Config(
            read_timeout=300,  # generous read timeout: generation can be slow
            retries={
                "max_attempts": 3,
                "mode": "adaptive",
            },
            tcp_keepalive=True,
        )
        # Region resolution order: explicit arg, env vars, then a default.
        self.region_name = (
            region_name
            or os.getenv("AWS_DEFAULT_REGION")
            or os.getenv("AWS_REGION_NAME")
            or "us-east-1"
        )
        # Credentials fall back to the standard AWS environment variables.
        self.aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
        self.aws_secret_access_key = aws_secret_access_key or os.getenv(
            "AWS_SECRET_ACCESS_KEY"
        )
        self.aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
        # Initialize Bedrock client with proper configuration
        session = Session(
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
            aws_session_token=self.aws_session_token,
            region_name=self.region_name,
        )
        self.client = session.client("bedrock-runtime", config=config)
        # Async support is optional: only prepare the exit stack when
        # aiobotocore is installed.
        self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None
        self._async_client_initialized = False
        # Store completion parameters
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.top_k = top_k
        self.stream = stream
        self.stop_sequences = stop_sequences
        self.response_format = response_format
        # Store advanced features (optional)
        self.guardrail_config = guardrail_config
        self.additional_model_request_fields = additional_model_request_fields
        self.additional_model_response_field_paths = (
            additional_model_response_field_paths
        )
        # Model-specific settings
        self.is_claude_model = "claude" in model.lower()
        self.supports_tools = True  # Converse API supports tools for most models
        self.supports_streaming = True
        # Handle inference profiles for newer models
        self.model_id = model
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return [] if self.stop_sequences is None else list(self.stop_sequences)
@stop.setter
def stop(self, value: Sequence[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Bedrock API.
Args:
value: Stop sequences as a Sequence, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, Sequence):
self.stop_sequences = list(value)
else:
self.stop_sequences = []
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[Any, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call AWS Bedrock Converse API.

        Builds the Converse request body (inference config, system prompt,
        tool config, optional guardrail/advanced fields) and dispatches to the
        streaming or non-streaming handler.

        Args:
            messages: Input messages as a string or list of message dicts.
            tools: Optional list of tool definitions.
            callbacks: Optional list of callback handlers.
            available_functions: Optional dict mapping function names to callables.
            from_task: Optional task context for events.
            from_agent: Optional agent context for events.
            response_model: Optional Pydantic model for structured output.

        Returns:
            Generated text response or structured output.

        Raises:
            ValueError: If a before_llm_call hook blocks the call.
            LLMContextLengthExceededError: If the context window is exceeded.
        """
        # An explicit response_model wins over the instance-level response_format.
        effective_response_model = response_model or self.response_format
        with llm_call_context():
            try:
                # Emit call started event
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                # Format messages for Converse API
                formatted_messages, system_message = self._format_messages_for_converse(
                    messages
                )
                # Hooks may veto the request before anything is sent to Bedrock.
                if not self._invoke_before_llm_call_hooks(
                    formatted_messages, from_agent
                ):
                    raise ValueError("LLM call blocked by before_llm_call hook")
                # Prepare request body
                body: BedrockConverseRequestBody = {
                    "inferenceConfig": self._get_inference_config(),
                }
                # Add system message if present
                if system_message:
                    body["system"] = cast(
                        "list[SystemContentBlockTypeDef]",
                        cast(object, [{"text": system_message}]),
                    )
                # Add tool config if present or if messages contain tool content
                # Bedrock requires toolConfig when messages have toolUse/toolResult
                if tools:
                    tool_config: ToolConfigurationTypeDef = {
                        "tools": cast(
                            "Sequence[ToolTypeDef]",
                            cast(object, self._format_tools_for_converse(tools)),
                        )
                    }
                    body["toolConfig"] = tool_config
                elif self._messages_contain_tool_content(formatted_messages):
                    # Create minimal toolConfig from tool history in messages
                    tools_from_history = self._extract_tools_from_message_history(
                        formatted_messages
                    )
                    if tools_from_history:
                        body["toolConfig"] = cast(
                            "ToolConfigurationTypeDef",
                            cast(object, {"tools": tools_from_history}),
                        )
                # Add optional advanced features if configured
                if self.guardrail_config:
                    guardrail_config: GuardrailConfigurationTypeDef = cast(
                        "GuardrailConfigurationTypeDef",
                        cast(object, self.guardrail_config),
                    )
                    body["guardrailConfig"] = guardrail_config
                if self.additional_model_request_fields:
                    body["additionalModelRequestFields"] = (
                        self.additional_model_request_fields
                    )
                if self.additional_model_response_field_paths:
                    body["additionalModelResponseFieldPaths"] = (
                        self.additional_model_response_field_paths
                    )
                if self.stream:
                    return self._handle_streaming_converse(
                        formatted_messages,
                        body,
                        available_functions,
                        from_task,
                        from_agent,
                        effective_response_model,
                    )
                return self._handle_converse(
                    formatted_messages,
                    body,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )
            except Exception as e:
                # Context overflow gets a dedicated exception type so callers
                # can trigger summarization/truncation strategies.
                if is_context_length_exceeded(e):
                    logging.error(f"Context window exceeded: {e}")
                    raise LLMContextLengthExceededError(str(e)) from e
                error_msg = f"AWS Bedrock API call failed: {e!s}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
async def acall(
self,
messages: str | list[LLMMessage],
tools: list[dict[Any, Any]] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Async call to AWS Bedrock Converse API.
Args:
messages: Input messages as string or list of message dicts.
tools: Optional list of tool definitions.
callbacks: Optional list of callback handlers.
available_functions: Optional dict mapping function names to callables.
from_task: Optional task context for events.
from_agent: Optional agent context for events.
response_model: Optional Pydantic model for structured output.
Returns:
Generated text response or structured output.
Raises:
NotImplementedError: If aiobotocore is not installed.
LLMContextLengthExceededError: If context window is exceeded.
"""
effective_response_model = response_model or self.response_format
if not AIOBOTOCORE_AVAILABLE:
raise NotImplementedError(
"Async support for AWS Bedrock requires aiobotocore. "
'Install with: uv add "crewai[bedrock-async]"'
)
with llm_call_context():
try:
self._emit_call_started_event(
messages=messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
formatted_messages, system_message = self._format_messages_for_converse(
messages
)
body: BedrockConverseRequestBody = {
"inferenceConfig": self._get_inference_config(),
}
if system_message:
body["system"] = cast(
"list[SystemContentBlockTypeDef]",
cast(object, [{"text": system_message}]),
)
# Add tool config if present or if messages contain tool content
# Bedrock requires toolConfig when messages have toolUse/toolResult
if tools:
tool_config: ToolConfigurationTypeDef = {
"tools": cast(
"Sequence[ToolTypeDef]",
cast(object, self._format_tools_for_converse(tools)),
)
}
body["toolConfig"] = tool_config
elif self._messages_contain_tool_content(formatted_messages):
# Create minimal toolConfig from tool history in messages
tools_from_history = self._extract_tools_from_message_history(
formatted_messages
)
if tools_from_history:
body["toolConfig"] = cast(
"ToolConfigurationTypeDef",
cast(object, {"tools": tools_from_history}),
)
if self.guardrail_config:
guardrail_config: GuardrailConfigurationTypeDef = cast(
"GuardrailConfigurationTypeDef",
cast(object, self.guardrail_config),
)
body["guardrailConfig"] = guardrail_config
if self.additional_model_request_fields:
body["additionalModelRequestFields"] = (
self.additional_model_request_fields
)
if self.additional_model_response_field_paths:
body["additionalModelResponseFieldPaths"] = (
self.additional_model_response_field_paths
)
if self.stream:
return await self._ahandle_streaming_converse(
formatted_messages,
body,
available_functions,
from_task,
from_agent,
effective_response_model,
)
return await self._ahandle_converse(
formatted_messages,
body,
available_functions,
from_task,
from_agent,
effective_response_model,
)
except Exception as e:
if is_context_length_exceeded(e):
logging.error(f"Context window exceeded: {e}")
raise LLMContextLengthExceededError(str(e)) from e
error_msg = f"AWS Bedrock API call failed: {e!s}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
    def _handle_converse(
        self,
        messages: list[LLMMessage],
        body: BedrockConverseRequestBody,
        available_functions: Mapping[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming converse API call following AWS best practices.

        When the model requests a tool and a matching callable exists, the
        tool is executed and this method recurses with the tool result
        appended to `messages`.

        Args:
            messages: Converse-formatted message dicts; mutated in place when
                tool results are appended for the recursive follow-up call.
            body: Request body (inferenceConfig, system, toolConfig, ...);
                may be mutated to inject the structured-output tool.
            available_functions: Tool callables to execute locally when the
                model requests tool use.
            from_task: Optional task context for events.
            from_agent: Optional agent context for events.
            response_model: Optional Pydantic model for structured output.

        Returns:
            Text response, a validated response_model instance, or a list of
            pending tool-use dicts when no matching callables are available.

        Raises:
            ValueError: On empty/invalid messages, validation failures, or
                unknown model.
            PermissionError / TimeoutError / ConnectionError / RuntimeError:
                Mapped from the corresponding AWS error codes.
        """
        if response_model:
            # Check if structured_output tool already exists (from a previous recursive call)
            existing_tool_config = body.get("toolConfig")
            existing_tools: list[Any] = []
            structured_output_already_exists = False
            if existing_tool_config:
                existing_tools = list(existing_tool_config.get("tools", []))
                for tool in existing_tools:
                    tool_spec = tool.get("toolSpec", {})
                    if tool_spec.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                        structured_output_already_exists = True
                        break
            if not structured_output_already_exists:
                structured_tool: ConverseToolTypeDef = {
                    "toolSpec": {
                        "name": STRUCTURED_OUTPUT_TOOL_NAME,
                        "description": (
                            "Use this tool to provide your final structured response. "
                            "Call this tool when you have gathered all necessary information "
                            "and are ready to provide the final answer in the required format."
                        ),
                        "inputSchema": {
                            "json": generate_model_description(response_model)
                            .get("json_schema", {})
                            .get("schema", {})
                        },
                    }
                }
                if existing_tools:
                    # Append structured_output alongside the caller's tools;
                    # no forced toolChoice so regular tools stay usable.
                    existing_tools.append(structured_tool)
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(object, {"tools": existing_tools}),
                    )
                else:
                    # No existing tools, use only structured_output with forced toolChoice
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(
                            object,
                            {
                                "tools": [structured_tool],
                                "toolChoice": {
                                    "tool": {"name": STRUCTURED_OUTPUT_TOOL_NAME}
                                },
                            },
                        ),
                    )
        try:
            if not messages:
                raise ValueError("Messages cannot be empty")
            # Ensure we have valid message structure
            for i, msg in enumerate(messages):
                if (
                    not isinstance(msg, dict)
                    or "role" not in msg
                    or "content" not in msg
                ):
                    raise ValueError(f"Invalid message format at index {i}")
            # Call Bedrock Converse API with proper error handling
            response = self.client.converse(
                modelId=self.model_id,
                messages=cast(
                    "Sequence[MessageTypeDef | MessageOutputTypeDef]",
                    cast(object, messages),
                ),
                **body,
            )
            # Track token usage according to AWS response format
            if "usage" in response:
                self._track_token_usage_internal(response["usage"])
            stop_reason = response.get("stopReason")
            if stop_reason:
                logging.debug(f"Response stop reason: {stop_reason}")
                if stop_reason == "max_tokens":
                    logging.warning("Response truncated due to max_tokens limit")
                elif stop_reason == "content_filtered":
                    logging.warning("Response was filtered due to content policy")
            # Extract content following AWS response structure
            output = response.get("output", {})
            message = output.get("message", {})
            content = message.get("content", [])
            if not content:
                logging.warning("No content in Bedrock response")
                return (
                    "I apologize, but I received an empty response. Please try again."
                )
            # If there are tool uses but no available_functions, return them for the executor to handle
            tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
            # Check for structured_output tool call first
            if response_model and tool_uses:
                for tool_use in tool_uses:
                    if tool_use.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                        structured_data = tool_use.get("input", {})
                        structured_data = _preprocess_structured_data(
                            structured_data, response_model
                        )
                        try:
                            result = response_model.model_validate(structured_data)
                            self._emit_call_completed_event(
                                response=result.model_dump_json(),
                                call_type=LLMCallType.LLM_CALL,
                                from_task=from_task,
                                from_agent=from_agent,
                                messages=messages,
                            )
                            return result
                        except Exception as e:
                            error_msg = (
                                f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response "
                                f"with model {response_model.__name__}: {e}"
                            )
                            logging.error(error_msg)
                            raise ValueError(error_msg) from e
            # Filter out structured_output from tool_uses returned to executor
            non_structured_output_tool_uses = [
                tu for tu in tool_uses if tu.get("name") != STRUCTURED_OUTPUT_TOOL_NAME
            ]
            if non_structured_output_tool_uses and not available_functions:
                self._emit_call_completed_event(
                    response=non_structured_output_tool_uses,
                    call_type=LLMCallType.TOOL_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=messages,
                )
                return non_structured_output_tool_uses
            # Process content blocks and handle tool use correctly
            text_content = ""
            for content_block in content:
                # Handle text content
                if "text" in content_block:
                    text_content += content_block["text"]
                # Handle tool use - corrected structure according to AWS API docs
                elif "toolUse" in content_block and available_functions:
                    tool_use_block = content_block["toolUse"]
                    tool_use_id = tool_use_block.get("toolUseId")
                    function_name = tool_use_block["name"]
                    function_args = tool_use_block.get("input", {})
                    # Skip structured_output - it was handled above.
                    if function_name == STRUCTURED_OUTPUT_TOOL_NAME:
                        continue
                    logging.debug(
                        f"Tool use requested: {function_name} with ID {tool_use_id}"
                    )
                    # Execute the tool
                    tool_result = self._handle_tool_execution(
                        function_name=function_name,
                        function_args=function_args,
                        available_functions=dict(available_functions),
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if tool_result is not None:
                        # Record the assistant's tool request and the user-role
                        # tool result, then recurse so the model can continue.
                        messages.append(
                            {
                                "role": "assistant",
                                "content": [{"toolUse": tool_use_block}],
                            }
                        )
                        messages.append(
                            {
                                "role": "user",
                                "content": [
                                    {
                                        "toolResult": {
                                            "toolUseId": tool_use_id,
                                            "content": [{"text": str(tool_result)}],
                                        }
                                    }
                                ],
                            }
                        )
                        return self._handle_converse(
                            messages,
                            body,
                            available_functions,
                            from_task,
                            from_agent,
                            response_model,
                        )
            # Apply stop sequences if configured
            text_content = self._apply_stop_words(text_content)
            # Validate final response
            if not text_content or text_content.strip() == "":
                logging.warning("Extracted empty text content from Bedrock response")
                text_content = "I apologize, but I couldn't generate a proper response. Please try again."
            self._emit_call_completed_event(
                response=text_content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=messages,
            )
            return self._invoke_after_llm_call_hooks(
                messages,
                text_content,
                from_agent,
            )
        except ClientError as e:
            # Handle all AWS ClientError exceptions as per documentation
            error_code = e.response.get("Error", {}).get("Code", "Unknown")
            error_msg = e.response.get("Error", {}).get("Message", str(e))
            # Log the specific error for debugging
            logging.error(f"AWS Bedrock ClientError ({error_code}): {error_msg}")
            # Handle specific error codes as documented
            if error_code == "ValidationException":
                # This is the error we're seeing with Cohere
                if "last turn" in error_msg and "user message" in error_msg:
                    raise ValueError(
                        f"Conversation format error: {error_msg}. Check message alternation."
                    ) from e
                raise ValueError(f"Request validation failed: {error_msg}") from e
            if error_code == "AccessDeniedException":
                raise PermissionError(
                    f"Access denied to model {self.model_id}: {error_msg}"
                ) from e
            if error_code == "ResourceNotFoundException":
                raise ValueError(f"Model {self.model_id} not found: {error_msg}") from e
            if error_code == "ThrottlingException":
                raise RuntimeError(
                    f"API throttled, please retry later: {error_msg}"
                ) from e
            if error_code == "ModelTimeoutException":
                raise TimeoutError(f"Model request timed out: {error_msg}") from e
            if error_code == "ServiceQuotaExceededException":
                raise RuntimeError(f"Service quota exceeded: {error_msg}") from e
            if error_code == "ModelNotReadyException":
                raise RuntimeError(
                    f"Model {self.model_id} not ready: {error_msg}"
                ) from e
            if error_code == "ModelErrorException":
                raise RuntimeError(f"Model error: {error_msg}") from e
            if error_code == "InternalServerException":
                raise RuntimeError(f"Internal server error: {error_msg}") from e
            if error_code == "ServiceUnavailableException":
                raise RuntimeError(f"Service unavailable: {error_msg}") from e
            raise RuntimeError(f"Bedrock API error ({error_code}): {error_msg}") from e
        except BotoCoreError as e:
            error_msg = f"Bedrock connection error: {e}"
            logging.error(error_msg)
            raise ConnectionError(error_msg) from e
        except Exception as e:
            # Catch any other unexpected errors
            error_msg = f"Unexpected error in Bedrock converse call: {e}"
            logging.error(error_msg)
            raise RuntimeError(error_msg) from e
    def _handle_streaming_converse(
        self,
        messages: list[LLMMessage],
        body: BedrockConverseRequestBody,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str:
        """Handle streaming converse API call with comprehensive event handling.

        Iterates Bedrock stream events, accumulating text deltas into the
        final response and tracking tool-use blocks. When a tool-use block
        completes and a matching callable exists, the tool is executed and
        the conversation continues via the non-streaming _handle_converse
        with the tool result appended.

        Args:
            messages: Converse-formatted message dicts; mutated in place when
                tool results are appended.
            body: Request body (inferenceConfig, system, toolConfig, ...);
                may be mutated to inject the structured-output tool.
            available_functions: Tool callables to execute when the model
                requests tool use.
            from_task: Optional task context for events.
            from_agent: Optional agent context for events.
            response_model: Optional Pydantic model for structured output.

        Returns:
            The accumulated streamed text (or, despite the declared return
            type, a validated response_model instance for structured output —
            see the `type: ignore` on that return).
        """
        if response_model:
            # Check if structured_output tool already exists (from a previous recursive call)
            existing_tool_config = body.get("toolConfig")
            existing_tools: list[Any] = []
            structured_output_already_exists = False
            if existing_tool_config:
                existing_tools = list(existing_tool_config.get("tools", []))
                # Check if structured_output tool is already in the tools list
                for tool in existing_tools:
                    tool_spec = tool.get("toolSpec", {})
                    if tool_spec.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                        structured_output_already_exists = True
                        break
            if not structured_output_already_exists:
                structured_tool: ConverseToolTypeDef = {
                    "toolSpec": {
                        "name": STRUCTURED_OUTPUT_TOOL_NAME,
                        "description": (
                            "Use this tool to provide your final structured response. "
                            "Call this tool when you have gathered all necessary information "
                            "and are ready to provide the final answer in the required format."
                        ),
                        "inputSchema": {
                            "json": generate_model_description(response_model)
                            .get("json_schema", {})
                            .get("schema", {})
                        },
                    }
                }
                if existing_tools:
                    # Append structured_output to existing tools, don't force toolChoice
                    existing_tools.append(structured_tool)
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(object, {"tools": existing_tools}),
                    )
                else:
                    # No existing tools, use only structured_output with forced toolChoice
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(
                            object,
                            {
                                "tools": [structured_tool],
                                "toolChoice": {
                                    "tool": {"name": STRUCTURED_OUTPUT_TOOL_NAME}
                                },
                            },
                        ),
                    )
        # Streaming state: text accumulator plus the currently-open tool-use
        # block (only one is tracked at a time).
        full_response = ""
        current_tool_use: dict[str, Any] | None = None
        tool_use_id: str | None = None
        tool_use_index = 0
        accumulated_tool_input = ""
        try:
            response = self.client.converse_stream(
                modelId=self.model_id,
                messages=cast(
                    "Sequence[MessageTypeDef | MessageOutputTypeDef]",
                    cast(object, messages),
                ),
                **body,  # type: ignore[arg-type]
            )
            stream = response.get("stream")
            response_id = None
            if stream:
                for event in stream:
                    if "messageStart" in event:
                        role = event["messageStart"].get("role")
                        logging.debug(f"Streaming message started with role: {role}")
                    elif "contentBlockStart" in event:
                        start = event["contentBlockStart"].get("start", {})
                        content_block_index = event["contentBlockStart"].get(
                            "contentBlockIndex", 0
                        )
                        if "toolUse" in start:
                            # A new tool-use block opens; reset accumulators.
                            tool_use_block = start["toolUse"]
                            current_tool_use = cast(dict[str, Any], tool_use_block)
                            tool_use_id = current_tool_use.get("toolUseId")
                            tool_use_index = content_block_index
                            accumulated_tool_input = ""
                            self._emit_stream_chunk_event(
                                chunk="",
                                from_task=from_task,
                                from_agent=from_agent,
                                tool_call={
                                    "id": tool_use_id or "",
                                    "function": {
                                        "name": current_tool_use.get("name", ""),
                                        "arguments": "",
                                    },
                                    "type": "function",
                                    "index": tool_use_index,
                                },
                                call_type=LLMCallType.TOOL_CALL,
                                response_id=response_id,
                            )
                            logging.debug(
                                f"Tool use started in stream: {json.dumps(current_tool_use)} (ID: {tool_use_id})"
                            )
                    elif "contentBlockDelta" in event:
                        delta = event["contentBlockDelta"]["delta"]
                        if "text" in delta:
                            text_chunk = delta["text"]
                            logging.debug(f"Streaming text chunk: {text_chunk[:50]}...")
                            full_response += text_chunk
                            self._emit_stream_chunk_event(
                                chunk=text_chunk,
                                from_task=from_task,
                                from_agent=from_agent,
                                response_id=response_id,
                            )
                        elif "toolUse" in delta and current_tool_use:
                            tool_input = delta["toolUse"].get("input", "")
                            if tool_input:
                                accumulated_tool_input += tool_input
                                logging.debug(f"Tool input delta: {tool_input}")
                                self._emit_stream_chunk_event(
                                    chunk=tool_input,
                                    from_task=from_task,
                                    from_agent=from_agent,
                                    tool_call={
                                        "id": tool_use_id or "",
                                        "function": {
                                            "name": current_tool_use.get("name", ""),
                                            "arguments": accumulated_tool_input,
                                        },
                                        "type": "function",
                                        "index": tool_use_index,
                                    },
                                    call_type=LLMCallType.TOOL_CALL,
                                    response_id=response_id,
                                )
                    elif "contentBlockStop" in event:
                        logging.debug("Content block stopped in stream")
                        if current_tool_use:
                            function_name = current_tool_use["name"]
                            # NOTE(review): accumulated_tool_input gathers the
                            # streamed input deltas but is not parsed here;
                            # function_args comes only from the block-start
                            # "input" field — confirm complete tool arguments
                            # actually arrive that way.
                            function_args = cast(
                                dict[str, Any], current_tool_use.get("input", {})
                            )
                            # Check if this is the structured_output tool
                            if (
                                function_name == STRUCTURED_OUTPUT_TOOL_NAME
                                and response_model
                            ):
                                function_args = _preprocess_structured_data(
                                    function_args, response_model
                                )
                                try:
                                    result = response_model.model_validate(
                                        function_args
                                    )
                                    self._emit_call_completed_event(
                                        response=result.model_dump_json(),
                                        call_type=LLMCallType.LLM_CALL,
                                        from_task=from_task,
                                        from_agent=from_agent,
                                        messages=messages,
                                    )
                                    return result  # type: ignore[return-value]
                                except Exception as e:
                                    error_msg = (
                                        f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response "
                                        f"with model {response_model.__name__}: {e}"
                                    )
                                    logging.error(error_msg)
                                    raise ValueError(error_msg) from e
                            # Handle regular tool execution
                            if available_functions:
                                tool_result = self._handle_tool_execution(
                                    function_name=function_name,
                                    function_args=function_args,
                                    available_functions=available_functions,
                                    from_task=from_task,
                                    from_agent=from_agent,
                                )
                                if tool_result is not None and tool_use_id:
                                    # Continue the conversation via the
                                    # non-streaming handler with the tool
                                    # request/result appended.
                                    messages.append(
                                        {
                                            "role": "assistant",
                                            "content": [{"toolUse": current_tool_use}],
                                        }
                                    )
                                    messages.append(
                                        {
                                            "role": "user",
                                            "content": [
                                                {
                                                    "toolResult": {
                                                        "toolUseId": tool_use_id,
                                                        "content": [
                                                            {"text": str(tool_result)}
                                                        ],
                                                    }
                                                }
                                            ],
                                        }
                                    )
                                    return self._handle_converse(
                                        messages,
                                        body,
                                        available_functions,
                                        from_task,
                                        from_agent,
                                        response_model,
                                    )
                            current_tool_use = None
                            tool_use_id = None
                    elif "messageStop" in event:
                        stop_reason = event["messageStop"].get("stopReason")
                        logging.debug(f"Streaming message stopped: {stop_reason}")
                        if stop_reason == "max_tokens":
                            logging.warning(
                                "Streaming response truncated due to max_tokens"
                            )
                        elif stop_reason == "content_filtered":
                            logging.warning(
                                "Streaming response filtered due to content policy"
                            )
                        break
                    elif "metadata" in event:
                        metadata = event["metadata"]
                        if "usage" in metadata:
                            usage_metrics = metadata["usage"]
                            self._track_token_usage_internal(usage_metrics)
                            logging.debug(f"Token usage: {usage_metrics}")
                        if "trace" in metadata:
                            logging.debug(
                                f"Trace information available: {metadata['trace']}"
                            )
        except ClientError as e:
            error_msg = self._handle_client_error(e)
            raise RuntimeError(error_msg) from e
        except BotoCoreError as e:
            error_msg = f"Bedrock streaming connection error: {e}"
            logging.error(error_msg)
            raise ConnectionError(error_msg) from e
        full_response = self._apply_stop_words(full_response)
        if not full_response or full_response.strip() == "":
            logging.warning("Bedrock streaming returned empty content, using fallback")
            full_response = (
                "I apologize, but I couldn't generate a response. Please try again."
            )
        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=messages,
        )
        return full_response
async def _ensure_async_client(self) -> Any:
"""Ensure async client is initialized and return it."""
if not self._async_client_initialized and get_aiobotocore_session:
if self._async_exit_stack is None:
raise RuntimeError(
"Async exit stack not initialized - aiobotocore not available"
)
session = get_aiobotocore_session()
client = await self._async_exit_stack.enter_async_context(
session.create_client(
"bedrock-runtime",
region_name=self.region_name,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
)
)
self._async_client = client
self._async_client_initialized = True
return self._async_client
    async def _ahandle_converse(
        self,
        messages: list[LLMMessage],
        body: BedrockConverseRequestBody,
        available_functions: Mapping[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle async non-streaming converse API call.

        Async counterpart of _handle_converse: executes requested tools and
        recurses with tool results appended until a final text or structured
        response is produced.

        Args:
            messages: Converse-formatted message dicts; mutated in place when
                tool results are appended for the recursive follow-up call.
            body: Request body; may be mutated to inject the
                structured-output tool.
            available_functions: Tool callables to execute locally when the
                model requests tool use.
            from_task: Optional task context for events.
            from_agent: Optional agent context for events.
            response_model: Optional Pydantic model for structured output.

        Returns:
            Text response, a validated response_model instance, or a list of
            pending tool-use dicts when no matching callables are available.
        """
        if response_model:
            # Check if structured_output tool already exists (from a previous recursive call)
            existing_tool_config = body.get("toolConfig")
            existing_tools: list[Any] = []
            structured_output_already_exists = False
            if existing_tool_config:
                existing_tools = list(existing_tool_config.get("tools", []))
                # Check if structured_output tool is already in the tools list
                for tool in existing_tools:
                    tool_spec = tool.get("toolSpec", {})
                    if tool_spec.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                        structured_output_already_exists = True
                        break
            if not structured_output_already_exists:
                structured_tool: ConverseToolTypeDef = {
                    "toolSpec": {
                        "name": STRUCTURED_OUTPUT_TOOL_NAME,
                        "description": (
                            "Use this tool to provide your final structured response. "
                            "Call this tool when you have gathered all necessary information "
                            "and are ready to provide the final answer in the required format."
                        ),
                        "inputSchema": {
                            "json": generate_model_description(response_model)
                            .get("json_schema", {})
                            .get("schema", {})
                        },
                    }
                }
                if existing_tools:
                    # Append structured_output to existing tools, don't force toolChoice
                    existing_tools.append(structured_tool)
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(object, {"tools": existing_tools}),
                    )
                else:
                    # No existing tools, use only structured_output with forced toolChoice
                    body["toolConfig"] = cast(
                        "ToolConfigurationTypeDef",
                        cast(
                            object,
                            {
                                "tools": [structured_tool],
                                "toolChoice": {
                                    "tool": {"name": STRUCTURED_OUTPUT_TOOL_NAME}
                                },
                            },
                        ),
                    )
        try:
            if not messages:
                raise ValueError("Messages cannot be empty")
            # Validate message structure before hitting the API.
            for i, msg in enumerate(messages):
                if (
                    not isinstance(msg, dict)
                    or "role" not in msg
                    or "content" not in msg
                ):
                    raise ValueError(f"Invalid message format at index {i}")
            async_client = await self._ensure_async_client()
            response = await async_client.converse(
                modelId=self.model_id,
                messages=cast(
                    "Sequence[MessageTypeDef | MessageOutputTypeDef]",
                    cast(object, messages),
                ),
                **body,
            )
            if "usage" in response:
                self._track_token_usage_internal(response["usage"])
            stop_reason = response.get("stopReason")
            if stop_reason:
                logging.debug(f"Response stop reason: {stop_reason}")
                if stop_reason == "max_tokens":
                    logging.warning("Response truncated due to max_tokens limit")
                elif stop_reason == "content_filtered":
                    logging.warning("Response was filtered due to content policy")
            output = response.get("output", {})
            message = output.get("message", {})
            content = message.get("content", [])
            if not content:
                logging.warning("No content in Bedrock response")
                return (
                    "I apologize, but I received an empty response. Please try again."
                )
            # If there are tool uses but no available_functions, return them for the executor to handle
            tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
            # Check for structured_output tool call first
            if response_model and tool_uses:
                for tool_use in tool_uses:
                    if tool_use.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                        structured_data = tool_use.get("input", {})
                        structured_data = _preprocess_structured_data(
                            structured_data, response_model
                        )
                        try:
                            result = response_model.model_validate(structured_data)
                            self._emit_call_completed_event(
                                response=result.model_dump_json(),
                                call_type=LLMCallType.LLM_CALL,
                                from_task=from_task,
                                from_agent=from_agent,
                                messages=messages,
                            )
                            return result
                        except Exception as e:
                            error_msg = (
                                f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response "
                                f"with model {response_model.__name__}: {e}"
                            )
                            logging.error(error_msg)
                            raise ValueError(error_msg) from e
            # Filter out structured_output from tool_uses returned to executor
            non_structured_output_tool_uses = [
                tu for tu in tool_uses if tu.get("name") != STRUCTURED_OUTPUT_TOOL_NAME
            ]
            if non_structured_output_tool_uses and not available_functions:
                self._emit_call_completed_event(
                    response=non_structured_output_tool_uses,
                    call_type=LLMCallType.TOOL_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=messages,
                )
                return non_structured_output_tool_uses
            text_content = ""
            for content_block in content:
                if "text" in content_block:
                    text_content += content_block["text"]
                elif "toolUse" in content_block and available_functions:
                    tool_use_block = content_block["toolUse"]
                    tool_use_id = tool_use_block.get("toolUseId")
                    function_name = tool_use_block["name"]
                    function_args = tool_use_block.get("input", {})
                    # Skip structured_output - it's handled above
                    if function_name == STRUCTURED_OUTPUT_TOOL_NAME:
                        continue
                    logging.debug(
                        f"Tool use requested: {function_name} with ID {tool_use_id}"
                    )
                    tool_result = self._handle_tool_execution(
                        function_name=function_name,
                        function_args=function_args,
                        available_functions=dict(available_functions),
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if tool_result is not None:
                        # Record the tool request/result pair, then recurse so
                        # the model can continue the conversation.
                        messages.append(
                            {
                                "role": "assistant",
                                "content": [{"toolUse": tool_use_block}],
                            }
                        )
                        messages.append(
                            {
                                "role": "user",
                                "content": [
                                    {
                                        "toolResult": {
                                            "toolUseId": tool_use_id,
                                            "content": [{"text": str(tool_result)}],
                                        }
                                    }
                                ],
                            }
                        )
                        return await self._ahandle_converse(
                            messages,
                            body,
                            available_functions,
                            from_task,
                            from_agent,
                            response_model,
                        )
            text_content = self._apply_stop_words(text_content)
            if not text_content or text_content.strip() == "":
                logging.warning("Extracted empty text content from Bedrock response")
                text_content = "I apologize, but I couldn't generate a proper response. Please try again."
            self._emit_call_completed_event(
                response=text_content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=messages,
            )
            # NOTE(review): the sync _handle_converse returns through
            # _invoke_after_llm_call_hooks at this point; the async path skips
            # after-call hooks — confirm whether that is intentional.
            return text_content
        except ClientError as e:
            # Map documented AWS error codes onto builtin exception types.
            error_code = e.response.get("Error", {}).get("Code", "Unknown")
            error_msg = e.response.get("Error", {}).get("Message", str(e))
            logging.error(f"AWS Bedrock ClientError ({error_code}): {error_msg}")
            if error_code == "ValidationException":
                if "last turn" in error_msg and "user message" in error_msg:
                    raise ValueError(
                        f"Conversation format error: {error_msg}. Check message alternation."
                    ) from e
                raise ValueError(f"Request validation failed: {error_msg}") from e
            if error_code == "AccessDeniedException":
                raise PermissionError(
                    f"Access denied to model {self.model_id}: {error_msg}"
                ) from e
            if error_code == "ResourceNotFoundException":
                raise ValueError(f"Model {self.model_id} not found: {error_msg}") from e
            if error_code == "ThrottlingException":
                raise RuntimeError(
                    f"API throttled, please retry later: {error_msg}"
                ) from e
            if error_code == "ModelTimeoutException":
                raise TimeoutError(f"Model request timed out: {error_msg}") from e
            if error_code == "ServiceQuotaExceededException":
                raise RuntimeError(f"Service quota exceeded: {error_msg}") from e
            if error_code == "ModelNotReadyException":
                raise RuntimeError(
                    f"Model {self.model_id} not ready: {error_msg}"
                ) from e
            if error_code == "ModelErrorException":
                raise RuntimeError(f"Model error: {error_msg}") from e
            if error_code == "InternalServerException":
                raise RuntimeError(f"Internal server error: {error_msg}") from e
            if error_code == "ServiceUnavailableException":
                raise RuntimeError(f"Service unavailable: {error_msg}") from e
            raise RuntimeError(f"Bedrock API error ({error_code}): {error_msg}") from e
        except BotoCoreError as e:
            error_msg = f"Bedrock connection error: {e}"
            logging.error(error_msg)
            raise ConnectionError(error_msg) from e
        except Exception as e:
            error_msg = f"Unexpected error in Bedrock converse call: {e}"
            logging.error(error_msg)
            raise RuntimeError(error_msg) from e
async def _ahandle_streaming_converse(
self,
messages: list[LLMMessage],
body: BedrockConverseRequestBody,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str:
"""Handle async streaming converse API call."""
if response_model:
# Check if structured_output tool already exists (from a previous recursive call)
existing_tool_config = body.get("toolConfig")
existing_tools: list[Any] = []
structured_output_already_exists = False
if existing_tool_config:
existing_tools = list(existing_tool_config.get("tools", []))
# Check if structured_output tool is already in the tools list
for tool in existing_tools:
tool_spec = tool.get("toolSpec", {})
if tool_spec.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
structured_output_already_exists = True
break
if not structured_output_already_exists:
structured_tool: ConverseToolTypeDef = {
"toolSpec": {
"name": STRUCTURED_OUTPUT_TOOL_NAME,
"description": (
"Use this tool to provide your final structured response. "
"Call this tool when you have gathered all necessary information "
"and are ready to provide the final answer in the required format."
),
"inputSchema": {
"json": generate_model_description(response_model)
.get("json_schema", {})
.get("schema", {})
},
}
}
if existing_tools:
# Append structured_output to existing tools, don't force toolChoice
existing_tools.append(structured_tool)
body["toolConfig"] = cast(
"ToolConfigurationTypeDef",
cast(object, {"tools": existing_tools}),
)
else:
# No existing tools, use only structured_output with forced toolChoice
body["toolConfig"] = cast(
"ToolConfigurationTypeDef",
cast(
object,
{
"tools": [structured_tool],
"toolChoice": {
"tool": {"name": STRUCTURED_OUTPUT_TOOL_NAME}
},
},
),
)
full_response = ""
current_tool_use: dict[str, Any] | None = None
tool_use_id: str | None = None
tool_use_index = 0
accumulated_tool_input = ""
try:
async_client = await self._ensure_async_client()
response = await async_client.converse_stream(
modelId=self.model_id,
messages=cast(
"Sequence[MessageTypeDef | MessageOutputTypeDef]",
cast(object, messages),
),
**body,
)
stream = response.get("stream")
response_id = None
if stream:
async for event in stream:
if "messageStart" in event:
role = event["messageStart"].get("role")
logging.debug(f"Streaming message started with role: {role}")
elif "contentBlockStart" in event:
start = event["contentBlockStart"].get("start", {})
content_block_index = event["contentBlockStart"].get(
"contentBlockIndex", 0
)
if "toolUse" in start:
tool_use_block = start["toolUse"]
current_tool_use = cast(dict[str, Any], tool_use_block)
tool_use_id = current_tool_use.get("toolUseId")
tool_use_index = content_block_index
accumulated_tool_input = ""
self._emit_stream_chunk_event(
chunk="",
from_task=from_task,
from_agent=from_agent,
tool_call={
"id": tool_use_id or "",
"function": {
"name": current_tool_use.get("name", ""),
"arguments": "",
},
"type": "function",
"index": tool_use_index,
},
call_type=LLMCallType.TOOL_CALL,
response_id=response_id,
)
logging.debug(
f"Tool use started in stream: {current_tool_use.get('name')} (ID: {tool_use_id})"
)
elif "contentBlockDelta" in event:
delta = event["contentBlockDelta"]["delta"]
if "text" in delta:
text_chunk = delta["text"]
logging.debug(f"Streaming text chunk: {text_chunk[:50]}...")
full_response += text_chunk
self._emit_stream_chunk_event(
chunk=text_chunk,
from_task=from_task,
from_agent=from_agent,
response_id=response_id,
)
elif "toolUse" in delta and current_tool_use:
tool_input = delta["toolUse"].get("input", "")
if tool_input:
accumulated_tool_input += tool_input
logging.debug(f"Tool input delta: {tool_input}")
self._emit_stream_chunk_event(
chunk=tool_input,
from_task=from_task,
from_agent=from_agent,
tool_call={
"id": tool_use_id or "",
"function": {
"name": current_tool_use.get("name", ""),
"arguments": accumulated_tool_input,
},
"type": "function",
"index": tool_use_index,
},
call_type=LLMCallType.TOOL_CALL,
response_id=response_id,
)
elif "contentBlockStop" in event:
logging.debug("Content block stopped in stream")
if current_tool_use:
function_name = current_tool_use["name"]
function_args = cast(
dict[str, Any], current_tool_use.get("input", {})
)
# Check if this is the structured_output tool
if (
function_name == STRUCTURED_OUTPUT_TOOL_NAME
and response_model
):
function_args = _preprocess_structured_data(
function_args, response_model
)
try:
result = response_model.model_validate(
function_args
)
self._emit_call_completed_event(
response=result.model_dump_json(),
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=messages,
)
return result # type: ignore[return-value]
except Exception as e:
error_msg = (
f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response "
f"with model {response_model.__name__}: {e}"
)
logging.error(error_msg)
raise ValueError(error_msg) from e
# Handle regular tool execution
if available_functions:
tool_result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if tool_result is not None and tool_use_id:
messages.append(
{
"role": "assistant",
"content": [{"toolUse": current_tool_use}],
}
)
messages.append(
{
"role": "user",
"content": [
{
"toolResult": {
"toolUseId": tool_use_id,
"content": [
{"text": str(tool_result)}
],
}
}
],
}
)
return await self._ahandle_converse(
messages,
body,
available_functions,
from_task,
from_agent,
response_model,
)
current_tool_use = None
tool_use_id = None
elif "messageStop" in event:
stop_reason = event["messageStop"].get("stopReason")
logging.debug(f"Streaming message stopped: {stop_reason}")
if stop_reason == "max_tokens":
logging.warning(
"Streaming response truncated due to max_tokens"
)
elif stop_reason == "content_filtered":
logging.warning(
"Streaming response filtered due to content policy"
)
break
elif "metadata" in event:
metadata = event["metadata"]
if "usage" in metadata:
usage_metrics = metadata["usage"]
self._track_token_usage_internal(usage_metrics)
logging.debug(f"Token usage: {usage_metrics}")
if "trace" in metadata:
logging.debug(
f"Trace information available: {metadata['trace']}"
)
except ClientError as e:
error_msg = self._handle_client_error(e)
raise RuntimeError(error_msg) from e
except BotoCoreError as e:
error_msg = f"Bedrock streaming connection error: {e}"
logging.error(error_msg)
raise ConnectionError(error_msg) from e
full_response = self._apply_stop_words(full_response)
if not full_response or full_response.strip() == "":
logging.warning("Bedrock streaming returned empty content, using fallback")
full_response = (
"I apologize, but I couldn't generate a response. Please try again."
)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=messages,
)
return self._invoke_after_llm_call_hooks(
messages,
full_response,
from_agent,
)
    def _format_messages_for_converse(
        self, messages: str | list[LLMMessage]
    ) -> tuple[list[LLMMessage], str | None]:
        """Format messages for Converse API following AWS documentation.

        Converts OpenAI-style messages into Bedrock's content-block shape
        ({"role": str, "content": [{"text": str}]}), extracts system messages
        into a separate string (Converse passes them outside the message
        list), and rewrites tool call/result messages into toolUse/toolResult
        blocks.

        Args:
            messages: A raw prompt string or a list of OpenAI-style messages.

        Returns:
            Tuple of (converse-formatted messages, system message or None).

        Raises:
            ValueError: If a tool-role message has no tool_call_id.
        """
        # Use base class formatting first
        formatted_messages = self._format_messages(messages)
        converse_messages: list[LLMMessage] = []
        system_message: str | None = None
        for message in formatted_messages:
            role = message.get("role")
            content = message.get("content", "")
            tool_calls = message.get("tool_calls")
            tool_call_id = message.get("tool_call_id")
            if role == "system":
                # Extract system message - Converse API handles it separately.
                # Multiple system messages are concatenated with blank lines.
                if system_message:
                    system_message += f"\n\n{content}"
                else:
                    system_message = cast(str, content)
            elif role == "assistant" and tool_calls:
                # Convert OpenAI-style tool_calls to Bedrock toolUse format.
                # NOTE(review): any plain-text `content` on this assistant
                # message is dropped here; only toolUse blocks survive —
                # confirm that is intended.
                bedrock_content = []
                for tc in tool_calls:
                    func = tc.get("function", {})
                    tool_use_block = {
                        "toolUse": {
                            # Fall back to a locally unique id when the
                            # upstream call id is missing.
                            "toolUseId": tc.get("id", f"call_{id(tc)}"),
                            "name": func.get("name", ""),
                            # Arguments may arrive as a dict or a JSON string;
                            # empty/None strings decode to {}.
                            "input": func.get("arguments", {})
                            if isinstance(func.get("arguments"), dict)
                            else json.loads(func.get("arguments", "{}") or "{}"),
                        }
                    }
                    bedrock_content.append(tool_use_block)
                converse_messages.append(
                    {"role": "assistant", "content": bedrock_content}
                )
            elif role == "tool":
                if not tool_call_id:
                    raise ValueError("Tool message missing required tool_call_id")
                # Bedrock expects tool results as a *user* message carrying a
                # toolResult block keyed by the originating toolUseId.
                converse_messages.append(
                    {
                        "role": "user",
                        "content": [
                            {
                                "toolResult": {
                                    "toolUseId": tool_call_id,
                                    "content": [
                                        {"text": str(content) if content else ""}
                                    ],
                                }
                            }
                        ],
                    }
                )
            else:
                # Convert to Converse API format with proper content structure
                if isinstance(content, list):
                    # Already formatted as multimodal content blocks
                    converse_messages.append({"role": role, "content": content})
                else:
                    # String content - wrap in text block
                    text_content = content if content else ""
                    converse_messages.append(
                        {"role": role, "content": [{"text": text_content}]}
                    )
        # CRITICAL: Handle model-specific conversation requirements
        # Cohere and some other models require conversation to end with user message
        if converse_messages:
            last_message = converse_messages[-1]
            if last_message["role"] == "assistant":
                # For Cohere models, add a continuation user message
                if "cohere" in self.model.lower():
                    converse_messages.append(
                        {
                            "role": "user",
                            "content": [
                                {
                                    "text": "Please continue and provide your final answer."
                                }
                            ],
                        }
                    )
                # For other models that might have similar requirements
                elif any(
                    model_family in self.model.lower()
                    for model_family in ["command", "coral"]
                ):
                    converse_messages.append(
                        {
                            "role": "user",
                            "content": [{"text": "Continue your response."}],
                        }
                    )
        # Ensure first message is from user (required by Converse API)
        if not converse_messages:
            converse_messages.append(
                {
                    "role": "user",
                    "content": [{"text": "Hello, please help me with my request."}],
                }
            )
        elif converse_messages[0]["role"] != "user":
            converse_messages.insert(
                0,
                {
                    "role": "user",
                    "content": [{"text": "Hello, please help me with my request."}],
                },
            )
        return converse_messages, system_message
@staticmethod
def _messages_contain_tool_content(messages: list[LLMMessage]) -> bool:
"""Check if messages contain toolUse or toolResult content blocks.
Bedrock requires toolConfig when messages have tool-related content.
"""
for message in messages:
content = message.get("content", [])
if isinstance(content, list):
for block in content:
if isinstance(block, dict):
if "toolUse" in block or "toolResult" in block:
return True
return False
@staticmethod
def _extract_tools_from_message_history(
messages: list[LLMMessage],
) -> list[dict[str, Any]]:
"""Extract tool definitions from toolUse blocks in message history.
When no tools are passed but messages contain toolUse, we need to
recreate a minimal toolConfig to satisfy Bedrock's API requirements.
"""
tools: list[dict[str, Any]] = []
seen_tool_names: set[str] = set()
for message in messages:
content = message.get("content", [])
if isinstance(content, list):
for block in content:
if isinstance(block, dict) and "toolUse" in block:
tool_use = block["toolUse"]
tool_name = tool_use.get("name", "")
if tool_name and tool_name not in seen_tool_names:
seen_tool_names.add(tool_name)
# Create a minimal tool spec from the toolUse block
tool_spec: dict[str, Any] = {
"toolSpec": {
"name": tool_name,
"description": f"Tool: {tool_name}",
"inputSchema": {
"json": {
"type": "object",
"properties": {},
}
},
}
}
tools.append(tool_spec)
return tools
@staticmethod
def _format_tools_for_converse(
tools: list[dict[str, Any]],
) -> list[ConverseToolTypeDef]:
"""Convert CrewAI tools to Converse API format following AWS specification."""
from crewai.llms.providers.utils.common import safe_tool_conversion
converse_tools: list[ConverseToolTypeDef] = []
for tool in tools:
try:
name, description, parameters = safe_tool_conversion(tool, "Bedrock")
tool_spec: ToolSpec = {
"name": name,
"description": description,
}
if parameters and isinstance(parameters, dict):
input_schema: ToolInputSchema = {"json": parameters}
tool_spec["inputSchema"] = input_schema
converse_tool: ConverseToolTypeDef = {"toolSpec": tool_spec}
converse_tools.append(converse_tool)
except Exception as e: # noqa: PERF203
logging.warning(
f"Failed to convert tool {tool.get('name', 'unknown')}: {e}"
)
continue
return converse_tools
def _get_inference_config(self) -> EnhancedInferenceConfigurationTypeDef:
"""Get inference configuration following AWS Converse API specification."""
config: EnhancedInferenceConfigurationTypeDef = {}
if self.max_tokens:
config["maxTokens"] = self.max_tokens
if self.temperature is not None:
config["temperature"] = float(self.temperature)
if self.top_p is not None:
config["topP"] = float(self.top_p)
if self.stop_sequences:
config["stopSequences"] = self.stop_sequences
if self.is_claude_model and self.top_k is not None:
# top_k is supported by Claude models
config["topK"] = int(self.top_k)
return config
def _handle_client_error(self, e: ClientError) -> str:
"""Handle AWS ClientError with specific error codes and return error message."""
error_code = e.response.get("Error", {}).get("Code", "Unknown")
error_msg = e.response.get("Error", {}).get("Message", str(e))
error_mapping = {
"AccessDeniedException": f"Access denied to model {self.model_id}: {error_msg}",
"ResourceNotFoundException": f"Model {self.model_id} not found: {error_msg}",
"ThrottlingException": f"API throttled, please retry later: {error_msg}",
"ValidationException": f"Invalid request: {error_msg}",
"ModelTimeoutException": f"Model request timed out: {error_msg}",
"ServiceQuotaExceededException": f"Service quota exceeded: {error_msg}",
"ModelNotReadyException": f"Model {self.model_id} not ready: {error_msg}",
"ModelErrorException": f"Model error: {error_msg}",
}
full_error_msg = error_mapping.get(
error_code, f"Bedrock API error: {error_msg}"
)
logging.error(f"Bedrock client error ({error_code}): {full_error_msg}")
return full_error_msg
def _track_token_usage_internal(self, usage: TokenUsageTypeDef) -> None: # type: ignore[override]
"""Track token usage from Bedrock response."""
input_tokens = usage.get("inputTokens", 0)
output_tokens = usage.get("outputTokens", 0)
total_tokens = usage.get("totalTokens", input_tokens + output_tokens)
self._token_usage["prompt_tokens"] += input_tokens
self._token_usage["completion_tokens"] += output_tokens
self._token_usage["total_tokens"] += total_tokens
self._token_usage["successful_requests"] += 1
    def supports_function_calling(self) -> bool:
        """Check if the model supports function calling.

        Returns:
            The ``supports_tools`` flag computed for this model at init time.
        """
        return self.supports_tools
    def supports_stop_words(self) -> bool:
        """Check if the model supports stop words.

        Returns:
            Always True: stop sequences are forwarded via the inference
            config, and ``_apply_stop_words`` additionally trims output
            client-side.
        """
        return True
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
# Context window sizes for common Bedrock models
context_windows = {
"anthropic.claude-3-5-sonnet": 200000,
"anthropic.claude-3-5-haiku": 200000,
"anthropic.claude-3-opus": 200000,
"anthropic.claude-3-sonnet": 200000,
"anthropic.claude-3-haiku": 200000,
"anthropic.claude-3-7-sonnet": 200000,
"anthropic.claude-v2": 100000,
"amazon.titan-text-express": 8000,
"ai21.j2-ultra": 8192,
"cohere.command-text": 4096,
"meta.llama2-13b-chat": 4096,
"meta.llama2-70b-chat": 4096,
"meta.llama3-70b-instruct": 128000,
"deepseek.r1": 32768,
}
# Find the best match for the model name
for model_prefix, size in context_windows.items():
if self.model.startswith(model_prefix):
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
# Default context window size
return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)
def supports_multimodal(self) -> bool:
"""Check if the model supports multimodal inputs.
Claude 3+ and Nova Lite/Pro/Premier on Bedrock support vision.
Returns:
True if the model supports images.
"""
model_lower = self.model.lower()
vision_models = (
"anthropic.claude-3",
"amazon.nova-lite",
"amazon.nova-pro",
"amazon.nova-premier",
"us.amazon.nova-lite",
"us.amazon.nova-pro",
"us.amazon.nova-premier",
)
return any(model_lower.startswith(m) for m in vision_models)
def _is_nova_model(self) -> bool:
"""Check if the model is an Amazon Nova model.
Only Nova models support S3 links for multimedia.
Returns:
True if the model is a Nova model.
"""
model_lower = self.model.lower()
return "amazon.nova-" in model_lower
def get_file_uploader(self) -> Any:
"""Get a Bedrock S3 file uploader using this LLM's AWS credentials.
Creates an S3 client using the same AWS credentials configured for
this Bedrock LLM instance.
Returns:
BedrockFileUploader instance with pre-configured S3 client,
or None if crewai_files is not installed.
"""
try:
import boto3
from crewai_files.uploaders.bedrock import BedrockFileUploader
s3_client = boto3.client(
"s3",
region_name=self.region_name,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
)
return BedrockFileUploader(
region=self.region_name,
client=s3_client,
)
except ImportError:
return None
def _get_document_format(self, content_type: str) -> str | None:
"""Map content type to Bedrock document format.
Args:
content_type: MIME type of the document.
Returns:
Bedrock format string or None if unsupported.
"""
format_map = {
"application/pdf": "pdf",
"text/csv": "csv",
"text/plain": "txt",
"text/markdown": "md",
"text/html": "html",
"application/msword": "doc",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
"application/vnd.ms-excel": "xls",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
}
return format_map.get(content_type)
def _get_video_format(self, content_type: str) -> str | None:
"""Map content type to Bedrock video format.
Args:
content_type: MIME type of the video.
Returns:
Bedrock format string or None if unsupported.
"""
format_map = {
"video/mp4": "mp4",
"video/quicktime": "mov",
"video/x-matroska": "mkv",
"video/webm": "webm",
"video/x-flv": "flv",
"video/mpeg": "mpeg",
"video/x-ms-wmv": "wmv",
"video/3gpp": "three_gp",
}
return format_map.get(content_type)
    def format_text_content(self, text: str) -> dict[str, Any]:
        """Format text as a Bedrock content block.

        Bedrock uses {"text": "..."} format instead of the OpenAI-style
        {"type": "text", "text": "..."}.

        Args:
            text: The text content to format.

        Returns:
            A content block in Bedrock's expected format.
        """
        return {"text": text}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/providers/bedrock/completion.py",
"license": "MIT License",
"lines": 1911,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/gemini/completion.py | from __future__ import annotations
import base64
import json
import logging
import os
import re
from typing import TYPE_CHECKING, Any, Literal, cast
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM, llm_call_context
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
try:
from google import genai
from google.genai import types
from google.genai.errors import APIError
from google.genai.types import GenerateContentResponse
except ImportError:
raise ImportError(
'Google Gen AI native provider not available, to install: uv add "crewai[google-genai]"'
) from None
# Name of the pseudo-tool used to request structured output when real tools
# are also configured (Gemini cannot combine tools with response_schema).
STRUCTURED_OUTPUT_TOOL_NAME = "structured_output"
class GeminiCompletion(BaseLLM):
"""Google Gemini native completion implementation.
This class provides direct integration with the Google Gen AI Python SDK,
offering native function calling, streaming support, and proper Gemini formatting.
"""
    def __init__(
        self,
        model: str = "gemini-2.0-flash-001",
        api_key: str | None = None,
        project: str | None = None,
        location: str | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        top_k: int | None = None,
        max_output_tokens: int | None = None,
        stop_sequences: list[str] | None = None,
        stream: bool = False,
        safety_settings: dict[str, Any] | None = None,
        client_params: dict[str, Any] | None = None,
        interceptor: BaseInterceptor[Any, Any] | None = None,
        use_vertexai: bool | None = None,
        response_format: type[BaseModel] | None = None,
        **kwargs: Any,
    ):
        """Initialize Google Gemini chat completion client.

        Args:
            model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
            api_key: Google API key for Gemini API authentication.
                Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var.
                NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead.
            project: Google Cloud project ID for Vertex AI with ADC authentication.
                Requires Application Default Credentials (gcloud auth application-default login).
                NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC.
                If both api_key and project are set, api_key takes precedence.
            location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1')
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter
            max_output_tokens: Maximum tokens in response
            stop_sequences: Stop sequences
            stream: Enable streaming responses
            safety_settings: Safety filter settings
            client_params: Additional parameters to pass to the Google Gen AI Client constructor.
                Supports parameters like http_options, credentials, debug_config, etc.
            interceptor: HTTP interceptor (not yet supported for Gemini).
            use_vertexai: Whether to use Vertex AI instead of Gemini API.
                - True: Use Vertex AI (with ADC or Express mode with API key)
                - False: Use Gemini API (explicitly override env var)
                - None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var
                When using Vertex AI with API key (Express mode), http_options with
                api_version="v1" is automatically configured.
            response_format: Pydantic model for structured output. Used as default when
                response_model is not passed to call()/acall() methods.
            **kwargs: Additional parameters

        Raises:
            NotImplementedError: If an interceptor is supplied.
        """
        # Fail fast: interceptors are not wired up for this provider yet.
        if interceptor is not None:
            raise NotImplementedError(
                "HTTP interceptors are not yet supported for Google Gemini provider. "
                "Interceptors are currently supported for OpenAI and Anthropic providers only."
            )
        super().__init__(
            model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
        )
        # Store client params for later use
        self.client_params = client_params or {}
        # Get API configuration with environment variable fallbacks
        self.api_key = (
            api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
        )
        self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
        self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
        if use_vertexai is None:
            # Caller didn't decide; fall back to the env-var opt-in.
            use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
        self.client = self._initialize_client(use_vertexai)
        # Store completion parameters
        self.top_p = top_p
        self.top_k = top_k
        self.max_output_tokens = max_output_tokens
        self.stream = stream
        self.safety_settings = safety_settings or {}
        self.stop_sequences = stop_sequences or []
        self.tools: list[dict[str, Any]] | None = None
        self.response_format = response_format
        # Model-specific settings: parse the numeric version out of names
        # like "gemini-2.0-flash-001" to gate feature support.
        version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower())
        # Tool/function calling is treated as available from Gemini 1.5 on.
        self.supports_tools = bool(
            version_match and float(version_match.group(1)) >= 1.5
        )
        # Gemini 2.0+ switches structured output to response_json_schema.
        self.is_gemini_2_0 = bool(
            version_match and float(version_match.group(1)) >= 2.0
        )
    @property
    def stop(self) -> list[str]:
        """Get stop sequences sent to the API.

        Mirrors ``stop_sequences`` so code that reads/assigns ``stop``
        (e.g. CrewAgentExecutor, per the setter) stays in sync.
        """
        return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Gemini API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
    def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:
        """Initialize the Google Gen AI client with proper parameter handling.

        Args:
            use_vertexai: Whether to use Vertex AI (from environment variable)

        Returns:
            Initialized Google Gen AI Client

        Note:
            Google Gen AI SDK has two distinct endpoints with different auth requirements:
            - Gemini API (generativelanguage.googleapis.com): Supports API key authentication
            - Vertex AI (aiplatform.googleapis.com): Only supports OAuth2/ADC, NO API keys
            When vertexai=True is set, it routes to aiplatform.googleapis.com which rejects
            API keys. Use Gemini API endpoint for API key authentication instead.
        """
        client_params = {}
        if self.client_params:
            client_params.update(self.client_params)
        # Determine authentication mode based on available credentials
        has_api_key = bool(self.api_key)
        has_project = bool(self.project)
        if has_api_key and has_project:
            # API key wins when both are supplied; drop the project flag so
            # the Vertex-AI-with-ADC branch below cannot trigger.
            logging.warning(
                "Both API key and project provided. Using API key authentication. "
                "Project/location parameters are ignored when using API keys. "
                "To use Vertex AI with ADC, remove the api_key parameter."
            )
            has_project = False
        # Vertex AI with ADC (project without API key)
        if (use_vertexai or has_project) and not has_api_key:
            client_params.update(
                {
                    "vertexai": True,
                    "project": self.project,
                    "location": self.location,
                }
            )
        # API key authentication (works with both Gemini API and Vertex AI Express)
        elif has_api_key:
            client_params["api_key"] = self.api_key
            # Vertex AI Express mode: API key + vertexai=True + http_options with api_version="v1"
            # See: https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey
            if use_vertexai:
                client_params["vertexai"] = True
                client_params["http_options"] = types.HttpOptions(api_version="v1")
            else:
                # This ensures we use the Gemini API (generativelanguage.googleapis.com)
                client_params["vertexai"] = False
            # Clean up project/location (not allowed with API key)
            client_params.pop("project", None)
            client_params.pop("location", None)
        else:
            # No explicit credentials: let the SDK try ambient/ADC auth and
            # surface a setup guide if that fails.
            try:
                return genai.Client(**client_params)
            except Exception as e:
                raise ValueError(
                    "Authentication required. Provide one of:\n"
                    " 1. API key via GOOGLE_API_KEY or GEMINI_API_KEY environment variable\n"
                    " (use_vertexai=True is optional for Vertex AI with API key)\n"
                    " 2. For Vertex AI with ADC: Set GOOGLE_CLOUD_PROJECT and run:\n"
                    " gcloud auth application-default login\n"
                    " 3. Pass api_key parameter directly to LLM constructor\n"
                ) from e
        return genai.Client(**client_params)
def _get_client_params(self) -> dict[str, Any]:
"""Get client parameters for compatibility with base class.
Note: This method is kept for compatibility but the Google Gen AI SDK
uses a different initialization pattern via the Client constructor.
"""
params = {}
if (
hasattr(self, "client")
and hasattr(self.client, "vertexai")
and self.client.vertexai
):
# Vertex AI configuration
params.update(
{
"vertexai": True,
"project": self.project,
"location": self.location,
}
)
if self.api_key:
params["api_key"] = self.api_key
elif self.api_key:
params["api_key"] = self.api_key
if self.client_params:
params.update(self.client_params)
return params
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call Google Gemini generate content API.

        Args:
            messages: Input messages for the chat completion
            tools: List of tool/function definitions
            callbacks: Callback functions (not used as token counts are handled by the response)
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Response model to use.

        Returns:
            Chat completion response or tool call result

        Raises:
            ValueError: If a before_llm_call hook blocks the request.
        """
        with llm_call_context():
            try:
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                self.tools = tools
                # Per-call response_model overrides the instance-level default.
                effective_response_model = response_model or self.response_format
                formatted_content, system_instruction = (
                    self._format_messages_for_gemini(messages)
                )
                # Hooks receive plain dicts, not genai Content objects.
                messages_for_hooks = self._convert_contents_to_dict(formatted_content)
                if not self._invoke_before_llm_call_hooks(
                    messages_for_hooks, from_agent
                ):
                    raise ValueError("LLM call blocked by before_llm_call hook")
                config = self._prepare_generation_config(
                    system_instruction, tools, effective_response_model
                )
                if self.stream:
                    return self._handle_streaming_completion(
                        formatted_content,
                        config,
                        available_functions,
                        from_task,
                        from_agent,
                        effective_response_model,
                    )
                return self._handle_completion(
                    formatted_content,
                    config,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )
            except APIError as e:
                # Emit the failure event before re-raising so observers see it.
                error_msg = f"Google Gemini API error: {e.code} - {e.message}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
            except Exception as e:
                error_msg = f"Google Gemini API call failed: {e!s}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
    async def acall(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Async call to Google Gemini generate content API.

        Args:
            messages: Input messages for the chat completion
            tools: List of tool/function definitions
            callbacks: Callback functions (not used as token counts are handled by the response)
            available_functions: Available functions for tool calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Response model to use.

        Returns:
            Chat completion response or tool call result
        """
        with llm_call_context():
            try:
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                self.tools = tools
                # Per-call response_model overrides the instance-level default.
                effective_response_model = response_model or self.response_format
                formatted_content, system_instruction = (
                    self._format_messages_for_gemini(messages)
                )
                # NOTE(review): unlike call(), this async path does not invoke
                # _invoke_before_llm_call_hooks — confirm whether that is
                # intentional or an omission.
                config = self._prepare_generation_config(
                    system_instruction, tools, effective_response_model
                )
                if self.stream:
                    return await self._ahandle_streaming_completion(
                        formatted_content,
                        config,
                        available_functions,
                        from_task,
                        from_agent,
                        effective_response_model,
                    )
                return await self._ahandle_completion(
                    formatted_content,
                    config,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )
            except APIError as e:
                # Emit the failure event before re-raising so observers see it.
                error_msg = f"Google Gemini API error: {e.code} - {e.message}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
            except Exception as e:
                error_msg = f"Google Gemini API call failed: {e!s}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
    def _prepare_generation_config(
        self,
        system_instruction: str | None = None,
        tools: list[dict[str, Any]] | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> types.GenerateContentConfig:
        """Prepare generation config for Google Gemini API.

        Args:
            system_instruction: System instruction for the model
            tools: Tool definitions
            response_model: Pydantic model for structured output

        Returns:
            GenerateContentConfig object for Gemini API

        Note:
            Structured output support varies by model version:
            - Gemini 1.5 and earlier: Uses response_schema (Pydantic model)
            - Gemini 2.0+: Uses response_json_schema (JSON Schema) with propertyOrdering
            When both tools AND response_model are present, we add a structured_output
            pseudo-tool since Gemini doesn't support tools + response_schema together.
        """
        # Record the tools on the instance (call()/acall() also do this).
        self.tools = tools
        config_params: dict[str, Any] = {}
        # Add system instruction if present
        if system_instruction:
            # Convert system instruction to Content format
            system_content = types.Content(
                role="user", parts=[types.Part.from_text(text=system_instruction)]
            )
            config_params["system_instruction"] = system_content
        # Add generation config parameters
        if self.temperature is not None:
            config_params["temperature"] = self.temperature
        if self.top_p is not None:
            config_params["top_p"] = self.top_p
        if self.top_k is not None:
            config_params["top_k"] = self.top_k
        if self.max_output_tokens is not None:
            config_params["max_output_tokens"] = self.max_output_tokens
        if self.stop_sequences:
            config_params["stop_sequences"] = self.stop_sequences
        if tools and self.supports_tools:
            gemini_tools = self._convert_tools_for_interference(tools)
            if response_model:
                # Tools + structured output: Gemini cannot take both tools and
                # a response schema, so expose the schema as an extra
                # pseudo-tool the model calls for its final answer.
                schema_output = generate_model_description(response_model)
                schema = schema_output.get("json_schema", {}).get("schema", {})
                if self.is_gemini_2_0:
                    schema = self._add_property_ordering(schema)
                structured_output_tool = types.Tool(
                    function_declarations=[
                        types.FunctionDeclaration(
                            name=STRUCTURED_OUTPUT_TOOL_NAME,
                            description=(
                                "Use this tool to provide your final structured response. "
                                "Call this tool when you have gathered all necessary information "
                                "and are ready to provide the final answer in the required format."
                            ),
                            parameters_json_schema=schema,
                        )
                    ]
                )
                gemini_tools.append(structured_output_tool)
            config_params["tools"] = gemini_tools
        elif response_model:
            # Structured output without tools: use the native schema support.
            config_params["response_mime_type"] = "application/json"
            schema_output = generate_model_description(response_model)
            schema = schema_output.get("json_schema", {}).get("schema", {})
            if self.is_gemini_2_0:
                # 2.0+ takes a JSON Schema (with propertyOrdering applied).
                schema = self._add_property_ordering(schema)
                config_params["response_json_schema"] = schema
            else:
                # Older models take the Pydantic model directly.
                config_params["response_schema"] = response_model
        if self.safety_settings:
            config_params["safety_settings"] = self.safety_settings
        return types.GenerateContentConfig(**config_params)
def _convert_tools_for_interference( # type: ignore[override]
self, tools: list[dict[str, Any]]
) -> list[types.Tool]:
"""Convert CrewAI tool format to Gemini function declaration format."""
gemini_tools = []
from crewai.llms.providers.utils.common import safe_tool_conversion
for tool in tools:
name, description, parameters = safe_tool_conversion(tool, "Gemini")
function_declaration = types.FunctionDeclaration(
name=name,
description=description,
parameters_json_schema=parameters if parameters else None,
)
gemini_tool = types.Tool(function_declarations=[function_declaration])
gemini_tools.append(gemini_tool)
return gemini_tools
    def _format_messages_for_gemini(
        self, messages: str | list[LLMMessage]
    ) -> tuple[list[types.Content], str | None]:
        """Format messages for Gemini API.

        Gemini has specific requirements:
        - System messages are passed separately as a system_instruction
        - Content is organized as Content objects containing Parts
        - Roles are 'user' and 'model' (not 'assistant')
        - Tool results become function-response Parts sent with role 'user'
        - Assistant tool calls become function-call Parts with role 'model'

        Args:
            messages: Input messages (a single string or list of LLMMessage dicts)

        Returns:
            Tuple of (formatted_contents, system_instruction); the instruction
            is None when no system message was present.
        """
        # Use base class formatting first to normalize into role/content dicts
        base_formatted = super()._format_messages(messages)
        contents: list[types.Content] = []
        system_instruction: str | None = None
        for message in base_formatted:
            role = message["role"]
            content = message["content"]
            # Build parts list from content (multimodal list or plain text)
            parts: list[types.Part] = []
            if isinstance(content, list):
                for item in content:
                    if isinstance(item, dict):
                        if "text" in item:
                            parts.append(types.Part.from_text(text=str(item["text"])))
                        elif "inlineData" in item:
                            # Inline binary payload arrives base64-encoded
                            inline = item["inlineData"]
                            parts.append(
                                types.Part.from_bytes(
                                    data=base64.b64decode(inline["data"]),
                                    mime_type=inline["mimeType"],
                                )
                            )
                    else:
                        parts.append(types.Part.from_text(text=str(item)))
            else:
                parts.append(types.Part.from_text(text=str(content) if content else ""))
            # Plain-text view of the parts, used by the system/tool branches below
            text_content: str = " ".join(p.text for p in parts if p.text is not None)
            if role == "system":
                # Extract system instruction - Gemini handles it separately;
                # multiple system messages are concatenated with blank lines.
                if system_instruction:
                    system_instruction += f"\n\n{text_content}"
                else:
                    system_instruction = text_content
            elif role == "tool":
                tool_call_id = message.get("tool_call_id")
                if not tool_call_id:
                    raise ValueError("Tool message missing required tool_call_id")
                tool_name = message.get("name", "")
                # Gemini expects function responses as dicts; wrap non-dict or
                # unparseable payloads under a "result" key.
                response_data: dict[str, Any]
                try:
                    parsed = json.loads(text_content) if text_content else {}
                    if isinstance(parsed, dict):
                        response_data = parsed
                    else:
                        response_data = {"result": parsed}
                except (json.JSONDecodeError, TypeError):
                    response_data = {"result": text_content}
                function_response_part = types.Part.from_function_response(
                    name=tool_name, response=response_data
                )
                contents.append(
                    types.Content(role="user", parts=[function_response_part])
                )
            elif role == "assistant" and message.get("tool_calls"):
                # Prefer the provider's original Part objects when present so
                # function-call metadata survives round-tripping.
                raw_parts: list[Any] | None = message.get("raw_tool_call_parts")
                if raw_parts and all(isinstance(p, types.Part) for p in raw_parts):
                    tool_parts: list[types.Part] = list(raw_parts)
                    if text_content:
                        tool_parts.insert(0, types.Part.from_text(text=text_content))
                else:
                    # Rebuild function-call Parts from the serialized tool_calls
                    tool_parts = []
                    if text_content:
                        tool_parts.append(types.Part.from_text(text=text_content))
                    tool_calls: list[dict[str, Any]] = message.get("tool_calls") or []
                    for tool_call in tool_calls:
                        func: dict[str, Any] = tool_call.get("function") or {}
                        func_name: str = str(func.get("name") or "")
                        func_args_raw: str | dict[str, Any] = (
                            func.get("arguments") or {}
                        )
                        func_args: dict[str, Any]
                        if isinstance(func_args_raw, str):
                            # Arguments may be a JSON string; fall back to {}
                            # when it cannot be parsed.
                            try:
                                func_args = (
                                    json.loads(func_args_raw) if func_args_raw else {}
                                )
                            except (json.JSONDecodeError, TypeError):
                                func_args = {}
                        else:
                            func_args = func_args_raw
                        tool_parts.append(
                            types.Part.from_function_call(
                                name=func_name, args=func_args
                            )
                        )
                contents.append(types.Content(role="model", parts=tool_parts))
            else:
                # Convert role for Gemini (assistant -> model)
                gemini_role = "model" if role == "assistant" else "user"
                # Create Content object
                gemini_content = types.Content(role=gemini_role, parts=parts)
                contents.append(gemini_content)
        return contents, system_instruction
def _validate_and_emit_structured_output(
self,
content: str,
response_model: type[BaseModel],
messages_for_event: list[LLMMessage],
from_task: Any | None = None,
from_agent: Any | None = None,
) -> BaseModel:
"""Validate content against response model and emit completion event.
Args:
content: Response content to validate
response_model: Pydantic model for validation
messages_for_event: Messages to include in event
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
Validated Pydantic model instance
Raises:
ValueError: If validation fails
"""
try:
structured_data = response_model.model_validate_json(content)
self._emit_call_completed_event(
response=structured_data.model_dump_json(),
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=messages_for_event,
)
return structured_data
except Exception as e:
error_msg = f"Failed to validate structured output with model {response_model.__name__}: {e}"
logging.error(error_msg)
raise ValueError(error_msg) from e
def _finalize_completion_response(
self,
content: str,
contents: list[types.Content],
response_model: type[BaseModel] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> str | BaseModel:
"""Finalize completion response with validation and event emission.
Args:
content: The response content
contents: Original contents for event conversion
response_model: Pydantic model for structured output validation
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
Final response content after processing (str or Pydantic model if response_model provided)
"""
messages_for_event = self._convert_contents_to_dict(contents)
# Handle structured output validation
if response_model:
return self._validate_and_emit_structured_output(
content=content,
response_model=response_model,
messages_for_event=messages_for_event,
from_task=from_task,
from_agent=from_agent,
)
self._emit_call_completed_event(
response=content,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=messages_for_event,
)
return self._invoke_after_llm_call_hooks(
messages_for_event, content, from_agent
)
def _handle_structured_output_tool_call(
self,
structured_data: dict[str, Any],
response_model: type[BaseModel],
contents: list[types.Content],
from_task: Any | None = None,
from_agent: Any | None = None,
) -> BaseModel:
"""Validate and emit event for structured_output tool call.
Args:
structured_data: The arguments passed to the structured_output tool
response_model: Pydantic model to validate against
contents: Original contents for event conversion
from_task: Task that initiated the call
from_agent: Agent that initiated the call
Returns:
Validated Pydantic model instance
Raises:
ValueError: If validation fails
"""
try:
validated_data = response_model.model_validate(structured_data)
self._emit_call_completed_event(
response=validated_data.model_dump_json(),
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=self._convert_contents_to_dict(contents),
)
return validated_data
except Exception as e:
error_msg = (
f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response "
f"with model {response_model.__name__}: {e}"
)
logging.error(error_msg)
raise ValueError(error_msg) from e
    def _process_response_with_tools(
        self,
        response: GenerateContentResponse,
        contents: list[types.Content],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Process response, execute function calls, and finalize completion.

        Dispatch order: structured_output pseudo-tool call first, then raw
        tool calls handed back to the executor (when no functions are
        available locally), then internal tool execution, and finally the
        plain-text path.

        Args:
            response: The completion response
            contents: Original contents for event conversion
            available_functions: Available functions for function calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Pydantic model for structured output validation

        Returns:
            Final response content or function call result
        """
        if response.candidates and (self.tools or available_functions):
            candidate = response.candidates[0]
            if candidate.content and candidate.content.parts:
                # Collect function call parts
                function_call_parts = [
                    part for part in candidate.content.parts if part.function_call
                ]
                # Check for structured_output pseudo-tool call (used when tools + response_model)
                if response_model and function_call_parts:
                    for part in function_call_parts:
                        if (
                            part.function_call
                            and part.function_call.name == STRUCTURED_OUTPUT_TOOL_NAME
                        ):
                            structured_data = (
                                dict(part.function_call.args)
                                if part.function_call.args
                                else {}
                            )
                            return self._handle_structured_output_tool_call(
                                structured_data=structured_data,
                                response_model=response_model,
                                contents=contents,
                                from_task=from_task,
                                from_agent=from_agent,
                            )
                # Filter out structured_output from function calls returned to executor
                non_structured_output_parts = [
                    part
                    for part in function_call_parts
                    if not (
                        part.function_call
                        and part.function_call.name == STRUCTURED_OUTPUT_TOOL_NAME
                    )
                ]
                # If there are function calls but no available_functions,
                # return them for the executor to handle (like OpenAI/Anthropic)
                if non_structured_output_parts and not available_functions:
                    self._emit_call_completed_event(
                        response=non_structured_output_parts,
                        call_type=LLMCallType.TOOL_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=self._convert_contents_to_dict(contents),
                    )
                    return non_structured_output_parts
                # Otherwise execute the tools internally
                for part in candidate.content.parts:
                    if part.function_call:
                        function_name = part.function_call.name
                        if function_name is None:
                            continue
                        # Skip structured_output - it's handled above
                        if function_name == STRUCTURED_OUTPUT_TOOL_NAME:
                            continue
                        function_args = (
                            dict(part.function_call.args)
                            if part.function_call.args
                            else {}
                        )
                        result = self._handle_tool_execution(
                            function_name=function_name,
                            function_args=function_args,
                            available_functions=available_functions or {},
                            from_task=from_task,
                            from_agent=from_agent,
                        )
                        # First non-None tool result short-circuits the call
                        if result is not None:
                            return result
        content = self._extract_text_from_response(response)
        # With tools present, structured output is expected via the
        # structured_output pseudo-tool above, so skip model validation here.
        effective_response_model = None if self.tools else response_model
        if not response_model:
            content = self._apply_stop_words(content)
        return self._finalize_completion_response(
            content=content,
            contents=contents,
            response_model=effective_response_model,
            from_task=from_task,
            from_agent=from_agent,
        )
    def _process_stream_chunk(
        self,
        chunk: GenerateContentResponse,
        full_response: str,
        function_calls: dict[int, dict[str, Any]],
        usage_data: dict[str, int],
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> tuple[str, dict[int, dict[str, Any]], dict[str, int]]:
        """Process a single streaming chunk.

        Appends any chunk text to the accumulated response, records function
        calls under sequential synthetic ids, and emits stream-chunk events
        for both text and tool calls.

        Args:
            chunk: The streaming chunk response
            full_response: Accumulated response text
            function_calls: Accumulated function calls keyed by sequential index
            usage_data: Accumulated usage data
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call

        Returns:
            Tuple of (updated full_response, updated function_calls, updated usage_data)
        """
        response_id = chunk.response_id if hasattr(chunk, "response_id") else None
        # Usage metadata on a chunk replaces (not adds to) the running totals
        if chunk.usage_metadata:
            usage_data = self._extract_token_usage(chunk)
        if chunk.text:
            full_response += chunk.text
            self._emit_stream_chunk_event(
                chunk=chunk.text,
                from_task=from_task,
                from_agent=from_agent,
                response_id=response_id,
            )
        if chunk.candidates:
            candidate = chunk.candidates[0]
            if candidate.content and candidate.content.parts:
                for part in candidate.content.parts:
                    if part.function_call:
                        # Assign a synthetic sequential id; the call index
                        # doubles as the dictionary key.
                        call_index = len(function_calls)
                        call_id = f"call_{call_index}"
                        args_dict = (
                            dict(part.function_call.args)
                            if part.function_call.args
                            else {}
                        )
                        args_json = json.dumps(args_dict)
                        function_calls[call_index] = {
                            "id": call_id,
                            "name": part.function_call.name,
                            "args": args_dict,
                        }
                        # Surface the tool call as a stream event in the
                        # OpenAI-style shape downstream consumers expect.
                        self._emit_stream_chunk_event(
                            chunk=args_json,
                            from_task=from_task,
                            from_agent=from_agent,
                            tool_call={
                                "id": call_id,
                                "function": {
                                    "name": part.function_call.name or "",
                                    "arguments": args_json,
                                },
                                "type": "function",
                                "index": call_index,
                            },
                            call_type=LLMCallType.TOOL_CALL,
                            response_id=response_id,
                        )
        return full_response, function_calls, usage_data
    def _finalize_streaming_response(
        self,
        full_response: str,
        function_calls: dict[int, dict[str, Any]],
        usage_data: dict[str, int],
        contents: list[types.Content],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | BaseModel | list[dict[str, Any]]:
        """Finalize streaming response with usage tracking, function execution, and events.

        Args:
            full_response: The complete streamed response content
            function_calls: Dictionary of function calls accumulated during streaming
            usage_data: Token usage data from the stream
            contents: Original contents for event conversion
            available_functions: Available functions for function calling
            from_task: Task that initiated the call
            from_agent: Agent that initiated the call
            response_model: Pydantic model for structured output validation

        Returns:
            Final response content after processing
        """
        self._track_token_usage_internal(usage_data)
        # A structured_output pseudo-tool call takes priority over everything else
        if response_model and function_calls:
            for call_data in function_calls.values():
                if call_data.get("name") == STRUCTURED_OUTPUT_TOOL_NAME:
                    structured_data = call_data.get("args", {})
                    return self._handle_structured_output_tool_call(
                        structured_data=structured_data,
                        response_model=response_model,
                        contents=contents,
                        from_task=from_task,
                        from_agent=from_agent,
                    )
        non_structured_output_calls = {
            idx: call_data
            for idx, call_data in function_calls.items()
            if call_data.get("name") != STRUCTURED_OUTPUT_TOOL_NAME
        }
        # If there are function calls but no available_functions,
        # return them for the executor to handle
        if non_structured_output_calls and not available_functions:
            # Reshape into the OpenAI-style tool-call payload the executor expects
            formatted_function_calls = [
                {
                    "id": call_data["id"],
                    "function": {
                        "name": call_data["name"],
                        "arguments": json.dumps(call_data["args"]),
                    },
                    "type": "function",
                }
                for call_data in non_structured_output_calls.values()
            ]
            self._emit_call_completed_event(
                response=formatted_function_calls,
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=self._convert_contents_to_dict(contents),
            )
            return formatted_function_calls
        # Handle completed function calls (excluding structured_output)
        if non_structured_output_calls and available_functions:
            for call_data in non_structured_output_calls.values():
                function_name = call_data["name"]
                function_args = call_data["args"]
                # Skip if function_name is None
                if not isinstance(function_name, str):
                    continue
                # Ensure function_args is a dict
                if not isinstance(function_args, dict):
                    function_args = {}
                # Execute tool
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                # First non-None tool result ends the turn
                if result is not None:
                    return result
        # When tools are present, structured output should come via the structured_output
        # pseudo-tool, not via direct text response. If we reach here with tools present,
        # the LLM chose to return plain text instead of calling structured_output.
        effective_response_model = None if self.tools else response_model
        return self._finalize_completion_response(
            content=full_response,
            contents=contents,
            response_model=effective_response_model,
            from_task=from_task,
            from_agent=from_agent,
        )
def _handle_completion(
self,
contents: list[types.Content],
config: types.GenerateContentConfig,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle non-streaming content generation."""
try:
# The API accepts list[Content] but mypy is overly strict about variance
contents_for_api: Any = contents
response = self.client.models.generate_content(
model=self.model,
contents=contents_for_api,
config=config,
)
usage = self._extract_token_usage(response)
except Exception as e:
if is_context_length_exceeded(e):
logging.error(f"Context window exceeded: {e}")
raise LLMContextLengthExceededError(str(e)) from e
raise e from e
self._track_token_usage_internal(usage)
return self._process_response_with_tools(
response=response,
contents=contents,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
def _handle_streaming_completion(
self,
contents: list[types.Content],
config: types.GenerateContentConfig,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | BaseModel | list[dict[str, Any]] | Any:
"""Handle streaming content generation."""
full_response = ""
function_calls: dict[int, dict[str, Any]] = {}
usage_data = {"total_tokens": 0}
# The API accepts list[Content] but mypy is overly strict about variance
contents_for_api: Any = contents
for chunk in self.client.models.generate_content_stream(
model=self.model,
contents=contents_for_api,
config=config,
):
full_response, function_calls, usage_data = self._process_stream_chunk(
chunk=chunk,
full_response=full_response,
function_calls=function_calls,
usage_data=usage_data,
from_task=from_task,
from_agent=from_agent,
)
return self._finalize_streaming_response(
full_response=full_response,
function_calls=function_calls,
usage_data=usage_data,
contents=contents,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
async def _ahandle_completion(
self,
contents: list[types.Content],
config: types.GenerateContentConfig,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle async non-streaming content generation."""
try:
# The API accepts list[Content] but mypy is overly strict about variance
contents_for_api: Any = contents
response = await self.client.aio.models.generate_content(
model=self.model,
contents=contents_for_api,
config=config,
)
usage = self._extract_token_usage(response)
except Exception as e:
if is_context_length_exceeded(e):
logging.error(f"Context window exceeded: {e}")
raise LLMContextLengthExceededError(str(e)) from e
raise e from e
self._track_token_usage_internal(usage)
return self._process_response_with_tools(
response=response,
contents=contents,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
async def _ahandle_streaming_completion(
self,
contents: list[types.Content],
config: types.GenerateContentConfig,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Handle async streaming content generation."""
full_response = ""
function_calls: dict[int, dict[str, Any]] = {}
usage_data = {"total_tokens": 0}
# The API accepts list[Content] but mypy is overly strict about variance
contents_for_api: Any = contents
stream = await self.client.aio.models.generate_content_stream(
model=self.model,
contents=contents_for_api,
config=config,
)
async for chunk in stream:
full_response, function_calls, usage_data = self._process_stream_chunk(
chunk=chunk,
full_response=full_response,
function_calls=function_calls,
usage_data=usage_data,
from_task=from_task,
from_agent=from_agent,
)
return self._finalize_streaming_response(
full_response=full_response,
function_calls=function_calls,
usage_data=usage_data,
contents=contents,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
def supports_function_calling(self) -> bool:
"""Check if the model supports function calling."""
return self.supports_tools
def supports_stop_words(self) -> bool:
"""Check if the model supports stop words."""
return True
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
if value < min_context or value > max_context:
raise ValueError(
f"Context window for {key} must be between {min_context} and {max_context}"
)
context_windows = {
"gemini-3-pro-preview": 1048576, # 1M tokens
"gemini-2.0-flash": 1048576, # 1M tokens
"gemini-2.0-flash-thinking": 32768,
"gemini-2.0-flash-lite": 1048576,
"gemini-2.5-flash": 1048576,
"gemini-2.5-pro": 1048576,
"gemini-1.5-pro": 2097152, # 2M tokens
"gemini-1.5-flash": 1048576,
"gemini-1.5-flash-8b": 1048576,
"gemini-1.0-pro": 32768,
"gemma-3-1b": 32000,
"gemma-3-4b": 128000,
"gemma-3-12b": 128000,
"gemma-3-27b": 128000,
}
# Find the best match for the model name
for model_prefix, size in context_windows.items():
if self.model.startswith(model_prefix):
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
# Default context window size for Gemini models
return int(1048576 * CONTEXT_WINDOW_USAGE_RATIO) # 1M tokens
@staticmethod
def _extract_token_usage(response: GenerateContentResponse) -> dict[str, Any]:
"""Extract token usage from Gemini response."""
if response.usage_metadata:
usage = response.usage_metadata
cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0
return {
"prompt_token_count": getattr(usage, "prompt_token_count", 0),
"candidates_token_count": getattr(usage, "candidates_token_count", 0),
"total_token_count": getattr(usage, "total_token_count", 0),
"total_tokens": getattr(usage, "total_token_count", 0),
"cached_prompt_tokens": cached_tokens,
}
return {"total_tokens": 0}
@staticmethod
def _extract_text_from_response(response: GenerateContentResponse) -> str:
"""Extract text content from Gemini response without triggering warnings.
This method directly accesses the response parts to extract text content,
avoiding the warning that occurs when using response.text on responses
containing non-text parts (e.g., 'thought_signature' from thinking models).
Args:
response: The Gemini API response
Returns:
Concatenated text content from all text parts
"""
if not response.candidates:
return ""
candidate = response.candidates[0]
if not candidate.content or not candidate.content.parts:
return ""
text_parts = [
part.text
for part in candidate.content.parts
if hasattr(part, "text") and part.text
]
return "".join(text_parts)
@staticmethod
def _add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]:
"""Add propertyOrdering to JSON schema for Gemini 2.0 compatibility.
Gemini 2.0 models require an explicit propertyOrdering list to define
the preferred structure of JSON objects. This recursively adds
propertyOrdering to all objects in the schema.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with propertyOrdering added to all objects.
"""
if isinstance(schema, dict):
if schema.get("type") == "object" and "properties" in schema:
properties = schema["properties"]
if properties and "propertyOrdering" not in schema:
schema["propertyOrdering"] = list(properties.keys())
for value in schema.values():
if isinstance(value, dict):
GeminiCompletion._add_property_ordering(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
GeminiCompletion._add_property_ordering(item)
return schema
@staticmethod
def _convert_contents_to_dict(
contents: list[types.Content],
) -> list[LLMMessage]:
"""Convert contents to dict format."""
result: list[LLMMessage] = []
for content_obj in contents:
role = content_obj.role
if role == "model":
role = "assistant"
elif role is None:
role = "user"
parts = content_obj.parts or []
content = " ".join(
part.text for part in parts if hasattr(part, "text") and part.text
)
result.append(
LLMMessage(
role=cast(Literal["user", "assistant", "system"], role),
content=content,
)
)
return result
def supports_multimodal(self) -> bool:
"""Check if the model supports multimodal inputs.
Gemini models support images, audio, video, and PDFs.
Returns:
True if the model supports multimodal inputs.
"""
return True
def format_text_content(self, text: str) -> dict[str, Any]:
"""Format text as a Gemini content block.
Gemini uses {"text": "..."} format instead of {"type": "text", "text": "..."}.
Args:
text: The text content to format.
Returns:
A content block in Gemini's expected format.
"""
return {"text": text}
def get_file_uploader(self) -> Any:
"""Get a Gemini file uploader using this LLM's client.
Returns:
GeminiFileUploader instance with pre-configured client.
"""
try:
from crewai_files.uploaders.gemini import GeminiFileUploader
return GeminiFileUploader(client=self.client)
except ImportError:
return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/providers/gemini/completion.py",
"license": "MIT License",
"lines": 1230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/openai/completion.py | from __future__ import annotations
from collections.abc import AsyncIterator
from dataclasses import dataclass, field
import json
import logging
import os
from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypedDict
import httpx
from openai import APIConnectionError, AsyncOpenAI, NotFoundError, OpenAI, Stream
from openai.lib.streaming.chat import ChatCompletionStream
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.responses import Response
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM, llm_call_context
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent.core import Agent
from crewai.llms.hooks.base import BaseInterceptor
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
class WebSearchResult(TypedDict, total=False):
    """Result from web search built-in tool."""
    # Identifier of the tool-call output item, when the API provides one.
    id: str | None
    # Status string reported for the call.
    status: str | None
    # Output item type discriminator.
    type: str
class FileSearchResultItem(TypedDict, total=False):
    """Individual file search result."""
    # ID of the file the match came from.
    file_id: str | None
    # Name of the matched file.
    filename: str | None
    # Matched text snippet.
    text: str | None
    # Relevance score for the match — exact range not defined here.
    score: float | None
    # Metadata attributes attached to the file.
    attributes: dict[str, str | float | bool] | None
class FileSearchResult(TypedDict, total=False):
    """Result from file search built-in tool."""
    # Identifier of the tool-call output item.
    id: str | None
    # Status string reported for the call.
    status: str | None
    # Output item type discriminator.
    type: str
    # Search queries the tool executed.
    queries: list[str]
    # Individual file matches returned by the search.
    results: list[FileSearchResultItem]
class CodeInterpreterLogResult(TypedDict):
    """Log output from code interpreter."""
    # Output item type discriminator.
    type: str
    # Captured log text from the execution.
    logs: str
class CodeInterpreterFileResult(TypedDict):
    """File output from code interpreter."""
    # Output item type discriminator.
    type: str
    # Files produced by the execution, as raw dicts from the API.
    files: list[dict[str, Any]]
class CodeInterpreterResult(TypedDict, total=False):
    """Result from code interpreter built-in tool."""
    # Identifier of the tool-call output item.
    id: str | None
    # Status string reported for the call.
    status: str | None
    # Output item type discriminator.
    type: str
    # Source code the interpreter ran.
    code: str | None
    # ID of the container the code ran in.
    container_id: str | None
    # Mixed log and file outputs from the run.
    results: list[CodeInterpreterLogResult | CodeInterpreterFileResult]
class ComputerUseResult(TypedDict, total=False):
    """Result from computer use built-in tool."""
    # Identifier of the tool-call output item.
    id: str | None
    # Status string reported for the call.
    status: str | None
    # Output item type discriminator.
    type: str
    # Call ID correlating this action with its follow-up output.
    call_id: str | None
    # Requested computer action, as a raw dict from the API.
    action: dict[str, Any]
    # Safety checks the caller must acknowledge before proceeding.
    pending_safety_checks: list[dict[str, Any]]
class ReasoningSummary(TypedDict, total=False):
    """Summary from model reasoning."""
    # Identifier of the reasoning output item.
    id: str | None
    # Status string reported for the item.
    status: str | None
    # Output item type discriminator.
    type: str
    # Summary entries as raw dicts from the API.
    summary: list[dict[str, Any]]
    # Encrypted reasoning content used for ZDR multi-turn chaining.
    encrypted_content: str | None
@dataclass
class ResponsesAPIResult:
    """Aggregated output of an OpenAI Responses API call.

    Bundles the response text with any built-in tool outputs, reasoning
    summaries, custom function calls, and the response ID used to chain
    multi-turn conversations.

    Attributes:
        text: The text content from the response.
        web_search_results: Results from web_search built-in tool calls.
        file_search_results: Results from file_search built-in tool calls.
        code_interpreter_results: Results from code_interpreter built-in tool calls.
        computer_use_results: Results from computer_use built-in tool calls.
        reasoning_summaries: Reasoning/thinking summaries from the model.
        function_calls: Custom function tool calls.
        response_id: The response ID for multi-turn conversations.
    """

    text: str = ""
    web_search_results: list[WebSearchResult] = field(default_factory=list)
    file_search_results: list[FileSearchResult] = field(default_factory=list)
    code_interpreter_results: list[CodeInterpreterResult] = field(default_factory=list)
    computer_use_results: list[ComputerUseResult] = field(default_factory=list)
    reasoning_summaries: list[ReasoningSummary] = field(default_factory=list)
    function_calls: list[dict[str, Any]] = field(default_factory=list)
    response_id: str | None = None

    def has_tool_outputs(self) -> bool:
        """Return True when any built-in tool produced output."""
        return any(
            (
                self.web_search_results,
                self.file_search_results,
                self.code_interpreter_results,
                self.computer_use_results,
            )
        )

    def has_reasoning(self) -> bool:
        """Return True when the model emitted reasoning summaries."""
        return len(self.reasoning_summaries) > 0
class OpenAICompletion(BaseLLM):
"""OpenAI native completion implementation.
This class provides direct integration with the OpenAI Python SDK,
supporting both Chat Completions API and Responses API.
The Responses API is OpenAI's newer API primitive with built-in tools
(web search, file search, code interpreter), stateful conversations,
and improved reasoning model support.
Args:
api: Which OpenAI API to use - "completions" (default) or "responses".
instructions: System-level instructions (Responses API only).
store: Whether to store responses for multi-turn (Responses API only).
previous_response_id: ID of previous response for multi-turn (Responses API only).
include: Additional data to include in response (Responses API only).
builtin_tools: List of OpenAI built-in tools to enable (Responses API only).
Supported: "web_search", "file_search", "code_interpreter", "computer_use".
parse_tool_outputs: Whether to return structured ResponsesAPIResult with
parsed built-in tool outputs instead of just text (Responses API only).
auto_chain: Automatically track and use response IDs for multi-turn
conversations (Responses API only). When True, each response ID is saved
and used as previous_response_id in subsequent calls.
auto_chain_reasoning: Automatically track and pass encrypted reasoning items
for ZDR (Zero Data Retention) compliance (Responses API only). When True,
adds "reasoning.encrypted_content" to include, captures reasoning items
from responses, and passes them back in subsequent calls to preserve
chain-of-thought without storing data on OpenAI servers.
"""
BUILTIN_TOOL_TYPES: ClassVar[dict[str, str]] = {
"web_search": "web_search_preview",
"file_search": "file_search",
"code_interpreter": "code_interpreter",
"computer_use": "computer_use_preview",
}
    def __init__(
        self,
        model: str = "gpt-4o",
        api_key: str | None = None,
        base_url: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        timeout: float | None = None,
        max_retries: int = 2,
        default_headers: dict[str, str] | None = None,
        default_query: dict[str, Any] | None = None,
        client_params: dict[str, Any] | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        frequency_penalty: float | None = None,
        presence_penalty: float | None = None,
        max_tokens: int | None = None,
        max_completion_tokens: int | None = None,
        seed: int | None = None,
        stream: bool = False,
        response_format: dict[str, Any] | type[BaseModel] | None = None,
        logprobs: bool | None = None,
        top_logprobs: int | None = None,
        reasoning_effort: str | None = None,
        provider: str | None = None,
        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
        api: Literal["completions", "responses"] = "completions",
        instructions: str | None = None,
        store: bool | None = None,
        previous_response_id: str | None = None,
        include: list[str] | None = None,
        builtin_tools: list[str] | None = None,
        parse_tool_outputs: bool = False,
        auto_chain: bool = False,
        auto_chain_reasoning: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize OpenAI completion client.

        Builds both a sync (`self.client`) and async (`self.async_client`)
        OpenAI SDK client from the same parameter set, optionally routing
        traffic through an interceptor-backed httpx transport. Falls back
        to the OPENAI_API_KEY environment variable when no key is given.
        """
        # NOTE(review): `provider` is a named parameter, so a caller-supplied
        # value binds to the parameter and never lands in **kwargs; this
        # pop() can therefore only ever return the default "openai" — confirm
        # whether a plain `provider = "openai"` was intended.
        if provider is None:
            provider = kwargs.pop("provider", "openai")
        self.interceptor = interceptor
        # Client configuration attributes. These must be assigned BEFORE
        # _get_client_params() is called below, which reads them off self.
        self.organization = organization
        self.project = project
        self.max_retries = max_retries
        self.default_headers = default_headers
        self.default_query = default_query
        self.client_params = client_params
        self.timeout = timeout
        self.base_url = base_url
        # Legacy alias for base_url, accepted via kwargs only.
        self.api_base = kwargs.pop("api_base", None)
        # super().__init__ resolves and stores self.api_key, which
        # _get_client_params() depends on.
        super().__init__(
            model=model,
            temperature=temperature,
            api_key=api_key or os.getenv("OPENAI_API_KEY"),
            base_url=base_url,
            timeout=timeout,
            provider=provider,
            **kwargs,
        )
        client_config = self._get_client_params()
        # When an interceptor is configured, wrap it in a custom httpx
        # transport so every request/response passes through it.
        if self.interceptor:
            transport = HTTPTransport(interceptor=self.interceptor)
            http_client = httpx.Client(transport=transport)
            client_config["http_client"] = http_client
        self.client = OpenAI(**client_config)
        # Same wiring again for the async client (separate transport/client
        # objects; sync and async clients must not share an http_client).
        async_client_config = self._get_client_params()
        if self.interceptor:
            async_transport = AsyncHTTPTransport(interceptor=self.interceptor)
            async_http_client = httpx.AsyncClient(transport=async_transport)
            async_client_config["http_client"] = async_http_client
        self.async_client = AsyncOpenAI(**async_client_config)
        # Completion parameters (used by _prepare_completion_params /
        # _prepare_responses_params).
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        self.max_tokens = max_tokens
        self.max_completion_tokens = max_completion_tokens
        self.seed = seed
        self.stream = stream
        self.response_format = response_format
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.reasoning_effort = reasoning_effort
        # Model-family flags based on a simple substring match of the name.
        self.is_o1_model = "o1" in model.lower()
        self.is_gpt4_model = "gpt-4" in model.lower()
        # API selection and Responses API parameters
        self.api = api
        self.instructions = instructions
        self.store = store
        self.previous_response_id = previous_response_id
        self.include = include
        self.builtin_tools = builtin_tools
        self.parse_tool_outputs = parse_tool_outputs
        self.auto_chain = auto_chain
        self.auto_chain_reasoning = auto_chain_reasoning
        # Mutable per-instance chaining state, updated by the response
        # handlers when auto_chain / auto_chain_reasoning are enabled.
        self._last_response_id: str | None = None
        self._last_reasoning_items: list[Any] | None = None
@property
def last_response_id(self) -> str | None:
"""Get the last response ID from auto-chaining.
Returns:
The response ID from the most recent Responses API call,
or None if no calls have been made or auto_chain is disabled.
"""
return self._last_response_id
def reset_chain(self) -> None:
"""Reset the auto-chain state to start a new conversation.
Clears the stored response ID so the next call starts fresh
without linking to previous responses.
"""
self._last_response_id = None
@property
def last_reasoning_items(self) -> list[Any] | None:
"""Get the last reasoning items from auto-chain reasoning.
Returns:
The reasoning items from the most recent Responses API call
containing encrypted content, or None if no calls have been made
or auto_chain_reasoning is disabled.
"""
return self._last_reasoning_items
def reset_reasoning_chain(self) -> None:
"""Reset the reasoning chain state to start fresh.
Clears the stored reasoning items so the next call starts without
preserving previous chain-of-thought context. Useful when starting
a new reasoning task that shouldn't reference previous reasoning.
"""
self._last_reasoning_items = None
def _get_client_params(self) -> dict[str, Any]:
"""Get OpenAI client parameters."""
if self.api_key is None:
self.api_key = os.getenv("OPENAI_API_KEY")
if self.api_key is None:
raise ValueError("OPENAI_API_KEY is required")
base_params = {
"api_key": self.api_key,
"organization": self.organization,
"project": self.project,
"base_url": self.base_url
or self.api_base
or os.getenv("OPENAI_BASE_URL")
or None,
"timeout": self.timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
client_params = {k: v for k, v in base_params.items() if v is not None}
if self.client_params:
client_params.update(self.client_params)
return client_params
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call OpenAI API (Chat Completions or Responses based on api setting).

        Args:
            messages: Input messages for the completion.
            tools: List of tool/function definitions.
            callbacks: Callback functions (not used in native implementation).
            available_functions: Available functions for tool calling.
            from_task: Task that initiated the call.
            from_agent: Agent that initiated the call.
            response_model: Response model for structured output.

        Returns:
            Completion response or tool call result.

        Raises:
            ValueError: If a before_llm_call hook blocks the call.
        """
        with llm_call_context():
            try:
                # Emit the "started" event before any work so observers see
                # the call even if formatting or hooks fail.
                self._emit_call_started_event(
                    messages=messages,
                    tools=tools,
                    callbacks=callbacks,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                formatted_messages = self._format_messages(messages)
                # Hooks may veto the call entirely; the resulting ValueError
                # is routed through the except below (failed event + re-raise).
                if not self._invoke_before_llm_call_hooks(
                    formatted_messages, from_agent
                ):
                    raise ValueError("LLM call blocked by before_llm_call hook")
                # Dispatch to the selected API surface.
                if self.api == "responses":
                    return self._call_responses(
                        messages=formatted_messages,
                        tools=tools,
                        available_functions=available_functions,
                        from_task=from_task,
                        from_agent=from_agent,
                        response_model=response_model,
                    )
                return self._call_completions(
                    messages=formatted_messages,
                    tools=tools,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                    response_model=response_model,
                )
            except Exception as e:
                # Every failure is logged and surfaced as an event, then
                # re-raised unchanged for the caller to handle.
                error_msg = f"OpenAI API call failed: {e!s}"
                logging.error(error_msg)
                self._emit_call_failed_event(
                    error=error_msg, from_task=from_task, from_agent=from_agent
                )
                raise
def _call_completions(
self,
messages: list[LLMMessage],
tools: list[dict[str, BaseTool]] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Task | None = None,
from_agent: Agent | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Call OpenAI Chat Completions API."""
completion_params = self._prepare_completion_params(
messages=messages, tools=tools
)
if self.stream:
return self._handle_streaming_completion(
params=completion_params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
return self._handle_completion(
params=completion_params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
async def acall(
self,
messages: str | list[LLMMessage],
tools: list[dict[str, BaseTool]] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Task | None = None,
from_agent: Agent | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Async call to OpenAI API (Chat Completions or Responses).
Args:
messages: Input messages for the completion.
tools: List of tool/function definitions.
callbacks: Callback functions (not used in native implementation).
available_functions: Available functions for tool calling.
from_task: Task that initiated the call.
from_agent: Agent that initiated the call.
response_model: Response model for structured output.
Returns:
Completion response or tool call result.
"""
with llm_call_context():
try:
self._emit_call_started_event(
messages=messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
formatted_messages = self._format_messages(messages)
if self.api == "responses":
return await self._acall_responses(
messages=formatted_messages,
tools=tools,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
return await self._acall_completions(
messages=formatted_messages,
tools=tools,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
except Exception as e:
error_msg = f"OpenAI API call failed: {e!s}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
async def _acall_completions(
self,
messages: list[LLMMessage],
tools: list[dict[str, BaseTool]] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Task | None = None,
from_agent: Agent | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Async call to OpenAI Chat Completions API."""
completion_params = self._prepare_completion_params(
messages=messages, tools=tools
)
if self.stream:
return await self._ahandle_streaming_completion(
params=completion_params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
return await self._ahandle_completion(
params=completion_params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
def _call_responses(
self,
messages: list[LLMMessage],
tools: list[dict[str, BaseTool]] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Task | None = None,
from_agent: Agent | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Call OpenAI Responses API."""
params = self._prepare_responses_params(
messages=messages, tools=tools, response_model=response_model
)
if self.stream:
return self._handle_streaming_responses(
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
return self._handle_responses(
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
async def _acall_responses(
self,
messages: list[LLMMessage],
tools: list[dict[str, BaseTool]] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Task | None = None,
from_agent: Agent | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
"""Async call to OpenAI Responses API."""
params = self._prepare_responses_params(
messages=messages, tools=tools, response_model=response_model
)
if self.stream:
return await self._ahandle_streaming_responses(
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
return await self._ahandle_responses(
params=params,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
response_model=response_model,
)
    def _prepare_responses_params(
        self,
        messages: list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> dict[str, Any]:
        """Prepare parameters for OpenAI Responses API.

        The Responses API uses a different structure than Chat Completions:
        - `input` instead of `messages`
        - `instructions` for system-level guidance (extracted from system messages)
        - `text.format` instead of `response_format` for structured outputs
        - Internally-tagged tool format (flat structure)

        Args:
            messages: Formatted chat messages; system messages are folded
                into `instructions`, the rest become `input`.
            tools: CrewAI tool definitions, converted to the flat format.
            response_model: Optional pydantic model for structured output.

        Returns:
            Keyword arguments ready for `client.responses.create(**params)`.
        """
        # Fold all system messages into a single instructions string,
        # appended after any instance-level self.instructions.
        instructions: str | None = self.instructions
        input_messages: list[LLMMessage] = []
        for message in messages:
            if message.get("role") == "system":
                content = message.get("content", "")
                # System messages should always have string content
                content_str = content if isinstance(content, str) else str(content)
                if instructions:
                    instructions = f"{instructions}\n\n{content_str}"
                else:
                    instructions = content_str
            else:
                input_messages.append(message)
        # Prepare input with optional reasoning items for ZDR chaining
        final_input: list[Any] = []
        if self.auto_chain_reasoning and self._last_reasoning_items:
            final_input.extend(self._last_reasoning_items)
        # NOTE(review): if every incoming message was a system message,
        # input_messages is empty and the full original list (system
        # messages included) is sent as input, duplicating content that was
        # also folded into instructions — confirm this fallback is intended.
        final_input.extend(input_messages if input_messages else messages)
        params: dict[str, Any] = {
            "model": self.model,
            "input": final_input,
        }
        if instructions:
            params["instructions"] = instructions
        if self.stream:
            params["stream"] = True
        if self.store is not None:
            params["store"] = self.store
        # Handle response chaining: explicit previous_response_id takes precedence
        if self.previous_response_id:
            params["previous_response_id"] = self.previous_response_id
        elif self.auto_chain and self._last_response_id:
            params["previous_response_id"] = self._last_response_id
        # Handle include parameter with auto_chain_reasoning support
        include_items: list[str] = list(self.include) if self.include else []
        if self.auto_chain_reasoning:
            if "reasoning.encrypted_content" not in include_items:
                include_items.append("reasoning.encrypted_content")
        if include_items:
            params["include"] = include_items
        # Pass-through extras collected by the base class; applied before
        # the explicit sampling params below, which therefore win on clash.
        params.update(self.additional_params)
        if self.temperature is not None:
            params["temperature"] = self.temperature
        if self.top_p is not None:
            params["top_p"] = self.top_p
        # max_completion_tokens takes precedence over the legacy max_tokens.
        if self.max_completion_tokens is not None:
            params["max_output_tokens"] = self.max_completion_tokens
        elif self.max_tokens is not None:
            params["max_output_tokens"] = self.max_tokens
        if self.seed is not None:
            params["seed"] = self.seed
        if self.reasoning_effort:
            params["reasoning"] = {"effort": self.reasoning_effort}
        # Structured output: an explicit response_model argument overrides
        # the instance-level response_format.
        if response_model or self.response_format:
            format_model = response_model or self.response_format
            if isinstance(format_model, type) and issubclass(format_model, BaseModel):
                schema_output = generate_model_description(format_model)
                json_schema = schema_output.get("json_schema", {})
                params["text"] = {
                    "format": {
                        "type": "json_schema",
                        "name": json_schema.get("name", format_model.__name__),
                        "strict": json_schema.get("strict", True),
                        "schema": json_schema.get("schema", {}),
                    }
                }
            elif isinstance(format_model, dict):
                params["text"] = {"format": format_model}
        # Builtin tools first (mapped via BUILTIN_TOOL_TYPES, unknown names
        # passed through), then converted function tools.
        all_tools: list[dict[str, Any]] = []
        if self.builtin_tools:
            for tool_name in self.builtin_tools:
                tool_type = self.BUILTIN_TOOL_TYPES.get(tool_name, tool_name)
                all_tools.append({"type": tool_type})
        if tools:
            all_tools.extend(self._convert_tools_for_responses(tools))
        if all_tools:
            params["tools"] = all_tools
        # Strip CrewAI-internal keys that additional_params may have
        # introduced so they are never sent to the OpenAI API.
        crewai_specific_params = {
            "callbacks",
            "available_functions",
            "from_task",
            "from_agent",
            "provider",
            "api_key",
            "base_url",
            "api_base",
            "timeout",
        }
        return {k: v for k, v in params.items() if k not in crewai_specific_params}
def _convert_tools_for_responses(
self, tools: list[dict[str, BaseTool]]
) -> list[dict[str, Any]]:
"""Convert CrewAI tools to Responses API format.
Responses API uses internally-tagged format (flat structure):
{
"type": "function",
"name": "get_weather",
"description": "...",
"parameters": {...}
}
Unlike Chat Completions which uses externally-tagged:
{
"type": "function",
"function": {"name": "...", "description": "...", "parameters": {...}}
}
"""
from crewai.llms.providers.utils.common import safe_tool_conversion
responses_tools = []
for tool in tools:
name, description, parameters = safe_tool_conversion(tool, "OpenAI")
responses_tool: dict[str, Any] = {
"type": "function",
"name": name,
"description": description,
}
if parameters:
if isinstance(parameters, dict):
responses_tool["parameters"] = parameters
else:
responses_tool["parameters"] = dict(parameters)
responses_tools.append(responses_tool)
return responses_tools
    def _handle_responses(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | ResponsesAPIResult | Any:
        """Handle non-streaming Responses API call.

        Returns, in priority order: a ResponsesAPIResult when
        parse_tool_outputs is set, the raw function-call list when the model
        requested tools but no executors were supplied, the first non-None
        tool-execution result, a validated structured object when a
        response_model is given, or the plain output text (after stop words
        and after_llm_call hooks).
        """
        try:
            response: Response = self.client.responses.create(**params)
            # Track response ID for auto-chaining
            if self.auto_chain and response.id:
                self._last_response_id = response.id
            # Track reasoning items for ZDR auto-chaining
            if self.auto_chain_reasoning:
                reasoning_items = self._extract_reasoning_items(response)
                if reasoning_items:
                    self._last_reasoning_items = reasoning_items
            usage = self._extract_responses_token_usage(response)
            self._track_token_usage_internal(usage)
            # If parse_tool_outputs is enabled, return structured result
            if self.parse_tool_outputs:
                parsed_result = self._extract_builtin_tool_outputs(response)
                parsed_result.text = self._apply_stop_words(parsed_result.text)
                self._emit_call_completed_event(
                    response=parsed_result.text,
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params.get("input", []),
                )
                return parsed_result
            function_calls = self._extract_function_calls_from_response(response)
            # No executors available: hand the raw calls back to the caller.
            if function_calls and not available_functions:
                self._emit_call_completed_event(
                    response=function_calls,
                    call_type=LLMCallType.TOOL_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params.get("input", []),
                )
                return function_calls
            # Execute requested tools; the first non-None result wins and is
            # returned immediately, skipping the remaining calls.
            if function_calls and available_functions:
                for call in function_calls:
                    function_name = call.get("name", "")
                    function_args = call.get("arguments", {})
                    if isinstance(function_args, str):
                        try:
                            function_args = json.loads(function_args)
                        except json.JSONDecodeError:
                            # Unparseable arguments degrade to an empty dict
                            # rather than failing the whole call.
                            function_args = {}
                    result = self._handle_tool_execution(
                        function_name=function_name,
                        function_args=function_args,
                        available_functions=available_functions,
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if result is not None:
                        return result
            content = response.output_text or ""
            if response_model:
                try:
                    structured_result = self._validate_structured_output(
                        content, response_model
                    )
                    self._emit_call_completed_event(
                        response=structured_result,
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params.get("input", []),
                    )
                    return structured_result
                except ValueError as e:
                    # Validation failure falls through to the plain-text path.
                    logging.warning(f"Structured output validation failed: {e}")
            content = self._apply_stop_words(content)
            self._emit_call_completed_event(
                response=content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params.get("input", []),
            )
            # after_llm_call hooks may rewrite the final content; the result
            # is returned by the `return content` after this try/except.
            content = self._invoke_after_llm_call_hooks(
                params.get("input", []), content, from_agent
            )
        except NotFoundError as e:
            error_msg = f"Model {self.model} not found: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ValueError(error_msg) from e
        except APIConnectionError as e:
            error_msg = f"Failed to connect to OpenAI API: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ConnectionError(error_msg) from e
        except Exception as e:
            # Context-window overflows get a dedicated exception type.
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            error_msg = f"OpenAI Responses API call failed: {e!s}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise
        # Reached only on the plain-text path (all other paths return inside
        # the try block).
        return content
async def _ahandle_responses(
self,
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | ResponsesAPIResult | Any:
"""Handle async non-streaming Responses API call."""
try:
response: Response = await self.async_client.responses.create(**params)
# Track response ID for auto-chaining
if self.auto_chain and response.id:
self._last_response_id = response.id
# Track reasoning items for ZDR auto-chaining
if self.auto_chain_reasoning:
reasoning_items = self._extract_reasoning_items(response)
if reasoning_items:
self._last_reasoning_items = reasoning_items
usage = self._extract_responses_token_usage(response)
self._track_token_usage_internal(usage)
# If parse_tool_outputs is enabled, return structured result
if self.parse_tool_outputs:
parsed_result = self._extract_builtin_tool_outputs(response)
parsed_result.text = self._apply_stop_words(parsed_result.text)
self._emit_call_completed_event(
response=parsed_result.text,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return parsed_result
function_calls = self._extract_function_calls_from_response(response)
if function_calls and not available_functions:
self._emit_call_completed_event(
response=function_calls,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return function_calls
if function_calls and available_functions:
for call in function_calls:
function_name = call.get("name", "")
function_args = call.get("arguments", {})
if isinstance(function_args, str):
try:
function_args = json.loads(function_args)
except json.JSONDecodeError:
function_args = {}
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
content = response.output_text or ""
if response_model:
try:
structured_result = self._validate_structured_output(
content, response_model
)
self._emit_call_completed_event(
response=structured_result,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return structured_result
except ValueError as e:
logging.warning(f"Structured output validation failed: {e}")
content = self._apply_stop_words(content)
self._emit_call_completed_event(
response=content,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
except NotFoundError as e:
error_msg = f"Model {self.model} not found: {e}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise ValueError(error_msg) from e
except APIConnectionError as e:
error_msg = f"Failed to connect to OpenAI API: {e}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise ConnectionError(error_msg) from e
except Exception as e:
if is_context_length_exceeded(e):
logging.error(f"Context window exceeded: {e}")
raise LLMContextLengthExceededError(str(e)) from e
error_msg = f"OpenAI Responses API call failed: {e!s}"
logging.error(error_msg)
self._emit_call_failed_event(
error=error_msg, from_task=from_task, from_agent=from_agent
)
raise
return content
    def _handle_streaming_responses(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | ResponsesAPIResult | Any:
        """Handle streaming Responses API call.

        Consumes the event stream, accumulating output text deltas (each
        re-emitted as a stream-chunk event) and completed function-call
        items, then applies the same post-processing as the non-streaming
        handler on the accumulated result.
        """
        full_response = ""
        function_calls: list[dict[str, Any]] = []
        final_response: Response | None = None
        stream = self.client.responses.create(**params)
        response_id_stream = None
        for event in stream:
            if event.type == "response.created":
                # Remember the stream's response ID so chunk events can
                # carry it.
                response_id_stream = event.response.id
            if event.type == "response.output_text.delta":
                delta_text = event.delta or ""
                full_response += delta_text
                self._emit_stream_chunk_event(
                    chunk=delta_text,
                    from_task=from_task,
                    from_agent=from_agent,
                    response_id=response_id_stream,
                )
            elif event.type == "response.function_call_arguments.delta":
                # Argument deltas are ignored; the fully-assembled call
                # arrives in the output_item.done event below.
                pass
            elif event.type == "response.output_item.done":
                item = event.item
                if item.type == "function_call":
                    function_calls.append(
                        {
                            "id": item.call_id,
                            "name": item.name,
                            "arguments": item.arguments,
                        }
                    )
            elif event.type == "response.completed":
                final_response = event.response
                # Track response ID for auto-chaining
                if self.auto_chain and event.response and event.response.id:
                    self._last_response_id = event.response.id
                # Track reasoning items for ZDR auto-chaining
                if self.auto_chain_reasoning and event.response:
                    reasoning_items = self._extract_reasoning_items(event.response)
                    if reasoning_items:
                        self._last_reasoning_items = reasoning_items
                if event.response and event.response.usage:
                    usage = self._extract_responses_token_usage(event.response)
                    self._track_token_usage_internal(usage)
        # If parse_tool_outputs is enabled, return structured result
        if self.parse_tool_outputs and final_response:
            parsed_result = self._extract_builtin_tool_outputs(final_response)
            parsed_result.text = self._apply_stop_words(parsed_result.text)
            self._emit_call_completed_event(
                response=parsed_result.text,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params.get("input", []),
            )
            return parsed_result
        # NOTE(review): unlike the non-streaming handler, accumulated
        # function calls are NOT returned early when available_functions is
        # None — confirm whether that asymmetry is intended.
        if function_calls and available_functions:
            for call in function_calls:
                function_name = call.get("name", "")
                function_args = call.get("arguments", {})
                if isinstance(function_args, str):
                    try:
                        function_args = json.loads(function_args)
                    except json.JSONDecodeError:
                        function_args = {}
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                if result is not None:
                    return result
        if response_model:
            try:
                structured_result = self._validate_structured_output(
                    full_response, response_model
                )
                self._emit_call_completed_event(
                    response=structured_result,
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params.get("input", []),
                )
                return structured_result
            except ValueError as e:
                # Validation failure falls through to the plain-text path.
                logging.warning(f"Structured output validation failed: {e}")
        full_response = self._apply_stop_words(full_response)
        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params.get("input", []),
        )
        # after_llm_call hooks get the final say on the returned text.
        return self._invoke_after_llm_call_hooks(
            params.get("input", []), full_response, from_agent
        )
async def _ahandle_streaming_responses(
self,
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | ResponsesAPIResult | Any:
"""Handle async streaming Responses API call."""
full_response = ""
function_calls: list[dict[str, Any]] = []
final_response: Response | None = None
stream = await self.async_client.responses.create(**params)
response_id_stream = None
async for event in stream:
if event.type == "response.created":
response_id_stream = event.response.id
if event.type == "response.output_text.delta":
delta_text = event.delta or ""
full_response += delta_text
self._emit_stream_chunk_event(
chunk=delta_text,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)
elif event.type == "response.function_call_arguments.delta":
pass
elif event.type == "response.output_item.done":
item = event.item
if item.type == "function_call":
function_calls.append(
{
"id": item.call_id,
"name": item.name,
"arguments": item.arguments,
}
)
elif event.type == "response.completed":
final_response = event.response
# Track response ID for auto-chaining
if self.auto_chain and event.response and event.response.id:
self._last_response_id = event.response.id
# Track reasoning items for ZDR auto-chaining
if self.auto_chain_reasoning and event.response:
reasoning_items = self._extract_reasoning_items(event.response)
if reasoning_items:
self._last_reasoning_items = reasoning_items
if event.response and event.response.usage:
usage = self._extract_responses_token_usage(event.response)
self._track_token_usage_internal(usage)
# If parse_tool_outputs is enabled, return structured result
if self.parse_tool_outputs and final_response:
parsed_result = self._extract_builtin_tool_outputs(final_response)
parsed_result.text = self._apply_stop_words(parsed_result.text)
self._emit_call_completed_event(
response=parsed_result.text,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return parsed_result
if function_calls and available_functions:
for call in function_calls:
function_name = call.get("name", "")
function_args = call.get("arguments", {})
if isinstance(function_args, str):
try:
function_args = json.loads(function_args)
except json.JSONDecodeError:
function_args = {}
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
if response_model:
try:
structured_result = self._validate_structured_output(
full_response, response_model
)
self._emit_call_completed_event(
response=structured_result,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return structured_result
except ValueError as e:
logging.warning(f"Structured output validation failed: {e}")
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params.get("input", []),
)
return full_response
def _extract_function_calls_from_response(
self, response: Response
) -> list[dict[str, Any]]:
"""Extract function calls from Responses API output."""
return [
{
"id": item.call_id,
"name": item.name,
"arguments": item.arguments,
}
for item in response.output
if item.type == "function_call"
]
def _extract_responses_token_usage(self, response: Response) -> dict[str, Any]:
"""Extract token usage from Responses API response."""
if response.usage:
result = {
"prompt_tokens": response.usage.input_tokens,
"completion_tokens": response.usage.output_tokens,
"total_tokens": response.usage.total_tokens,
}
# Extract cached prompt tokens from input_tokens_details
input_details = getattr(response.usage, "input_tokens_details", None)
if input_details:
result["cached_prompt_tokens"] = (
getattr(input_details, "cached_tokens", 0) or 0
)
return result
return {"total_tokens": 0}
    def _extract_builtin_tool_outputs(self, response: Response) -> ResponsesAPIResult:
        """Extract and parse all built-in tool outputs from Responses API.

        Parses web_search, file_search, code_interpreter, computer_use,
        and reasoning outputs into structured types. Caller-defined tool
        invocations (``function_call`` items) are collected as raw dicts.

        Args:
            response: The OpenAI Response object.

        Returns:
            ResponsesAPIResult containing parsed outputs.
        """
        # Seed the result with the plain text answer; each recognized output
        # item below is appended to its matching typed collection.
        result = ResponsesAPIResult(
            text=response.output_text or "",
            response_id=response.id,
        )
        for item in response.output:
            item_type = item.type
            if item_type == "web_search_call":
                # Web search calls only expose id/status here.
                result.web_search_results.append(
                    WebSearchResult(
                        id=item.id,
                        status=item.status,  # type: ignore[union-attr]
                        type=item_type,
                    )
                )
            elif item_type == "file_search_call":
                # Flatten per-file hits when present; item.results may be falsy.
                file_results: list[FileSearchResultItem] = (
                    [
                        FileSearchResultItem(
                            file_id=r.file_id,  # type: ignore[union-attr]
                            filename=r.filename,  # type: ignore[union-attr]
                            text=r.text,  # type: ignore[union-attr]
                            score=r.score,  # type: ignore[union-attr]
                            attributes=r.attributes,  # type: ignore[union-attr]
                        )
                        for r in item.results  # type: ignore[union-attr]
                    ]
                    if item.results  # type: ignore[union-attr]
                    else []
                )
                result.file_search_results.append(
                    FileSearchResult(
                        id=item.id,
                        status=item.status,  # type: ignore[union-attr]
                        type=item_type,
                        queries=list(item.queries),  # type: ignore[union-attr]
                        results=file_results,
                    )
                )
            elif item_type == "code_interpreter_call":
                # Code interpreter output mixes log entries and generated files.
                code_results: list[
                    CodeInterpreterLogResult | CodeInterpreterFileResult
                ] = []
                for r in item.results:  # type: ignore[union-attr]
                    if r.type == "logs":  # type: ignore[union-attr]
                        code_results.append(
                            CodeInterpreterLogResult(type="logs", logs=r.logs)  # type: ignore[union-attr]
                        )
                    elif r.type == "files":  # type: ignore[union-attr]
                        files_data = [
                            {"file_id": f.file_id, "mime_type": f.mime_type}
                            for f in r.files  # type: ignore[union-attr]
                        ]
                        code_results.append(
                            CodeInterpreterFileResult(type="files", files=files_data)
                        )
                result.code_interpreter_results.append(
                    CodeInterpreterResult(
                        id=item.id,
                        status=item.status,  # type: ignore[union-attr]
                        type=item_type,
                        code=item.code,  # type: ignore[union-attr]
                        container_id=item.container_id,  # type: ignore[union-attr]
                        results=code_results,
                    )
                )
            elif item_type == "computer_call":
                # Serialize the action model and surface any safety checks the
                # caller must acknowledge before the action can proceed.
                action_dict = item.action.model_dump() if item.action else {}  # type: ignore[union-attr]
                safety_checks = [
                    {"id": c.id, "code": c.code, "message": c.message}
                    for c in item.pending_safety_checks  # type: ignore[union-attr]
                ]
                result.computer_use_results.append(
                    ComputerUseResult(
                        id=item.id,
                        status=item.status,  # type: ignore[union-attr]
                        type=item_type,
                        call_id=item.call_id,  # type: ignore[union-attr]
                        action=action_dict,
                        pending_safety_checks=safety_checks,
                    )
                )
            elif item_type == "reasoning":
                # Reasoning summaries; encrypted_content supports ZDR replay.
                summaries = [{"type": s.type, "text": s.text} for s in item.summary]  # type: ignore[union-attr]
                result.reasoning_summaries.append(
                    ReasoningSummary(
                        id=item.id,
                        status=item.status,  # type: ignore[union-attr]
                        type=item_type,
                        summary=summaries,
                        encrypted_content=item.encrypted_content,  # type: ignore[union-attr]
                    )
                )
            elif item_type == "function_call":
                # Caller-supplied tool invocations requested by the model.
                result.function_calls.append(
                    {
                        "id": item.call_id,  # type: ignore[union-attr]
                        "name": item.name,  # type: ignore[union-attr]
                        "arguments": item.arguments,  # type: ignore[union-attr]
                    }
                )
        return result
    def _extract_reasoning_items(self, response: Response) -> list[Any]:
        """Extract reasoning items from the response output.

        Used for ZDR (Zero Data Retention) compliance to capture encrypted
        reasoning tokens that can be passed back in subsequent requests.

        Args:
            response: The OpenAI Response object.

        Returns:
            All ``reasoning`` items from the response output, suitable for
            passing back in future requests.

        Note:
            Items are NOT filtered on ``encrypted_content`` — every
            reasoning item is returned regardless of whether it carries
            encrypted content.
        """
        return [item for item in response.output if item.type == "reasoning"]
def _prepare_completion_params(
self, messages: list[LLMMessage], tools: list[dict[str, BaseTool]] | None = None
) -> dict[str, Any]:
"""Prepare parameters for OpenAI chat completion."""
params: dict[str, Any] = {
"model": self.model,
"messages": messages,
}
if self.stream:
params["stream"] = self.stream
params["stream_options"] = {"include_usage": True}
params.update(self.additional_params)
if self.temperature is not None:
params["temperature"] = self.temperature
if self.top_p is not None:
params["top_p"] = self.top_p
if self.frequency_penalty is not None:
params["frequency_penalty"] = self.frequency_penalty
if self.presence_penalty is not None:
params["presence_penalty"] = self.presence_penalty
if self.max_completion_tokens is not None:
params["max_completion_tokens"] = self.max_completion_tokens
elif self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
if self.seed is not None:
params["seed"] = self.seed
if self.logprobs is not None:
params["logprobs"] = self.logprobs
if self.top_logprobs is not None:
params["top_logprobs"] = self.top_logprobs
# Handle o1 model specific parameters
if self.is_o1_model and self.reasoning_effort:
params["reasoning_effort"] = self.reasoning_effort
if self.response_format is not None:
if isinstance(self.response_format, type) and issubclass(
self.response_format, BaseModel
):
params["response_format"] = generate_model_description(
self.response_format
)
elif isinstance(self.response_format, dict):
params["response_format"] = self.response_format
if tools:
params["tools"] = self._convert_tools_for_interference(tools)
params["tool_choice"] = "auto"
# Filter out CrewAI-specific parameters that shouldn't go to the API
crewai_specific_params = {
"callbacks",
"available_functions",
"from_task",
"from_agent",
"provider",
"api_key",
"base_url",
"api_base",
"timeout",
}
return {k: v for k, v in params.items() if k not in crewai_specific_params}
def _convert_tools_for_interference(
self, tools: list[dict[str, BaseTool]]
) -> list[dict[str, Any]]:
"""Convert CrewAI tool format to OpenAI function calling format."""
from crewai.llms.providers.utils.common import safe_tool_conversion
from crewai.utilities.pydantic_schema_utils import (
force_additional_properties_false,
)
openai_tools = []
for tool in tools:
name, description, parameters = safe_tool_conversion(tool, "OpenAI")
openai_tool: dict[str, Any] = {
"type": "function",
"function": {
"name": name,
"description": description,
"strict": True,
},
}
if parameters:
params_dict = (
parameters if isinstance(parameters, dict) else dict(parameters)
)
params_dict = force_additional_properties_false(params_dict)
openai_tool["function"]["parameters"] = params_dict
openai_tools.append(openai_tool)
return openai_tools
    def _handle_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming chat completion.

        Args:
            params: Prepared OpenAI completion parameters (model, messages, ...).
            available_functions: Functions the LLM may invoke; when provided,
                tool calls are executed here instead of returned to the caller.
            from_task: Task that initiated the call (event metadata).
            from_agent: Agent that initiated the call (event metadata).
            response_model: Optional Pydantic model for structured parsing via
                the beta ``chat.completions.parse`` endpoint.

        Returns:
            The parsed Pydantic object (structured mode), the raw tool-call
            list (tool calls without ``available_functions``), a tool
            execution result, or the plain text content.

        Raises:
            ValueError: If the model is not found.
            ConnectionError: If the OpenAI API cannot be reached.
            LLMContextLengthExceededError: If the prompt exceeds the window.
        """
        try:
            if response_model:
                # Structured-output path: the beta parse endpoint takes the
                # model class itself as response_format, so drop any dict one.
                parse_params = {
                    k: v for k, v in params.items() if k != "response_format"
                }
                parsed_response = self.client.beta.chat.completions.parse(
                    **parse_params,
                    response_format=response_model,
                )
                math_reasoning = parsed_response.choices[0].message
                # NOTE(review): refusals are currently ignored; when nothing
                # parses, execution falls through to a fresh unstructured
                # completion call below — confirm this is intentional.
                if math_reasoning.refusal:
                    pass
                usage = self._extract_openai_token_usage(parsed_response)
                self._track_token_usage_internal(usage)
                parsed_object = parsed_response.choices[0].message.parsed
                if parsed_object:
                    self._emit_call_completed_event(
                        response=parsed_object.model_dump_json(),
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return parsed_object
            response: ChatCompletion = self.client.chat.completions.create(**params)
            usage = self._extract_openai_token_usage(response)
            self._track_token_usage_internal(usage)
            choice: Choice = response.choices[0]
            message = choice.message
            # If there are tool_calls but no available_functions, return the tool_calls
            # This allows the caller (e.g., executor) to handle tool execution
            if message.tool_calls and not available_functions:
                self._emit_call_completed_event(
                    response=list(message.tool_calls),
                    call_type=LLMCallType.TOOL_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return list(message.tool_calls)
            # If there are tool_calls and available_functions, execute the tools
            if message.tool_calls and available_functions:
                # Only the FIRST tool call is executed on this path.
                tool_call = message.tool_calls[0]
                function_name = tool_call.function.name
                try:
                    function_args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError as e:
                    logging.error(f"Failed to parse tool arguments: {e}")
                    function_args = {}
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                if result is not None:
                    return result
            content = message.content or ""
            # Validate against the instance-level response_format (a class)
            # when configured; on validation failure fall back to plain text.
            if self.response_format and isinstance(self.response_format, type):
                try:
                    structured_result = self._validate_structured_output(
                        content, self.response_format
                    )
                    self._emit_call_completed_event(
                        response=structured_result,
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return structured_result
                except ValueError as e:
                    logging.warning(f"Structured output validation failed: {e}")
            content = self._apply_stop_words(content)
            self._emit_call_completed_event(
                response=content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )
            if usage.get("total_tokens", 0) > 0:
                logging.info(f"OpenAI API usage: {usage}")
            # Give registered after-call hooks a chance to rewrite the text.
            content = self._invoke_after_llm_call_hooks(
                params["messages"], content, from_agent
            )
        except NotFoundError as e:
            error_msg = f"Model {self.model} not found: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ValueError(error_msg) from e
        except APIConnectionError as e:
            error_msg = f"Failed to connect to OpenAI API: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ConnectionError(error_msg) from e
        except Exception as e:
            # Handle context length exceeded and other errors
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            error_msg = f"OpenAI API call failed: {e!s}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise e from e
        return content
def _finalize_streaming_response(
self,
full_response: str,
tool_calls: dict[int, dict[str, Any]],
usage_data: dict[str, int],
params: dict[str, Any],
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
) -> str | list[dict[str, Any]]:
"""Finalize a streaming response with usage tracking, tool call handling, and events.
Args:
full_response: The accumulated text response from the stream.
tool_calls: Accumulated tool calls from the stream, keyed by index.
usage_data: Token usage data from the stream.
params: The completion parameters containing messages.
available_functions: Available functions for tool calling.
from_task: Task that initiated the call.
from_agent: Agent that initiated the call.
Returns:
Tool calls list when tools were invoked without available_functions,
tool execution result when available_functions is provided,
or the text response string.
"""
self._track_token_usage_internal(usage_data)
if tool_calls and not available_functions:
tool_calls_list = [
{
"id": call_data["id"],
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
"index": call_data["index"],
}
for call_data in tool_calls.values()
]
self._emit_call_completed_event(
response=tool_calls_list,
call_type=LLMCallType.TOOL_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return tool_calls_list
if tool_calls and available_functions:
for call_data in tool_calls.values():
function_name = call_data["name"]
arguments = call_data["arguments"]
if not function_name or not arguments:
continue
if function_name not in available_functions:
logging.warning(
f"Function '{function_name}' not found in available functions"
)
continue
try:
function_args = json.loads(arguments)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse streamed tool arguments: {e}")
continue
result = self._handle_tool_execution(
function_name=function_name,
function_args=function_args,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
return result
full_response = self._apply_stop_words(full_response)
self._emit_call_completed_event(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
return full_response
    def _handle_streaming_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | list[dict[str, Any]] | BaseModel:
        """Handle streaming chat completion.

        Streams content deltas (emitting a chunk event per delta) and
        accumulates tool-call fragments by index. With ``response_model``,
        the beta streaming parse API is used and the parsed object returned.

        Args:
            params: Prepared OpenAI completion parameters.
            available_functions: Functions the LLM may invoke.
            from_task: Task that initiated the call (event metadata).
            from_agent: Agent that initiated the call (event metadata).
            response_model: Optional Pydantic model for structured output.

        Returns:
            Parsed model instance, tool-call list, tool execution result, or
            the accumulated text response (empty string on parse failure).
        """
        full_response = ""
        tool_calls: dict[int, dict[str, Any]] = {}
        if response_model:
            # Structured streaming: the beta stream API takes the model class
            # directly, so drop any dict response_format and the stream flag.
            parse_params = {
                k: v
                for k, v in params.items()
                if k not in ("response_format", "stream")
            }
            stream: ChatCompletionStream[BaseModel]
            with self.client.beta.chat.completions.stream(
                **parse_params, response_format=response_model
            ) as stream:
                for chunk in stream:
                    response_id_stream = chunk.id if hasattr(chunk, "id") else None
                    if chunk.type == "content.delta":
                        delta_content = chunk.delta
                        if delta_content:
                            self._emit_stream_chunk_event(
                                chunk=delta_content,
                                from_task=from_task,
                                from_agent=from_agent,
                                response_id=response_id_stream,
                            )
            # The complete parsed message is only available once the stream
            # has been fully consumed.
            final_completion = stream.get_final_completion()
            if final_completion:
                usage = self._extract_openai_token_usage(final_completion)
                self._track_token_usage_internal(usage)
                if final_completion.choices:
                    parsed_result = final_completion.choices[0].message.parsed
                    if parsed_result:
                        self._emit_call_completed_event(
                            response=parsed_result.model_dump_json(),
                            call_type=LLMCallType.LLM_CALL,
                            from_task=from_task,
                            from_agent=from_agent,
                            messages=params["messages"],
                        )
                        return parsed_result
            logging.error("Failed to get parsed result from stream")
            return ""
        # Unstructured streaming path.
        completion_stream: Stream[ChatCompletionChunk] = (
            self.client.chat.completions.create(**params)
        )
        usage_data = {"total_tokens": 0}
        for completion_chunk in completion_stream:
            response_id_stream = (
                completion_chunk.id if hasattr(completion_chunk, "id") else None
            )
            # The final usage chunk carries no choices: record usage, move on.
            if hasattr(completion_chunk, "usage") and completion_chunk.usage:
                usage_data = self._extract_openai_token_usage(completion_chunk)
                continue
            if not completion_chunk.choices:
                continue
            choice = completion_chunk.choices[0]
            chunk_delta: ChoiceDelta = choice.delta
            if chunk_delta.content:
                full_response += chunk_delta.content
                self._emit_stream_chunk_event(
                    chunk=chunk_delta.content,
                    from_task=from_task,
                    from_agent=from_agent,
                    response_id=response_id_stream,
                )
            if chunk_delta.tool_calls:
                # Tool calls arrive fragmented across chunks; accumulate name
                # and argument text per tool-call index.
                for tool_call in chunk_delta.tool_calls:
                    tool_index = tool_call.index if tool_call.index is not None else 0
                    if tool_index not in tool_calls:
                        tool_calls[tool_index] = {
                            "id": tool_call.id,
                            "name": "",
                            "arguments": "",
                            "index": tool_index,
                        }
                    elif tool_call.id and not tool_calls[tool_index]["id"]:
                        # Backfill the id if the first fragment lacked one.
                        tool_calls[tool_index]["id"] = tool_call.id
                    if tool_call.function and tool_call.function.name:
                        tool_calls[tool_index]["name"] = tool_call.function.name
                    if tool_call.function and tool_call.function.arguments:
                        tool_calls[tool_index]["arguments"] += (
                            tool_call.function.arguments
                        )
                    self._emit_stream_chunk_event(
                        chunk=tool_call.function.arguments
                        if tool_call.function and tool_call.function.arguments
                        else "",
                        from_task=from_task,
                        from_agent=from_agent,
                        tool_call={
                            "id": tool_calls[tool_index]["id"],
                            "function": {
                                "name": tool_calls[tool_index]["name"],
                                "arguments": tool_calls[tool_index]["arguments"],
                            },
                            "type": "function",
                            "index": tool_calls[tool_index]["index"],
                        },
                        call_type=LLMCallType.TOOL_CALL,
                        response_id=response_id_stream,
                    )
        result = self._finalize_streaming_response(
            full_response=full_response,
            tool_calls=tool_calls,
            usage_data=usage_data,
            params=params,
            available_functions=available_functions,
            from_task=from_task,
            from_agent=from_agent,
        )
        # Only text responses run through after-call hooks; tool results
        # pass through unchanged.
        if isinstance(result, str):
            return self._invoke_after_llm_call_hooks(
                params["messages"], result, from_agent
            )
        return result
    async def _ahandle_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming async chat completion.

        Async counterpart of ``_handle_completion`` with the same control
        flow: structured parse, tool-call pass-through/execution, structured
        output validation, then plain text.

        Args:
            params: Prepared OpenAI completion parameters.
            available_functions: Functions the LLM may invoke; when provided,
                tool calls are executed here instead of returned to the caller.
            from_task: Task that initiated the call (event metadata).
            from_agent: Agent that initiated the call (event metadata).
            response_model: Optional Pydantic model for structured parsing.

        Returns:
            The parsed Pydantic object, tool-call list, tool execution
            result, or the plain text content.

        Raises:
            ValueError: If the model is not found.
            ConnectionError: If the OpenAI API cannot be reached.
            LLMContextLengthExceededError: If the prompt exceeds the window.
        """
        try:
            if response_model:
                # Structured-output path via the beta parse endpoint.
                parse_params = {
                    k: v for k, v in params.items() if k != "response_format"
                }
                parsed_response = await self.async_client.beta.chat.completions.parse(
                    **parse_params,
                    response_format=response_model,
                )
                math_reasoning = parsed_response.choices[0].message
                # NOTE(review): refusals are currently ignored; when nothing
                # parses, execution falls through to a fresh unstructured
                # completion call below — confirm this is intentional.
                if math_reasoning.refusal:
                    pass
                usage = self._extract_openai_token_usage(parsed_response)
                self._track_token_usage_internal(usage)
                parsed_object = parsed_response.choices[0].message.parsed
                if parsed_object:
                    self._emit_call_completed_event(
                        response=parsed_object.model_dump_json(),
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return parsed_object
            response: ChatCompletion = await self.async_client.chat.completions.create(
                **params
            )
            usage = self._extract_openai_token_usage(response)
            self._track_token_usage_internal(usage)
            choice: Choice = response.choices[0]
            message = choice.message
            # If there are tool_calls but no available_functions, return the tool_calls
            # This allows the caller (e.g., executor) to handle tool execution
            if message.tool_calls and not available_functions:
                self._emit_call_completed_event(
                    response=list(message.tool_calls),
                    call_type=LLMCallType.TOOL_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return list(message.tool_calls)
            # If there are tool_calls and available_functions, execute the tools
            if message.tool_calls and available_functions:
                # Only the FIRST tool call is executed on this path.
                tool_call = message.tool_calls[0]
                function_name = tool_call.function.name
                try:
                    function_args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError as e:
                    logging.error(f"Failed to parse tool arguments: {e}")
                    function_args = {}
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )
                if result is not None:
                    return result
            content = message.content or ""
            # Validate against the instance-level response_format (a class)
            # when configured; on validation failure fall back to plain text.
            if self.response_format and isinstance(self.response_format, type):
                try:
                    structured_result = self._validate_structured_output(
                        content, self.response_format
                    )
                    self._emit_call_completed_event(
                        response=structured_result,
                        call_type=LLMCallType.LLM_CALL,
                        from_task=from_task,
                        from_agent=from_agent,
                        messages=params["messages"],
                    )
                    return structured_result
                except ValueError as e:
                    logging.warning(f"Structured output validation failed: {e}")
            content = self._apply_stop_words(content)
            self._emit_call_completed_event(
                response=content,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )
            if usage.get("total_tokens", 0) > 0:
                logging.info(f"OpenAI API usage: {usage}")
            # NOTE(review): unlike the sync _handle_completion, after-LLM-call
            # hooks are NOT invoked here — confirm whether that is intentional.
        except NotFoundError as e:
            error_msg = f"Model {self.model} not found: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ValueError(error_msg) from e
        except APIConnectionError as e:
            error_msg = f"Failed to connect to OpenAI API: {e}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise ConnectionError(error_msg) from e
        except Exception as e:
            # Map context-window overflows to the dedicated exception type.
            if is_context_length_exceeded(e):
                logging.error(f"Context window exceeded: {e}")
                raise LLMContextLengthExceededError(str(e)) from e
            error_msg = f"OpenAI API call failed: {e!s}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise e from e
        return content
    async def _ahandle_streaming_completion(
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | list[dict[str, Any]] | BaseModel:
        """Handle async streaming chat completion.

        Mirrors ``_handle_streaming_completion`` but consumes the async
        stream. With ``response_model``, the raw text is accumulated and
        validated client-side with ``model_validate_json`` (the sync path
        uses the beta streaming parse API instead).

        Args:
            params: Prepared OpenAI completion parameters.
            available_functions: Functions the LLM may invoke.
            from_task: Task that initiated the call (event metadata).
            from_agent: Agent that initiated the call (event metadata).
            response_model: Optional Pydantic model for structured output.

        Returns:
            Parsed model instance (or raw text on parse failure), tool-call
            list, tool execution result, or the accumulated text response.
        """
        full_response = ""
        tool_calls: dict[int, dict[str, Any]] = {}
        if response_model:
            # NOTE(review): unlike the sync path, params are passed through
            # unfiltered here (including any "response_format" key) — confirm
            # this is intentional.
            completion_stream: AsyncIterator[
                ChatCompletionChunk
            ] = await self.async_client.chat.completions.create(**params)
            accumulated_content = ""
            usage_data = {"total_tokens": 0}
            async for chunk in completion_stream:
                response_id_stream = chunk.id if hasattr(chunk, "id") else None
                # The final usage chunk carries no choices.
                if hasattr(chunk, "usage") and chunk.usage:
                    usage_data = self._extract_openai_token_usage(chunk)
                    continue
                if not chunk.choices:
                    continue
                choice = chunk.choices[0]
                delta: ChoiceDelta = choice.delta
                if delta.content:
                    accumulated_content += delta.content
                    self._emit_stream_chunk_event(
                        chunk=delta.content,
                        from_task=from_task,
                        from_agent=from_agent,
                        response_id=response_id_stream,
                    )
            self._track_token_usage_internal(usage_data)
            try:
                # Validate the accumulated text against the requested model.
                parsed_object = response_model.model_validate_json(accumulated_content)
                self._emit_call_completed_event(
                    response=parsed_object.model_dump_json(),
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return parsed_object
            except Exception as e:
                # Fall back to returning the raw text when validation fails.
                logging.error(f"Failed to parse structured output from stream: {e}")
                self._emit_call_completed_event(
                    response=accumulated_content,
                    call_type=LLMCallType.LLM_CALL,
                    from_task=from_task,
                    from_agent=from_agent,
                    messages=params["messages"],
                )
                return accumulated_content
        # Unstructured streaming path.
        stream: AsyncIterator[
            ChatCompletionChunk
        ] = await self.async_client.chat.completions.create(**params)
        usage_data = {"total_tokens": 0}
        async for chunk in stream:
            response_id_stream = chunk.id if hasattr(chunk, "id") else None
            # The final usage chunk carries no choices: record usage, move on.
            if hasattr(chunk, "usage") and chunk.usage:
                usage_data = self._extract_openai_token_usage(chunk)
                continue
            if not chunk.choices:
                continue
            choice = chunk.choices[0]
            chunk_delta: ChoiceDelta = choice.delta
            if chunk_delta.content:
                full_response += chunk_delta.content
                self._emit_stream_chunk_event(
                    chunk=chunk_delta.content,
                    from_task=from_task,
                    from_agent=from_agent,
                    response_id=response_id_stream,
                )
            if chunk_delta.tool_calls:
                # Accumulate fragmented tool-call deltas by index.
                for tool_call in chunk_delta.tool_calls:
                    tool_index = tool_call.index if tool_call.index is not None else 0
                    if tool_index not in tool_calls:
                        tool_calls[tool_index] = {
                            "id": tool_call.id,
                            "name": "",
                            "arguments": "",
                            "index": tool_index,
                        }
                    elif tool_call.id and not tool_calls[tool_index]["id"]:
                        # Backfill the id if the first fragment lacked one.
                        tool_calls[tool_index]["id"] = tool_call.id
                    if tool_call.function and tool_call.function.name:
                        tool_calls[tool_index]["name"] = tool_call.function.name
                    if tool_call.function and tool_call.function.arguments:
                        tool_calls[tool_index]["arguments"] += (
                            tool_call.function.arguments
                        )
                    self._emit_stream_chunk_event(
                        chunk=tool_call.function.arguments
                        if tool_call.function and tool_call.function.arguments
                        else "",
                        from_task=from_task,
                        from_agent=from_agent,
                        tool_call={
                            "id": tool_calls[tool_index]["id"],
                            "function": {
                                "name": tool_calls[tool_index]["name"],
                                "arguments": tool_calls[tool_index]["arguments"],
                            },
                            "type": "function",
                            "index": tool_calls[tool_index]["index"],
                        },
                        call_type=LLMCallType.TOOL_CALL,
                        response_id=response_id_stream,
                    )
        return self._finalize_streaming_response(
            full_response=full_response,
            tool_calls=tool_calls,
            usage_data=usage_data,
            params=params,
            available_functions=available_functions,
            from_task=from_task,
            from_agent=from_agent,
        )
def supports_function_calling(self) -> bool:
"""Check if the model supports function calling."""
return not self.is_o1_model
def supports_stop_words(self) -> bool:
"""Check if the model supports stop words."""
return not self.is_o1_model
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
if value < min_context or value > max_context:
raise ValueError(
f"Context window for {key} must be between {min_context} and {max_context}"
)
# Context window sizes for OpenAI models
context_windows = {
"gpt-4": 8192,
"gpt-4o": 128000,
"gpt-4o-mini": 200000,
"gpt-4-turbo": 128000,
"gpt-4.1": 1047576,
"gpt-4.1-mini-2025-04-14": 1047576,
"gpt-4.1-nano-2025-04-14": 1047576,
"gpt-5": 1047576,
"gpt-5-mini": 1047576,
"gpt-5-nano": 1047576,
"o1-preview": 128000,
"o1-mini": 128000,
"o3-mini": 200000,
"o4-mini": 200000,
}
# Find the best match for the model name
for model_prefix, size in context_windows.items():
if self.model.startswith(model_prefix):
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
# Default context window size
return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)
def _extract_openai_token_usage(
self, response: ChatCompletion | ChatCompletionChunk
) -> dict[str, Any]:
"""Extract token usage from OpenAI ChatCompletion or ChatCompletionChunk response."""
if hasattr(response, "usage") and response.usage:
usage = response.usage
result = {
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
"completion_tokens": getattr(usage, "completion_tokens", 0),
"total_tokens": getattr(usage, "total_tokens", 0),
}
# Extract cached prompt tokens from prompt_tokens_details
prompt_details = getattr(usage, "prompt_tokens_details", None)
if prompt_details:
result["cached_prompt_tokens"] = (
getattr(prompt_details, "cached_tokens", 0) or 0
)
return result
return {"total_tokens": 0}
def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
"""Format messages for OpenAI API."""
base_formatted = super()._format_messages(messages)
# Apply OpenAI-specific formatting
formatted_messages: list[LLMMessage] = []
for message in base_formatted:
if self.is_o1_model and message.get("role") == "system":
formatted_messages.append(
{"role": "user", "content": f"System: {message['content']}"}
)
else:
formatted_messages.append(message)
return formatted_messages
def supports_multimodal(self) -> bool:
"""Check if the model supports multimodal inputs.
OpenAI vision-enabled models include GPT-4o, GPT-4.1, GPT-5, and o-series.
Returns:
True if the model supports images.
"""
vision_models = (
"gpt-4o",
"gpt-4.1",
"gpt-4-turbo",
"gpt-4-vision",
"gpt-5",
"o1",
"o3",
"o4",
)
return any(self.model.lower().startswith(m) for m in vision_models)
def get_file_uploader(self) -> Any:
"""Get an OpenAI file uploader using this LLM's clients.
Returns:
OpenAIFileUploader instance with pre-configured sync and async clients.
"""
try:
from crewai_files.uploaders.openai import OpenAIFileUploader
return OpenAIFileUploader(
client=self.client,
async_client=self.async_client,
)
except ImportError:
return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/providers/openai/completion.py",
"license": "MIT License",
"lines": 1995,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/project/annotations.py | """Decorators for defining crew components and their behaviors."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, overload
from crewai.project.utils import memoize
if TYPE_CHECKING:
from crewai import Agent, Crew, Task
from crewai.project.wrappers import (
AfterKickoffMethod,
AgentMethod,
BeforeKickoffMethod,
CacheHandlerMethod,
CallbackMethod,
CrewInstance,
LLMMethod,
OutputJsonClass,
OutputPydanticClass,
TaskMethod,
TaskResultT,
ToolMethod,
)
P = ParamSpec("P")
P2 = ParamSpec("P2")
R = TypeVar("R")
R2 = TypeVar("R2")
T = TypeVar("T")
SelfT = TypeVar("SelfT")
def before_kickoff(meth: Callable[P, R]) -> BeforeKickoffMethod[P, R]:
"""Marks a method to execute before crew kickoff.
Args:
meth: The method to mark.
Returns:
A wrapped method marked for before kickoff execution.
"""
return BeforeKickoffMethod(meth)
def after_kickoff(meth: Callable[P, R]) -> AfterKickoffMethod[P, R]:
"""Marks a method to execute after crew kickoff.
Args:
meth: The method to mark.
Returns:
A wrapped method marked for after kickoff execution.
"""
return AfterKickoffMethod(meth)
def task(meth: Callable[P, TaskResultT]) -> TaskMethod[P, TaskResultT]:
"""Marks a method as a crew task.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as a task with memoization.
"""
return TaskMethod(memoize(meth))
def agent(meth: Callable[P, R]) -> AgentMethod[P, R]:
"""Marks a method as a crew agent.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as an agent with memoization.
"""
return AgentMethod(memoize(meth))
def llm(meth: Callable[P, R]) -> LLMMethod[P, R]:
"""Marks a method as an LLM provider.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as an LLM provider with memoization.
"""
return LLMMethod(memoize(meth))
def output_json(cls: type[T]) -> OutputJsonClass[T]:
"""Marks a class as JSON output format.
Args:
cls: The class to mark.
Returns:
A wrapped class marked as JSON output format.
"""
return OutputJsonClass(cls)
def output_pydantic(cls: type[T]) -> OutputPydanticClass[T]:
"""Marks a class as Pydantic output format.
Args:
cls: The class to mark.
Returns:
A wrapped class marked as Pydantic output format.
"""
return OutputPydanticClass(cls)
def tool(meth: Callable[P, R]) -> ToolMethod[P, R]:
"""Marks a method as a crew tool.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as a tool with memoization.
"""
return ToolMethod(memoize(meth))
def callback(meth: Callable[P, R]) -> CallbackMethod[P, R]:
"""Marks a method as a crew callback.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as a callback with memoization.
"""
return CallbackMethod(memoize(meth))
def cache_handler(meth: Callable[P, R]) -> CacheHandlerMethod[P, R]:
"""Marks a method as a cache handler.
Args:
meth: The method to mark.
Returns:
A wrapped method marked as a cache handler with memoization.
"""
return CacheHandlerMethod(memoize(meth))
def _call_method(method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""Call a method, awaiting it if async and running in an event loop."""
result = method(*args, **kwargs)
if inspect.iscoroutine(result):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as pool:
return pool.submit(asyncio.run, result).result()
return asyncio.run(result)
return result
@overload
def crew(
    meth: Callable[Concatenate[SelfT, P], Crew],
) -> Callable[Concatenate[SelfT, P], Crew]: ...
@overload
def crew(
    meth: Callable[Concatenate[CrewInstance, P], Crew],
) -> Callable[Concatenate[CrewInstance, P], Crew]: ...
def crew(
    meth: Callable[..., Crew],
) -> Callable[..., Crew]:
    """Marks a method as the main crew execution point.

    Args:
        meth: The method to mark as crew execution point.

    Returns:
        A wrapped method that instantiates tasks and agents before execution.
    """

    @wraps(meth)
    def wrapper(self: CrewInstance, *args: Any, **kwargs: Any) -> Crew:
        """Assemble tasks and agents, then delegate to the decorated method.

        Args:
            self: The crew class instance.
            *args: Additional positional arguments.
            **kwargs: Keyword arguments to pass to the method.

        Returns:
            The configured Crew instance with callbacks attached.
        """
        task_objects: list[Task] = []
        agent_objects: list[Agent] = []
        seen_roles: set[str] = set()

        # Tasks are built first, in declaration order; an agent attached to a
        # task is collected once per unique role.
        for task_factory in self.__crew_metadata__["original_tasks"].values():
            task_obj = _call_method(task_factory, self)
            task_objects.append(task_obj)
            candidate = getattr(task_obj, "agent", None)
            if candidate and candidate.role not in seen_roles:
                seen_roles.add(candidate.role)
                agent_objects.append(candidate)

        # Agents declared on the class but not referenced by any task.
        for agent_factory in self.__crew_metadata__["original_agents"].values():
            candidate = _call_method(agent_factory, self)
            if candidate.role not in seen_roles:
                seen_roles.add(candidate.role)
                agent_objects.append(candidate)

        self.agents = agent_objects
        self.tasks = task_objects

        crew_obj: Crew = _call_method(meth, self, *args, **kwargs)

        def bind_to_instance(
            hook: Callable[Concatenate[CrewInstance, P2], R2], instance: CrewInstance
        ) -> Callable[P2, R2]:
            """Bind a hook callback to an instance.

            Args:
                hook: The callback hook to bind.
                instance: The instance to bind to.

            Returns:
                A bound callback function.
            """

            def bound(*hook_args: P2.args, **hook_kwargs: P2.kwargs) -> R2:
                """Invoke the hook with the crew instance prepended."""
                return hook(instance, *hook_args, **hook_kwargs)

            return bound

        for before_hook in self.__crew_metadata__["before_kickoff"].values():
            crew_obj.before_kickoff_callbacks.append(bind_to_instance(before_hook, self))
        for after_hook in self.__crew_metadata__["after_kickoff"].values():
            crew_obj.after_kickoff_callbacks.append(bind_to_instance(after_hook, self))

        return crew_obj

    return memoize(wrapper)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/project/annotations.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/project/crew_base.py | """Base metaclass for creating crew classes with configuration and method management."""
from __future__ import annotations
from collections.abc import Callable
import inspect
import logging
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Literal,
TypeGuard,
TypeVar,
TypedDict,
cast,
)
from dotenv import load_dotenv
import yaml
from crewai.project.wrappers import CrewClass, CrewMetadata
from crewai.tools import BaseTool
if TYPE_CHECKING:
from crewai import Agent, Task
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.crews.crew_output import CrewOutput
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
from crewai.project.wrappers import (
CrewInstance,
OutputJsonClass,
OutputPydanticClass,
)
from crewai.tasks.task_output import TaskOutput
_post_initialize_crew_hooks: list[Callable[[Any], None]] = []
class AgentConfig(TypedDict, total=False):
    """Type definition for agent configuration dictionary.

    All fields are optional as they come from YAML configuration files.
    Fields can be either string references (from YAML) or actual instances
    (after processing by ``_map_agent_variables``).
    """

    # Core agent attributes (from BaseAgent)
    role: str
    goal: str
    backstory: str
    cache: bool
    verbose: bool
    max_rpm: int
    allow_delegation: bool
    max_iter: int
    max_tokens: int
    callbacks: list[str]

    # LLM configuration; string names are resolved to instances when a
    # matching @llm-decorated factory exists, otherwise kept verbatim.
    llm: str
    function_calling_llm: str
    use_system_prompt: bool

    # Template configuration
    system_template: str
    prompt_template: str
    response_template: str

    # Tools and handlers (can be string references or instances)
    tools: list[str] | list[BaseTool]
    step_callback: str
    cache_handler: str | CacheHandler

    # Code execution
    allow_code_execution: bool
    code_execution_mode: Literal["safe", "unsafe"]

    # Context and performance
    respect_context_window: bool
    max_retry_limit: int

    # Multimodal and reasoning
    multimodal: bool
    reasoning: bool
    max_reasoning_attempts: int

    # Knowledge configuration
    knowledge_sources: list[str] | list[Any]
    knowledge_storage: str | Any
    knowledge_config: dict[str, Any]
    embedder: dict[str, Any]
    agent_knowledge_context: str
    crew_knowledge_context: str
    knowledge_search_query: str

    # Misc configuration
    inject_date: bool
    date_format: str
    from_repository: str
    guardrail: Callable[[Any], tuple[bool, Any]] | str
    guardrail_max_retries: int
class TaskConfig(TypedDict, total=False):
    """Type definition for task configuration dictionary.

    All fields are optional as they come from YAML configuration files.
    Fields can be either string references (from YAML) or actual instances
    (after processing by ``_map_task_variables``).
    """

    # Core task attributes
    name: str
    description: str
    expected_output: str

    # Agent and context; string names reference @agent / @task methods.
    agent: str
    context: list[str]

    # Tools and callbacks (can be string references or instances)
    tools: list[str] | list[BaseTool]
    callback: str
    callbacks: list[str]

    # Output configuration
    output_json: str
    output_pydantic: str
    output_file: str
    create_directory: bool

    # Execution configuration
    async_execution: bool
    human_input: bool
    markdown: bool

    # Guardrail configuration
    guardrail: Callable[[TaskOutput], tuple[bool, Any]] | str
    guardrail_max_retries: int

    # Misc configuration
    allow_crewai_trigger_context: bool
# Load environment variables from a nearby .env file at import time so they
# are available while YAML configurations are processed.
load_dotenv()

# TypeVar preserving the exact callable type through _filter_methods.
CallableT = TypeVar("CallableT", bound=Callable[..., Any])
def _set_base_directory(cls: type[CrewClass]) -> None:
"""Set the base directory for the crew class.
Args:
cls: Crew class to configure.
"""
try:
cls.base_directory = Path(inspect.getfile(cls)).parent
except (TypeError, OSError):
cls.base_directory = Path.cwd()
def _set_config_paths(cls: type[CrewClass]) -> None:
"""Set the configuration file paths for the crew class.
Args:
cls: Crew class to configure.
"""
cls.original_agents_config_path = getattr(
cls, "agents_config", "config/agents.yaml"
)
cls.original_tasks_config_path = getattr(cls, "tasks_config", "config/tasks.yaml")
def _set_mcp_params(cls: type[CrewClass]) -> None:
"""Set the MCP server parameters for the crew class.
Args:
cls: Crew class to configure.
"""
cls.mcp_server_params = getattr(cls, "mcp_server_params", None)
cls.mcp_connect_timeout = getattr(cls, "mcp_connect_timeout", 30)
def _is_string_list(value: list[str] | list[BaseTool]) -> TypeGuard[list[str]]:
"""Type guard to check if list contains strings rather than BaseTool instances.
Args:
value: List that may contain strings or BaseTool instances.
Returns:
True if all elements are strings, False otherwise.
"""
return all(isinstance(item, str) for item in value)
def _is_string_value(value: str | CacheHandler) -> TypeGuard[str]:
"""Type guard to check if value is a string rather than a CacheHandler instance.
Args:
value: Value that may be a string or CacheHandler instance.
Returns:
True if value is a string, False otherwise.
"""
return isinstance(value, str)
class CrewBaseMeta(type):
    """Metaclass that adds crew functionality to classes."""

    def __new__(
        mcs,
        name: str,
        bases: tuple[type, ...],
        namespace: dict[str, Any],
        **kwargs: Any,
    ) -> type[CrewClass]:
        """Create crew class with configuration and method injection.

        Args:
            name: Class name.
            bases: Base classes.
            namespace: Class namespace dictionary.
            **kwargs: Additional keyword arguments.

        Returns:
            New crew class with injected methods and attributes.
        """
        # Double cast via object: the checker cannot know the freshly created
        # class satisfies the CrewClass protocol until the setup below has
        # injected the required attributes and methods.
        cls = cast(
            type[CrewClass], cast(object, super().__new__(mcs, name, bases, namespace))
        )
        cls.is_crew_class = True
        cls._crew_name = name
        # Apply base-directory, config-path, and MCP-parameter setup in order.
        for setup_fn in _CLASS_SETUP_FUNCTIONS:
            setup_fn(cls)
        # Inject the module-level helpers (config loading/mapping, MCP
        # handling) as methods of the new class.
        for method in _METHODS_TO_INJECT:
            setattr(cls, method.__name__, method)
        return cls

    def __call__(cls, *args: Any, **kwargs: Any) -> CrewInstance:
        """Intercept instance creation to initialize crew functionality.

        Runs after the user's __init__ has completed, so instance attributes
        are available to the configuration-loading steps.

        Args:
            *args: Positional arguments for instance creation.
            **kwargs: Keyword arguments for instance creation.

        Returns:
            Initialized crew instance.
        """
        instance: CrewInstance = super().__call__(*args, **kwargs)
        CrewBaseMeta._initialize_crew_instance(instance, cls)
        return instance

    @staticmethod
    def _initialize_crew_instance(instance: CrewInstance, cls: type) -> None:
        """Initialize crew instance attributes and load configurations.

        Args:
            instance: Crew instance to initialize.
            cls: Crew class type.
        """
        instance._mcp_server_adapter = None
        # Order matters: configurations must be loaded before agent/task
        # variables can be mapped onto them.
        instance.load_configurations()
        instance._all_methods = _get_all_methods(instance)
        instance.map_all_agent_variables()
        instance.map_all_task_variables()
        for hook in _post_initialize_crew_hooks:
            hook(instance)
        # Collect decorator-marked methods from the class namespace;
        # dict insertion order follows declaration order.
        original_methods = {
            name: method
            for name, method in cls.__dict__.items()
            if any(
                hasattr(method, attr)
                for attr in [
                    "is_task",
                    "is_agent",
                    "is_before_kickoff",
                    "is_after_kickoff",
                    "is_kickoff",
                ]
            )
        }
        # close_mcp_server is added after the user's after-kickoff callbacks,
        # so it appears last in insertion order.
        after_kickoff_callbacks = _filter_methods(original_methods, "is_after_kickoff")
        after_kickoff_callbacks["close_mcp_server"] = instance.close_mcp_server
        instance.__crew_metadata__ = CrewMetadata(
            original_methods=original_methods,
            original_tasks=_filter_methods(original_methods, "is_task"),
            original_agents=_filter_methods(original_methods, "is_agent"),
            before_kickoff=_filter_methods(original_methods, "is_before_kickoff"),
            after_kickoff=after_kickoff_callbacks,
            kickoff=_filter_methods(original_methods, "is_kickoff"),
        )
        _register_crew_hooks(instance, cls)
def close_mcp_server(
    self: CrewInstance, _instance: CrewInstance, outputs: CrewOutput
) -> CrewOutput:
    """Stop the MCP server adapter (if any) and pass outputs through.

    Installed as an after-kickoff callback, hence the pass-through return.

    Args:
        self: Crew instance with MCP server adapter.
        _instance: Crew instance (unused, required by callback signature).
        outputs: Crew execution outputs.

    Returns:
        Unmodified crew outputs.
    """
    adapter = self._mcp_server_adapter
    if adapter is None:
        return outputs
    try:
        adapter.stop()
    except Exception as e:
        # Best-effort shutdown: a failing stop must not break kickoff results.
        logging.warning(f"Error stopping MCP server: {e}")
    return outputs
def get_mcp_tools(self: CrewInstance, *tool_names: str) -> list[BaseTool]:
    """Get MCP tools filtered by name.

    Lazily creates the MCP server adapter on first use and caches it on the
    instance so later calls reuse the same connection.

    Args:
        self: Crew instance with MCP server configuration.
        *tool_names: Optional tool names to filter by.

    Returns:
        List of filtered MCP tools, or empty list if no MCP server configured.
    """
    if not self.mcp_server_params:
        return []

    # Imported lazily: crewai_tools is only needed when MCP is configured.
    from crewai_tools import MCPServerAdapter

    adapter = self._mcp_server_adapter
    if adapter is None:
        adapter = MCPServerAdapter(
            self.mcp_server_params, connect_timeout=self.mcp_connect_timeout
        )
        self._mcp_server_adapter = adapter
    return cast(list[BaseTool], adapter.tools.filter_by_names(tool_names or None))
def _load_config(
self: CrewInstance, config_path: str | None, config_type: Literal["agent", "task"]
) -> dict[str, Any]:
"""Load YAML config file or return empty dict if not found.
Args:
self: Crew instance with base directory and load_yaml method.
config_path: Relative path to config file.
config_type: Config type for logging, either "agent" or "task".
Returns:
Config dictionary or empty dict.
"""
if isinstance(config_path, str):
full_path = self.base_directory / config_path
try:
return self.load_yaml(full_path)
except FileNotFoundError:
logging.warning(
f"{config_type.capitalize()} config file not found at {full_path}. "
f"Proceeding with empty {config_type} configurations."
)
return {}
else:
logging.warning(
f"No {config_type} configuration path provided. "
f"Proceeding with empty {config_type} configurations."
)
return {}
def load_configurations(self: CrewInstance) -> None:
    """Load agent and task YAML configurations onto the instance.

    Args:
        self: Crew instance with configuration paths.
    """
    for target_attr, path_attr, kind in (
        ("agents_config", "original_agents_config_path", "agent"),
        ("tasks_config", "original_tasks_config_path", "task"),
    ):
        setattr(self, target_attr, self._load_config(getattr(self, path_attr), kind))
def load_yaml(config_path: Path) -> dict[str, Any]:
    """Load and parse a YAML configuration file.

    Args:
        config_path: Path to YAML configuration file.

    Returns:
        Parsed YAML mapping; empty dict when the file is empty or its top
        level is not a mapping.

    Raises:
        FileNotFoundError: If config file does not exist.
    """
    try:
        with open(config_path, encoding="utf-8") as handle:
            parsed = yaml.safe_load(handle)
    except FileNotFoundError:
        logging.warning(f"File not found: {config_path}")
        raise
    return parsed if isinstance(parsed, dict) else {}
def _get_all_methods(self: CrewInstance) -> dict[str, Callable[..., Any]]:
"""Return all non-dunder callable attributes (methods).
Args:
self: Instance to inspect for callable attributes.
Returns:
Dictionary mapping method names to bound method objects.
"""
return {
name: getattr(self, name)
for name in dir(self)
if not (name.startswith("__") and name.endswith("__"))
and callable(getattr(self, name, None))
}
def _filter_methods(
methods: dict[str, CallableT], attribute: str
) -> dict[str, CallableT]:
"""Filter methods by attribute presence, preserving exact callable types.
Args:
methods: Dictionary of methods to filter.
attribute: Attribute name to check for.
Returns:
Dictionary containing only methods with the specified attribute.
The return type matches the input callable type exactly.
"""
return {
name: method for name, method in methods.items() if hasattr(method, attribute)
}
def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
"""Detect and register crew-scoped hook methods.
Args:
instance: Crew instance to register hooks for.
cls: Crew class type.
"""
hook_methods = {
name: method
for name, method in cls.__dict__.items()
if any(
hasattr(method, attr)
for attr in [
"is_before_llm_call_hook",
"is_after_llm_call_hook",
"is_before_tool_call_hook",
"is_after_tool_call_hook",
]
)
}
if not hook_methods:
return
from crewai.hooks import (
register_after_llm_call_hook,
register_after_tool_call_hook,
register_before_llm_call_hook,
register_before_tool_call_hook,
)
instance._registered_hook_functions = []
instance._hooks_being_registered = True
for hook_method in hook_methods.values():
bound_hook = hook_method.__get__(instance, cls)
has_tool_filter = hasattr(hook_method, "_filter_tools")
has_agent_filter = hasattr(hook_method, "_filter_agents")
if hasattr(hook_method, "is_before_llm_call_hook"):
if has_agent_filter:
agents_filter = hook_method._filter_agents
def make_filtered_before_llm(
bound_fn: Callable[[LLMCallHookContext], bool | None],
agents_list: list[str],
) -> Callable[[LLMCallHookContext], bool | None]:
def filtered(context: LLMCallHookContext) -> bool | None:
if context.agent and context.agent.role not in agents_list:
return None
return bound_fn(context)
return filtered
before_llm_hook = make_filtered_before_llm(bound_hook, agents_filter)
else:
before_llm_hook = bound_hook
register_before_llm_call_hook(before_llm_hook)
instance._registered_hook_functions.append(
("before_llm_call", before_llm_hook)
)
if hasattr(hook_method, "is_after_llm_call_hook"):
if has_agent_filter:
agents_filter = hook_method._filter_agents
def make_filtered_after_llm(
bound_fn: Callable[[LLMCallHookContext], str | None],
agents_list: list[str],
) -> Callable[[LLMCallHookContext], str | None]:
def filtered(context: LLMCallHookContext) -> str | None:
if context.agent and context.agent.role not in agents_list:
return None
return bound_fn(context)
return filtered
after_llm_hook = make_filtered_after_llm(bound_hook, agents_filter)
else:
after_llm_hook = bound_hook
register_after_llm_call_hook(after_llm_hook)
instance._registered_hook_functions.append(
("after_llm_call", after_llm_hook)
)
if hasattr(hook_method, "is_before_tool_call_hook"):
if has_tool_filter or has_agent_filter:
tools_filter = getattr(hook_method, "_filter_tools", None)
agents_filter = getattr(hook_method, "_filter_agents", None)
def make_filtered_before_tool(
bound_fn: Callable[[ToolCallHookContext], bool | None],
tools_list: list[str] | None,
agents_list: list[str] | None,
) -> Callable[[ToolCallHookContext], bool | None]:
def filtered(context: ToolCallHookContext) -> bool | None:
if tools_list and context.tool_name not in tools_list:
return None
if (
agents_list
and context.agent
and context.agent.role not in agents_list
):
return None
return bound_fn(context)
return filtered
before_tool_hook = make_filtered_before_tool(
bound_hook, tools_filter, agents_filter
)
else:
before_tool_hook = bound_hook
register_before_tool_call_hook(before_tool_hook)
instance._registered_hook_functions.append(
("before_tool_call", before_tool_hook)
)
if hasattr(hook_method, "is_after_tool_call_hook"):
if has_tool_filter or has_agent_filter:
tools_filter = getattr(hook_method, "_filter_tools", None)
agents_filter = getattr(hook_method, "_filter_agents", None)
def make_filtered_after_tool(
bound_fn: Callable[[ToolCallHookContext], str | None],
tools_list: list[str] | None,
agents_list: list[str] | None,
) -> Callable[[ToolCallHookContext], str | None]:
def filtered(context: ToolCallHookContext) -> str | None:
if tools_list and context.tool_name not in tools_list:
return None
if (
agents_list
and context.agent
and context.agent.role not in agents_list
):
return None
return bound_fn(context)
return filtered
after_tool_hook = make_filtered_after_tool(
bound_hook, tools_filter, agents_filter
)
else:
after_tool_hook = bound_hook
register_after_tool_call_hook(after_tool_hook)
instance._registered_hook_functions.append(
("after_tool_call", after_tool_hook)
)
instance._hooks_being_registered = False
def map_all_agent_variables(self: CrewInstance) -> None:
    """Map agent configuration variables to callable instances.

    Args:
        self: Crew instance with agent configurations to map.
    """
    # Resolve each category of decorated provider once, up front.
    providers = {
        marker: _filter_methods(self._all_methods, marker)
        for marker in ("is_llm", "is_tool", "is_cache_handler", "is_callback")
    }
    for agent_name, agent_info in self.agents_config.items():
        self._map_agent_variables(
            agent_name=agent_name,
            agent_info=agent_info,
            llms=providers["is_llm"],
            tool_functions=providers["is_tool"],
            cache_handler_functions=providers["is_cache_handler"],
            callbacks=providers["is_callback"],
        )
def _map_agent_variables(
self: CrewInstance,
agent_name: str,
agent_info: AgentConfig,
llms: dict[str, Callable[[], Any]],
tool_functions: dict[str, Callable[[], BaseTool]],
cache_handler_functions: dict[str, Callable[[], Any]],
callbacks: dict[str, Callable[..., Any]],
) -> None:
"""Resolve and map variables for a single agent.
Args:
self: Crew instance with agent configurations.
agent_name: Name of agent to configure.
agent_info: Agent configuration dictionary with optional fields.
llms: Dictionary mapping names to LLM factory functions.
tool_functions: Dictionary mapping names to tool factory functions.
cache_handler_functions: Dictionary mapping names to cache handler factory functions.
callbacks: Dictionary of available callbacks.
"""
if llm := agent_info.get("llm"):
factory = llms.get(llm)
self.agents_config[agent_name]["llm"] = factory() if factory else llm
if tools := agent_info.get("tools"):
if _is_string_list(tools):
self.agents_config[agent_name]["tools"] = [
tool_functions[tool]() for tool in tools
]
if function_calling_llm := agent_info.get("function_calling_llm"):
factory = llms.get(function_calling_llm)
self.agents_config[agent_name]["function_calling_llm"] = (
factory() if factory else function_calling_llm
)
if step_callback := agent_info.get("step_callback"):
self.agents_config[agent_name]["step_callback"] = callbacks[step_callback]()
if cache_handler := agent_info.get("cache_handler"):
if _is_string_value(cache_handler):
self.agents_config[agent_name]["cache_handler"] = cache_handler_functions[
cache_handler
]()
def map_all_task_variables(self: CrewInstance) -> None:
    """Map task configuration variables to callable instances.

    Args:
        self: Crew instance with task configurations to map.
    """
    # Resolve each category of decorated provider once, up front.
    markers = (
        "is_agent",
        "is_task",
        "is_output_json",
        "is_tool",
        "is_callback",
        "is_output_pydantic",
    )
    resolved = {marker: _filter_methods(self._all_methods, marker) for marker in markers}
    for task_name, task_info in self.tasks_config.items():
        self._map_task_variables(
            task_name=task_name,
            task_info=task_info,
            agents=resolved["is_agent"],
            tasks=resolved["is_task"],
            output_json_functions=resolved["is_output_json"],
            tool_functions=resolved["is_tool"],
            callback_functions=resolved["is_callback"],
            output_pydantic_functions=resolved["is_output_pydantic"],
        )
def _map_task_variables(
self: CrewInstance,
task_name: str,
task_info: TaskConfig,
agents: dict[str, Callable[[], Agent]],
tasks: dict[str, Callable[[], Task]],
output_json_functions: dict[str, OutputJsonClass[Any]],
tool_functions: dict[str, Callable[[], BaseTool]],
callback_functions: dict[str, Callable[..., Any]],
output_pydantic_functions: dict[str, OutputPydanticClass[Any]],
) -> None:
"""Resolve and map variables for a single task.
Args:
self: Crew instance with task configurations.
task_name: Name of task to configure.
task_info: Task configuration dictionary with optional fields.
agents: Dictionary mapping names to agent factory functions.
tasks: Dictionary mapping names to task factory functions.
output_json_functions: Dictionary of JSON output class wrappers.
tool_functions: Dictionary mapping names to tool factory functions.
callback_functions: Dictionary of available callbacks.
output_pydantic_functions: Dictionary of Pydantic output class wrappers.
"""
if context_list := task_info.get("context"):
self.tasks_config[task_name]["context"] = [
tasks[context_task_name]() for context_task_name in context_list
]
if tools := task_info.get("tools"):
if _is_string_list(tools):
self.tasks_config[task_name]["tools"] = [
tool_functions[tool]() for tool in tools
]
if agent_name := task_info.get("agent"):
self.tasks_config[task_name]["agent"] = agents[agent_name]()
if output_json := task_info.get("output_json"):
self.tasks_config[task_name]["output_json"] = output_json_functions[output_json]
if output_pydantic := task_info.get("output_pydantic"):
self.tasks_config[task_name]["output_pydantic"] = output_pydantic_functions[
output_pydantic
]
if callbacks := task_info.get("callbacks"):
self.tasks_config[task_name]["callbacks"] = [
callback_functions[callback]() for callback in callbacks
]
if guardrail := task_info.get("guardrail"):
self.tasks_config[task_name]["guardrail"] = guardrail
# Class-level setup steps applied by CrewBaseMeta.__new__, in this order.
_CLASS_SETUP_FUNCTIONS: tuple[Callable[[type[CrewClass]], None], ...] = (
    _set_base_directory,
    _set_config_paths,
    _set_mcp_params,
)
# Module-level functions injected onto every crew class as methods by
# CrewBaseMeta.__new__; load_yaml takes no self and is wrapped as a
# staticmethod.
_METHODS_TO_INJECT = (
    close_mcp_server,
    get_mcp_tools,
    _load_config,
    load_configurations,
    staticmethod(load_yaml),
    map_all_agent_variables,
    _map_agent_variables,
    map_all_task_variables,
    _map_task_variables,
)
class _CrewBaseType(type):
    """Metaclass for CrewBase that makes it callable as a decorator."""

    def __call__(cls, decorated_cls: type) -> type[CrewClass]:
        """Apply CrewBaseMeta to the decorated class.

        Rebuilds the decorated class from its name, bases, and namespace so
        that CrewBaseMeta runs as its metaclass.

        Args:
            decorated_cls: Class to transform with CrewBaseMeta metaclass.

        Returns:
            New class with CrewBaseMeta metaclass applied.
        """
        namespace: dict[str, Any] = {}
        for key, value in decorated_cls.__dict__.items():
            # Exclude descriptors that type() recreates on the new class.
            if key in ("__dict__", "__weakref__"):
                continue
            namespace[key] = value
        # Slot descriptors must not be carried over into the new namespace.
        for slot in namespace.get("__slots__", ()):
            namespace.pop(slot, None)
        namespace["__metaclass__"] = CrewBaseMeta
        rebuilt = CrewBaseMeta(
            str(decorated_cls.__name__), tuple(decorated_cls.__bases__), namespace
        )
        return cast(type[CrewClass], rebuilt)
class CrewBase(metaclass=_CrewBaseType):
    """Class decorator that applies CrewBaseMeta metaclass.

    Applies CrewBaseMeta metaclass to a class via decorator syntax rather than
    explicit metaclass declaration. Use as @CrewBase instead of
    class Foo(metaclass=CrewBaseMeta).

    Note:
        Reference: https://stackoverflow.com/questions/11091609/setting-a-class-metaclass-using-a-decorator
    """

    if TYPE_CHECKING:

        def __init__(self, *args: Any, **kwargs: Any) -> None:
            """Type stub so @CrewBase type-checks as a callable decorator.

            Never executed: "calling" CrewBase is intercepted by
            _CrewBaseType.__call__, which receives the decorated class and
            returns the rebuilt class instead of an instance.
            """
            ...
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/project/crew_base.py",
"license": "MIT License",
"lines": 657,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/project/utils.py | """Utility functions for the crewai project module."""
from collections.abc import Callable, Coroutine
from functools import wraps
import inspect
from typing import Any, ParamSpec, TypeVar, cast
from pydantic import BaseModel
from crewai.agents.cache.cache_handler import CacheHandler
P = ParamSpec("P")
R = TypeVar("R")

# Module-level cache shared by all memoized methods; entries are keyed by
# method name plus a stringified form of the call arguments.
cache = CacheHandler()
def _make_hashable(arg: Any) -> Any:
    """Convert argument to a deterministic, hashable form for cache keys.

    Args:
        arg: The argument to convert.

    Returns:
        Hashable representation of the argument: BaseModel instances become
        their JSON dump, dicts become key-sorted tuples (so insertion order
        does not change the key), sequences become tuples of converted items,
        and arbitrary objects with a ``__dict__`` are represented by identity.
    """
    if isinstance(arg, BaseModel):
        return arg.model_dump_json()
    if isinstance(arg, dict):
        return tuple(sorted((k, _make_hashable(v)) for k, v in arg.items()))
    # Recurse into tuples as well as lists so nested dicts/models inside a
    # tuple are normalized instead of being passed through unchanged.
    if isinstance(arg, (list, tuple)):
        return tuple(_make_hashable(item) for item in arg)
    if hasattr(arg, "__dict__"):
        return ("__instance__", id(arg))
    return arg
def memoize(meth: Callable[P, R]) -> Callable[P, R]:
    """Memoize a method by caching its results based on arguments.

    Handles both sync and async methods. Pydantic BaseModel instances are
    converted to JSON strings before hashing for cache lookup.

    Args:
        meth: The method to memoize.

    Returns:
        A memoized version of the method that caches results.
    """
    # Coroutine functions need an awaiting wrapper; everything else is
    # wrapped synchronously.
    if not inspect.iscoroutinefunction(meth):
        return _memoize_sync(meth)
    return cast(Callable[P, R], _memoize_async(meth))
def _memoize_sync(meth: Callable[P, R]) -> Callable[P, R]:
    """Memoize a synchronous method.

    NOTE(review): a stored result of None reads back as a cache miss here,
    so methods returning None are re-executed on every call.
    """

    @wraps(meth)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # Build a deterministic string key from normalized args/kwargs.
        key_parts = (
            tuple(_make_hashable(value) for value in args),
            tuple(sorted((name, _make_hashable(value)) for name, value in kwargs.items())),
        )
        cache_key = str(key_parts)
        hit: R | None = cache.read(tool=meth.__name__, input=cache_key)
        if hit is not None:
            return hit
        computed = meth(*args, **kwargs)
        cache.add(tool=meth.__name__, input=cache_key, output=computed)
        return computed

    return cast(Callable[P, R], wrapper)
def _memoize_async(
    meth: Callable[P, Coroutine[Any, Any, R]],
) -> Callable[P, Coroutine[Any, Any, R]]:
    """Memoize an async method.

    NOTE(review): a stored result of None reads back as a cache miss here,
    so coroutines returning None are awaited again on every call.
    """

    @wraps(meth)
    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # Build a deterministic string key from normalized args/kwargs.
        key_parts = (
            tuple(_make_hashable(value) for value in args),
            tuple(sorted((name, _make_hashable(value)) for name, value in kwargs.items())),
        )
        cache_key = str(key_parts)
        hit: R | None = cache.read(tool=meth.__name__, input=cache_key)
        if hit is not None:
            return hit
        computed = await meth(*args, **kwargs)
        cache.add(tool=meth.__name__, input=cache_key, output=computed)
        return computed

    return wrapper
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/project/utils.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/project/wrappers.py | """Wrapper classes for decorated methods with type-safe metadata."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from functools import partial
import inspect
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Generic,
Literal,
ParamSpec,
Protocol,
TypeVar,
TypedDict,
)
from typing_extensions import Self
if TYPE_CHECKING:
from crewai import Agent, Crew, Task
from crewai.crews.crew_output import CrewOutput
from crewai.tools import BaseTool
class CrewMetadata(TypedDict):
    """Type definition for crew metadata dictionary.

    Stores framework-injected metadata about decorated methods and callbacks.
    """

    # Every decorator-marked method discovered on the class, by name.
    original_methods: dict[str, Callable[..., Any]]
    # Methods carrying the is_task / is_agent markers, by name.
    original_tasks: dict[str, Callable[..., Task]]
    original_agents: dict[str, Callable[..., Agent]]
    # Callbacks to run before/after crew kickoff.
    before_kickoff: dict[str, Callable[..., Any]]
    after_kickoff: dict[str, Callable[..., Any]]
    # Methods marked as kickoff entry points.
    kickoff: dict[str, Callable[..., Any]]
P = ParamSpec("P")
R = TypeVar("R")
T = TypeVar("T")


class TaskResult(Protocol):
    """Protocol for task objects that have a name attribute."""

    # May be None until assigned (see TaskMethod.ensure_task_name).
    name: str | None


TaskResultT = TypeVar("TaskResultT", bound=TaskResult)
def _copy_method_metadata(wrapper: Any, meth: Callable[..., Any]) -> None:
"""Copy method metadata to a wrapper object.
Args:
wrapper: The wrapper object to update.
meth: The method to copy metadata from.
"""
wrapper.__name__ = meth.__name__
wrapper.__doc__ = meth.__doc__
class CrewInstance(Protocol):
    """Protocol for crew class instances with required attributes."""

    # Framework-injected metadata describing decorated methods/callbacks.
    __crew_metadata__: CrewMetadata
    # Lazily-created MCP server adapter; None until first use.
    _mcp_server_adapter: Any
    # Cache of all non-dunder callables found on the instance.
    _all_methods: dict[str, Callable[..., Any]]
    # (hook label, registered callable) pairs recorded for later cleanup.
    _registered_hook_functions: list[tuple[str, Callable[..., Any]]]
    _hooks_being_registered: bool
    # Agents/tasks instantiated before crew execution.
    agents: list[Agent]
    tasks: list[Task]
    # Filesystem location of the crew class and its YAML configuration.
    base_directory: Path
    original_agents_config_path: str
    original_tasks_config_path: str
    agents_config: dict[str, Any]
    tasks_config: dict[str, Any]
    # MCP server connection settings.
    mcp_server_params: Any
    mcp_connect_timeout: int

    def load_configurations(self) -> None: ...
    def map_all_agent_variables(self) -> None: ...
    def map_all_task_variables(self) -> None: ...
    def close_mcp_server(self, instance: Self, outputs: CrewOutput) -> CrewOutput: ...
    def _load_config(
        self, config_path: str | None, config_type: Literal["agent", "task"]
    ) -> dict[str, Any]: ...
    def _map_agent_variables(
        self,
        agent_name: str,
        agent_info: dict[str, Any],
        llms: dict[str, Callable[..., Any]],
        tool_functions: dict[str, Callable[..., Any]],
        cache_handler_functions: dict[str, Callable[..., Any]],
        callbacks: dict[str, Callable[..., Any]],
    ) -> None: ...
    def _map_task_variables(
        self,
        task_name: str,
        task_info: dict[str, Any],
        agents: dict[str, Callable[..., Any]],
        tasks: dict[str, Callable[..., Any]],
        output_json_functions: dict[str, Callable[..., Any]],
        tool_functions: dict[str, Callable[..., Any]],
        callback_functions: dict[str, Callable[..., Any]],
        output_pydantic_functions: dict[str, Callable[..., Any]],
    ) -> None: ...
    def load_yaml(self, config_path: Path) -> dict[str, Any]: ...
class CrewClass(Protocol):
    """Protocol describing class attributes injected by CrewBaseMeta."""

    # Flags set directly by the metaclass.
    is_crew_class: bool
    _crew_name: str
    # Set by the class setup functions (base directory, config paths, MCP).
    base_directory: Path
    original_agents_config_path: str
    original_tasks_config_path: str
    mcp_server_params: Any
    mcp_connect_timeout: int
    # Module-level helpers injected onto the class as methods.
    close_mcp_server: Callable[..., Any]
    get_mcp_tools: Callable[..., list[BaseTool]]
    _load_config: Callable[..., dict[str, Any]]
    load_configurations: Callable[..., None]
    load_yaml: Callable[..., dict[str, Any]]
    map_all_agent_variables: Callable[..., None]
    _map_agent_variables: Callable[..., None]
    map_all_task_variables: Callable[..., None]
    _map_task_variables: Callable[..., None]
    # The user-decorated crew factory method.
    crew: Callable[..., Crew]
def _resolve_result(result: Any) -> Any:
"""Resolve a potentially async result to its value."""
if inspect.iscoroutine(result):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as pool:
return pool.submit(asyncio.run, result).result()
return asyncio.run(result)
return result
class DecoratedMethod(Generic[P, R]):
    """Base wrapper for methods with decorator metadata.

    This class provides a type-safe way to add metadata to methods
    while preserving their callable signature and attributes.
    """

    # Marker attributes mirrored onto bound callables so instance-level
    # introspection still sees the decorator markers.
    _MARKER_ATTRS = (
        "is_agent",
        "is_llm",
        "is_tool",
        "is_callback",
        "is_cache_handler",
        "is_before_kickoff",
        "is_after_kickoff",
        "is_crew",
    )

    def __init__(self, meth: Callable[P, R]) -> None:
        """Initialize the decorated method wrapper.

        Args:
            meth: The method to wrap.
        """
        self._meth = meth
        _copy_method_metadata(self, meth)

    def __get__(
        self, obj: Any, objtype: type[Any] | None = None
    ) -> Self | Callable[..., R]:
        """Support instance methods by implementing the descriptor protocol.

        Args:
            obj: The instance that the method is accessed through.
            objtype: The type of the instance.

        Returns:
            Self when accessed through the class; otherwise a bound callable
            that resolves coroutine results and carries the marker attributes.
        """
        if obj is None:
            return self

        call_with_instance = partial(self._meth, obj)

        def _bound(*args: Any, **kwargs: Any) -> R:
            outcome: R = _resolve_result(call_with_instance(*args, **kwargs))  # type: ignore[call-arg]
            return outcome

        for marker in self._MARKER_ATTRS:
            if hasattr(self, marker):
                setattr(_bound, marker, getattr(self, marker))
        return _bound

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
        """Call the wrapped method directly (unbound access).

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            The result of calling the wrapped method.
        """
        return self._meth(*args, **kwargs)

    def unwrap(self) -> Callable[P, R]:
        """Get the original unwrapped method.

        Returns:
            The original method before decoration.
        """
        return self._meth
class BeforeKickoffMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked to execute before crew kickoff."""

    # Marker attribute detected when crew metadata is collected.
    is_before_kickoff: bool = True
class AfterKickoffMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked to execute after crew kickoff."""

    # Marker attribute detected when crew metadata is collected.
    is_after_kickoff: bool = True
class BoundTaskMethod(Generic[TaskResultT]):
    """A task method bound to a specific instance, marked with ``is_task``."""

    # Marker attribute identifying this callable as a crew task.
    is_task: bool = True

    def __init__(self, task_method: TaskMethod[Any, TaskResultT], obj: Any) -> None:
        """Bind ``task_method`` to ``obj``.

        Args:
            task_method: The TaskMethod descriptor instance.
            obj: The instance to bind to.
        """
        self._task_method = task_method
        self._obj = obj

    def __call__(self, *args: Any, **kwargs: Any) -> TaskResultT:
        """Execute the bound task method.

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            The task result, with its name guaranteed to be set.
        """
        raw = self._task_method.unwrap()(self._obj, *args, **kwargs)
        # Resolve any awaitable result before applying the task-name guarantee.
        resolved = _resolve_result(raw)
        return self._task_method.ensure_task_name(resolved)
class TaskMethod(Generic[P, TaskResultT]):
    """Wrapper marking a method as a crew task."""

    # Marker attribute identifying this callable as a crew task.
    is_task: bool = True

    def __init__(self, meth: Callable[P, TaskResultT]) -> None:
        """Wrap ``meth`` and copy its metadata onto this wrapper.

        Args:
            meth: The method to wrap.
        """
        self._meth = meth
        _copy_method_metadata(self, meth)

    def ensure_task_name(self, result: TaskResultT) -> TaskResultT:
        """Guarantee the task result carries a name.

        Args:
            result: The task result to check.

        Returns:
            The same result, named after the wrapped method if it had no name.
        """
        if result.name:
            return result
        # Fall back to the method's own name when none was supplied.
        result.name = self._meth.__name__
        return result

    def __get__(
        self, obj: Any, objtype: type[Any] | None = None
    ) -> Self | BoundTaskMethod[TaskResultT]:
        """Descriptor protocol hook supporting instance methods.

        Args:
            obj: The instance the attribute is accessed through, or ``None``
                when accessed on the class itself.
            objtype: The owning class, if provided.

        Returns:
            This wrapper when accessed via the class; otherwise a
            BoundTaskMethod tied to ``obj``.
        """
        return self if obj is None else BoundTaskMethod(self, obj)

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> TaskResultT:
        """Call the wrapped method and guarantee the result is named.

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            The task instance, named after the method if no name was provided.
        """
        outcome = _resolve_result(self._meth(*args, **kwargs))
        return self.ensure_task_name(outcome)

    def unwrap(self) -> Callable[P, TaskResultT]:
        """Return the original method, prior to decoration."""
        return self._meth
class AgentMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as crew agents."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_agent: bool = True
class LLMMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as LLM providers."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_llm: bool = True
class ToolMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as crew tools."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_tool: bool = True
class CallbackMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as crew callbacks."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_callback: bool = True
class CacheHandlerMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as cache handlers."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_cache_handler: bool = True
class CrewMethod(DecoratedMethod[P, R]):
    """Wrapper for methods marked as the main crew execution point."""

    # Class-level marker flag; DecoratedMethod.__get__ copies it onto bound methods.
    is_crew: bool = True
class OutputClass(Generic[T]):
    """Base wrapper for classes marked as an output format."""

    def __init__(self, cls: type[T]) -> None:
        """Wrap ``cls``, mirroring its identifying metadata.

        Args:
            cls: The class to wrap.
        """
        self._cls = cls
        # Mirror identity metadata so the wrapper presents as the wrapped class.
        for meta in ("__name__", "__qualname__", "__module__", "__doc__"):
            setattr(self, meta, getattr(cls, meta))

    def __call__(self, *args: Any, **kwargs: Any) -> T:
        """Instantiate the wrapped class.

        Args:
            *args: Positional arguments for the class constructor.
            **kwargs: Keyword arguments for the class constructor.

        Returns:
            An instance of the wrapped class.
        """
        return self._cls(*args, **kwargs)

    def __getattr__(self, name: str) -> Any:
        """Fall back to the wrapped class for attributes not found here.

        Args:
            name: The attribute name.

        Returns:
            The attribute looked up on the wrapped class.
        """
        return getattr(self._cls, name)
class OutputJsonClass(OutputClass[T]):
    """Wrapper for classes marked as JSON output format."""

    # Class-level marker flag identifying the wrapped class as JSON output.
    is_output_json: bool = True
class OutputPydanticClass(OutputClass[T]):
    """Wrapper for classes marked as Pydantic output format."""

    # Class-level marker flag identifying the wrapped class as Pydantic output.
    is_output_pydantic: bool = True
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/project/wrappers.py",
"license": "MIT License",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/tools/mcp_tool_wrapper.py | """MCP Tool Wrapper for on-demand MCP server connections."""
import asyncio
from crewai.tools import BaseTool
MCP_CONNECTION_TIMEOUT = 15
MCP_TOOL_EXECUTION_TIMEOUT = 60
MCP_DISCOVERY_TIMEOUT = 15
MCP_MAX_RETRIES = 3
class MCPToolWrapper(BaseTool):
"""Lightweight wrapper for MCP tools that connects on-demand."""
def __init__(
self,
mcp_server_params: dict,
tool_name: str,
tool_schema: dict,
server_name: str,
):
"""Initialize the MCP tool wrapper.
Args:
mcp_server_params: Parameters for connecting to the MCP server
tool_name: Original name of the tool on the MCP server
tool_schema: Schema information for the tool
server_name: Name of the MCP server for prefixing
"""
# Create tool name with server prefix to avoid conflicts
prefixed_name = f"{server_name}_{tool_name}"
# Handle args_schema properly - BaseTool expects a BaseModel subclass
args_schema = tool_schema.get("args_schema")
# Only pass args_schema if it's provided
kwargs = {
"name": prefixed_name,
"description": tool_schema.get(
"description", f"Tool {tool_name} from {server_name}"
),
}
if args_schema is not None:
kwargs["args_schema"] = args_schema
super().__init__(**kwargs)
# Set instance attributes after super().__init__
self._mcp_server_params = mcp_server_params
self._original_tool_name = tool_name
self._server_name = server_name
@property
def mcp_server_params(self) -> dict:
"""Get the MCP server parameters."""
return self._mcp_server_params
@property
def original_tool_name(self) -> str:
"""Get the original tool name."""
return self._original_tool_name
@property
def server_name(self) -> str:
"""Get the server name."""
return self._server_name
def _run(self, **kwargs) -> str:
"""Connect to MCP server and execute tool.
Args:
**kwargs: Arguments to pass to the MCP tool
Returns:
Result from the MCP tool execution
"""
try:
return asyncio.run(self._run_async(**kwargs))
except asyncio.TimeoutError:
return f"MCP tool '{self.original_tool_name}' timed out after {MCP_TOOL_EXECUTION_TIMEOUT} seconds"
except Exception as e:
return f"Error executing MCP tool {self.original_tool_name}: {e!s}"
async def _run_async(self, **kwargs) -> str:
"""Async implementation of MCP tool execution with timeouts and retry logic."""
return await self._retry_with_exponential_backoff(
self._execute_tool_with_timeout, **kwargs
)
async def _retry_with_exponential_backoff(self, operation_func, **kwargs) -> str:
"""Retry operation with exponential backoff, avoiding try-except in loop for performance."""
last_error = None
for attempt in range(MCP_MAX_RETRIES):
# Execute single attempt outside try-except loop structure
result, error, should_retry = await self._execute_single_attempt(
operation_func, **kwargs
)
# Success case - return immediately
if result is not None:
return result
# Non-retryable error - return immediately
if not should_retry:
return error
# Retryable error - continue with backoff
last_error = error
if attempt < MCP_MAX_RETRIES - 1:
wait_time = 2**attempt # Exponential backoff
await asyncio.sleep(wait_time)
return (
f"MCP tool execution failed after {MCP_MAX_RETRIES} attempts: {last_error}"
)
async def _execute_single_attempt(
self, operation_func, **kwargs
) -> tuple[str | None, str, bool]:
"""Execute single operation attempt and return (result, error_message, should_retry)."""
try:
result = await operation_func(**kwargs)
return result, "", False
except ImportError:
return (
None,
"MCP library not available. Please install with: pip install mcp",
False,
)
except asyncio.TimeoutError:
return (
None,
f"Connection timed out after {MCP_TOOL_EXECUTION_TIMEOUT} seconds",
True,
)
except Exception as e:
error_str = str(e).lower()
# Classify errors as retryable or non-retryable
if "authentication" in error_str or "unauthorized" in error_str:
return None, f"Authentication failed for MCP server: {e!s}", False
if "not found" in error_str:
return (
None,
f"Tool '{self.original_tool_name}' not found on MCP server",
False,
)
if "connection" in error_str or "network" in error_str:
return None, f"Network connection failed: {e!s}", True
if "json" in error_str or "parsing" in error_str:
return None, f"Server response parsing error: {e!s}", True
return None, f"MCP execution error: {e!s}", False
async def _execute_tool_with_timeout(self, **kwargs) -> str:
"""Execute tool with timeout wrapper."""
return await asyncio.wait_for(
self._execute_tool(**kwargs), timeout=MCP_TOOL_EXECUTION_TIMEOUT
)
async def _execute_tool(self, **kwargs) -> str:
"""Execute the actual MCP tool call."""
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
server_url = self.mcp_server_params["url"]
try:
# Wrap entire operation with single timeout
async def _do_mcp_call():
async with streamablehttp_client(
server_url, terminate_on_close=True
) as (read, write, _):
async with ClientSession(read, write) as session:
await session.initialize()
result = await session.call_tool(
self.original_tool_name, kwargs
)
# Extract the result content
if hasattr(result, "content") and result.content:
if (
isinstance(result.content, list)
and len(result.content) > 0
):
content_item = result.content[0]
if hasattr(content_item, "text"):
return str(content_item.text)
return str(content_item)
return str(result.content)
return str(result)
return await asyncio.wait_for(
_do_mcp_call(), timeout=MCP_TOOL_EXECUTION_TIMEOUT
)
except asyncio.CancelledError as e:
raise asyncio.TimeoutError("MCP operation was cancelled") from e
except Exception as e:
if hasattr(e, "__cause__") and e.__cause__:
raise asyncio.TimeoutError(
f"MCP connection error: {e.__cause__}"
) from e.__cause__
if "TaskGroup" in str(e) or "unhandled errors" in str(e):
raise asyncio.TimeoutError(f"MCP connection error: {e}") from e
raise
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/tools/mcp_tool_wrapper.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/utilities/guardrail_types.py | """Type aliases for guardrails."""
from __future__ import annotations
from collections.abc import Callable, Sequence
from typing import Any, TypeAlias
from crewai.lite_agent_output import LiteAgentOutput
from crewai.tasks.task_output import TaskOutput
GuardrailCallable: TypeAlias = Callable[
[TaskOutput | LiteAgentOutput], tuple[bool, Any]
]
GuardrailType: TypeAlias = GuardrailCallable | str
GuardrailsType: TypeAlias = Sequence[GuardrailType] | GuardrailType
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/utilities/guardrail_types.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/utilities/rw_lock.py | """Read-write lock for thread-safe concurrent access.
This module provides a reader-writer lock implementation that allows multiple
concurrent readers or a single exclusive writer.
"""
from collections.abc import Generator
from contextlib import contextmanager
from threading import Condition
class RWLock:
"""Read-write lock for managing concurrent read and exclusive write access.
Allows multiple threads to acquire read locks simultaneously, but ensures
exclusive access for write operations. Writers are prioritized when waiting.
Attributes:
_cond: Condition variable for coordinating lock access
_readers: Count of active readers
_writer: Whether a writer currently holds the lock
"""
def __init__(self) -> None:
"""Initialize the read-write lock."""
self._cond = Condition()
self._readers = 0
self._writer = False
def r_acquire(self) -> None:
"""Acquire a read lock, blocking if a writer holds the lock."""
with self._cond:
while self._writer:
self._cond.wait()
self._readers += 1
def r_release(self) -> None:
"""Release a read lock and notify waiting writers if last reader."""
with self._cond:
self._readers -= 1
if self._readers == 0:
self._cond.notify_all()
@contextmanager
def r_locked(self) -> Generator[None, None, None]:
"""Context manager for acquiring a read lock.
Yields:
None
"""
try:
self.r_acquire()
yield
finally:
self.r_release()
def w_acquire(self) -> None:
"""Acquire a write lock, blocking if any readers or writers are active."""
with self._cond:
while self._writer or self._readers > 0:
self._cond.wait()
self._writer = True
def w_release(self) -> None:
"""Release a write lock and notify all waiting threads."""
with self._cond:
self._writer = False
self._cond.notify_all()
@contextmanager
def w_locked(self) -> Generator[None, None, None]:
"""Context manager for acquiring a write lock.
Yields:
None
"""
try:
self.w_acquire()
yield
finally:
self.w_release()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/utilities/rw_lock.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/cli/triggers/test_main.py | import json
import subprocess
import unittest
from unittest.mock import Mock, patch
import httpx
from crewai.cli.triggers.main import TriggersCommand
class TestTriggersCommand(unittest.TestCase):
@patch("crewai.cli.command.get_auth_token")
@patch("crewai.cli.command.PlusAPI")
def setUp(self, mock_plus_api, mock_get_auth_token):
self.mock_get_auth_token = mock_get_auth_token
self.mock_plus_api = mock_plus_api
self.mock_get_auth_token.return_value = "test_token"
self.triggers_command = TriggersCommand()
self.mock_client = self.triggers_command.plus_api_client
@patch("crewai.cli.triggers.main.console.print")
def test_list_triggers_success(self, mock_console_print):
mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 200
mock_response.ok = True
mock_response.json.return_value = {
"apps": [
{
"name": "Test App",
"slug": "test-app",
"description": "A test application",
"is_connected": True,
"triggers": [
{
"name": "Test Trigger",
"slug": "test-trigger",
"description": "A test trigger"
}
]
}
]
}
self.mock_client.get_triggers.return_value = mock_response
self.triggers_command.list_triggers()
self.mock_client.get_triggers.assert_called_once()
mock_console_print.assert_any_call("[bold blue]Fetching available triggers...[/bold blue]")
@patch("crewai.cli.triggers.main.console.print")
def test_list_triggers_no_apps(self, mock_console_print):
mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 200
mock_response.ok = True
mock_response.json.return_value = {"apps": []}
self.mock_client.get_triggers.return_value = mock_response
self.triggers_command.list_triggers()
mock_console_print.assert_any_call("[yellow]No triggers found.[/yellow]")
@patch("crewai.cli.triggers.main.console.print")
def test_list_triggers_api_error(self, mock_console_print):
self.mock_client.get_triggers.side_effect = Exception("API Error")
with self.assertRaises(SystemExit):
self.triggers_command.list_triggers()
mock_console_print.assert_any_call("[bold red]Error fetching triggers: API Error[/bold red]")
@patch("crewai.cli.triggers.main.console.print")
def test_execute_with_trigger_invalid_format(self, mock_console_print):
with self.assertRaises(SystemExit):
self.triggers_command.execute_with_trigger("invalid-format")
mock_console_print.assert_called_with(
"[bold red]Error: Trigger must be in format 'app_slug/trigger_slug'[/bold red]"
)
@patch("crewai.cli.triggers.main.console.print")
@patch.object(TriggersCommand, "_run_crew_with_payload")
def test_execute_with_trigger_success(self, mock_run_crew, mock_console_print):
mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 200
mock_response.ok = True
mock_response.json.return_value = {
"sample_payload": {"key": "value", "data": "test"}
}
self.mock_client.get_trigger_payload.return_value = mock_response
self.triggers_command.execute_with_trigger("test-app/test-trigger")
self.mock_client.get_trigger_payload.assert_called_once_with("test-app", "test-trigger")
mock_run_crew.assert_called_once_with({"key": "value", "data": "test"})
mock_console_print.assert_any_call(
"[bold blue]Fetching trigger payload for test-app/test-trigger...[/bold blue]"
)
@patch("crewai.cli.triggers.main.console.print")
def test_execute_with_trigger_not_found(self, mock_console_print):
mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 404
mock_response.json.return_value = {"error": "Trigger not found"}
self.mock_client.get_trigger_payload.return_value = mock_response
with self.assertRaises(SystemExit):
self.triggers_command.execute_with_trigger("test-app/nonexistent-trigger")
mock_console_print.assert_any_call("[bold red]Error: Trigger not found[/bold red]")
@patch("crewai.cli.triggers.main.console.print")
def test_execute_with_trigger_api_error(self, mock_console_print):
self.mock_client.get_trigger_payload.side_effect = Exception("API Error")
with self.assertRaises(SystemExit):
self.triggers_command.execute_with_trigger("test-app/test-trigger")
mock_console_print.assert_any_call(
"[bold red]Error executing crew with trigger: API Error[/bold red]"
)
@patch("subprocess.run")
def test_run_crew_with_payload_success(self, mock_subprocess):
payload = {"key": "value", "data": "test"}
mock_subprocess.return_value = None
self.triggers_command._run_crew_with_payload(payload)
mock_subprocess.assert_called_once_with(
["uv", "run", "run_with_trigger", json.dumps(payload)],
capture_output=False,
text=True,
check=True
)
@patch("subprocess.run")
def test_run_crew_with_payload_failure(self, mock_subprocess):
payload = {"key": "value"}
mock_subprocess.side_effect = subprocess.CalledProcessError(1, "uv")
with self.assertRaises(SystemExit):
self.triggers_command._run_crew_with_payload(payload)
@patch("subprocess.run")
def test_run_crew_with_payload_empty_payload(self, mock_subprocess):
payload = {}
mock_subprocess.return_value = None
self.triggers_command._run_crew_with_payload(payload)
mock_subprocess.assert_called_once_with(
["uv", "run", "run_with_trigger", "{}"],
capture_output=False,
text=True,
check=True
)
@patch("crewai.cli.triggers.main.console.print")
def test_execute_with_trigger_with_default_error_message(self, mock_console_print):
mock_response = Mock(spec=httpx.Response)
mock_response.status_code = 404
mock_response.json.return_value = {}
self.mock_client.get_trigger_payload.return_value = mock_response
with self.assertRaises(SystemExit):
self.triggers_command.execute_with_trigger("test-app/test-trigger")
mock_console_print.assert_any_call("[bold red]Error: Trigger not found[/bold red]")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/cli/triggers/test_main.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/events/test_depends.py | """Tests for FastAPI-style dependency injection in event handlers."""
import asyncio
import pytest
from crewai.events import Depends, crewai_event_bus
from crewai.events.base_events import BaseEvent
class DependsTestEvent(BaseEvent):
"""Test event for dependency tests."""
value: int = 0
type: str = "test_event"
@pytest.mark.asyncio
async def test_basic_dependency():
"""Test that handler with dependency runs after its dependency."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def setup(source, event: DependsTestEvent):
execution_order.append("setup")
@crewai_event_bus.on(DependsTestEvent, Depends(setup))
def process(source, event: DependsTestEvent):
execution_order.append("process")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
assert execution_order == ["setup", "process"]
@pytest.mark.asyncio
async def test_multiple_dependencies():
"""Test handler with multiple dependencies."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def setup_a(source, event: DependsTestEvent):
execution_order.append("setup_a")
@crewai_event_bus.on(DependsTestEvent)
def setup_b(source, event: DependsTestEvent):
execution_order.append("setup_b")
@crewai_event_bus.on(
DependsTestEvent, depends_on=[Depends(setup_a), Depends(setup_b)]
)
def process(source, event: DependsTestEvent):
execution_order.append("process")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
# setup_a and setup_b can run in any order (same level)
assert "process" in execution_order
assert execution_order.index("process") > execution_order.index("setup_a")
assert execution_order.index("process") > execution_order.index("setup_b")
@pytest.mark.asyncio
async def test_chain_of_dependencies():
"""Test chain of dependencies (A -> B -> C)."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def handler_a(source, event: DependsTestEvent):
execution_order.append("handler_a")
@crewai_event_bus.on(DependsTestEvent, depends_on=Depends(handler_a))
def handler_b(source, event: DependsTestEvent):
execution_order.append("handler_b")
@crewai_event_bus.on(DependsTestEvent, depends_on=Depends(handler_b))
def handler_c(source, event: DependsTestEvent):
execution_order.append("handler_c")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
assert execution_order == ["handler_a", "handler_b", "handler_c"]
@pytest.mark.asyncio
async def test_async_handler_with_dependency():
"""Test async handler with dependency on sync handler."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def sync_setup(source, event: DependsTestEvent):
execution_order.append("sync_setup")
@crewai_event_bus.on(DependsTestEvent, depends_on=Depends(sync_setup))
async def async_process(source, event: DependsTestEvent):
await asyncio.sleep(0.01)
execution_order.append("async_process")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
assert execution_order == ["sync_setup", "async_process"]
@pytest.mark.asyncio
async def test_mixed_handlers_with_dependencies():
"""Test mix of sync and async handlers with dependencies."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def setup(source, event: DependsTestEvent):
execution_order.append("setup")
@crewai_event_bus.on(DependsTestEvent, depends_on=Depends(setup))
def sync_process(source, event: DependsTestEvent):
execution_order.append("sync_process")
@crewai_event_bus.on(DependsTestEvent, depends_on=Depends(setup))
async def async_process(source, event: DependsTestEvent):
await asyncio.sleep(0.01)
execution_order.append("async_process")
@crewai_event_bus.on(
DependsTestEvent, depends_on=[Depends(sync_process), Depends(async_process)]
)
def finalize(source, event: DependsTestEvent):
execution_order.append("finalize")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
# Verify execution order
assert execution_order[0] == "setup"
assert "finalize" in execution_order
assert execution_order.index("finalize") > execution_order.index("sync_process")
assert execution_order.index("finalize") > execution_order.index("async_process")
@pytest.mark.asyncio
async def test_independent_handlers_run_concurrently():
"""Test that handlers without dependencies can run concurrently."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
async def handler_a(source, event: DependsTestEvent):
await asyncio.sleep(0.01)
execution_order.append("handler_a")
@crewai_event_bus.on(DependsTestEvent)
async def handler_b(source, event: DependsTestEvent):
await asyncio.sleep(0.01)
execution_order.append("handler_b")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
# Both handlers should have executed
assert len(execution_order) == 2
assert "handler_a" in execution_order
assert "handler_b" in execution_order
@pytest.mark.asyncio
async def test_circular_dependency_detection():
"""Test that circular dependencies are detected and raise an error."""
from crewai.events.handler_graph import CircularDependencyError, build_execution_plan
# Create circular dependency: handler_a -> handler_b -> handler_c -> handler_a
def handler_a(source, event: DependsTestEvent):
pass
def handler_b(source, event: DependsTestEvent):
pass
def handler_c(source, event: DependsTestEvent):
pass
# Build a dependency graph with a cycle
handlers = [handler_a, handler_b, handler_c]
dependencies = {
handler_a: [Depends(handler_b)],
handler_b: [Depends(handler_c)],
handler_c: [Depends(handler_a)], # Creates the cycle
}
# Should raise CircularDependencyError about circular dependency
with pytest.raises(CircularDependencyError, match="Circular dependency"):
build_execution_plan(handlers, dependencies)
@pytest.mark.asyncio
async def test_handler_without_dependency_runs_normally():
"""Test that handlers without dependencies still work as before."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def simple_handler(source, event: DependsTestEvent):
execution_order.append("simple_handler")
event = DependsTestEvent(value=1)
future = crewai_event_bus.emit("test_source", event)
if future:
await asyncio.wrap_future(future)
assert execution_order == ["simple_handler"]
@pytest.mark.asyncio
async def test_depends_equality():
"""Test Depends equality and hashing."""
def handler_a(source, event):
pass
def handler_b(source, event):
pass
dep_a1 = Depends(handler_a)
dep_a2 = Depends(handler_a)
dep_b = Depends(handler_b)
# Same handler should be equal
assert dep_a1 == dep_a2
assert hash(dep_a1) == hash(dep_a2)
# Different handlers should not be equal
assert dep_a1 != dep_b
assert hash(dep_a1) != hash(dep_b)
@pytest.mark.asyncio
async def test_aemit_ignores_dependencies():
"""Test that aemit only processes async handlers (no dependency support yet)."""
execution_order = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(DependsTestEvent)
def sync_handler(source, event: DependsTestEvent):
execution_order.append("sync_handler")
@crewai_event_bus.on(DependsTestEvent)
async def async_handler(source, event: DependsTestEvent):
execution_order.append("async_handler")
event = DependsTestEvent(value=1)
await crewai_event_bus.aemit("test_source", event)
# Only async handler should execute
assert execution_order == ["async_handler"]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/events/test_depends.py",
"license": "MIT License",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py | import threading
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
AgentEvaluationStartedEvent,
)
from crewai.experimental.evaluation import (
EvaluationScore,
GoalAlignmentEvaluator,
MetricCategory,
ParameterExtractionEvaluator,
ReasoningEfficiencyEvaluator,
SemanticQualityEvaluator,
ToolInvocationEvaluator,
ToolSelectionEvaluator,
create_default_evaluator,
)
from crewai.experimental.evaluation.agent_evaluator import AgentEvaluator
from crewai.experimental.evaluation.base_evaluator import (
AgentEvaluationResult,
BaseEvaluator,
)
from crewai.task import Task
class TestAgentEvaluator:
@pytest.fixture
def mock_crew(self):
agent = Agent(
role="Test Agent",
goal="Complete test tasks successfully",
backstory="An agent created for testing purposes",
allow_delegation=False,
verbose=False,
)
task = Task(
description="Test task description",
agent=agent,
expected_output="Expected test output",
)
crew = Crew(agents=[agent], tasks=[task])
return crew
def test_set_iteration(self):
agent_evaluator = AgentEvaluator(agents=[])
agent_evaluator.set_iteration(3)
assert agent_evaluator._execution_state.iteration == 3
@pytest.mark.vcr()
def test_evaluate_current_iteration(self, mock_crew):
with crewai_event_bus.scoped_handlers():
agent_evaluator = AgentEvaluator(
agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()]
)
evaluation_condition = threading.Condition()
evaluation_completed = False
@crewai_event_bus.on(AgentEvaluationCompletedEvent)
async def on_evaluation_completed(source, event):
nonlocal evaluation_completed
with evaluation_condition:
evaluation_completed = True
evaluation_condition.notify()
mock_crew.kickoff()
with evaluation_condition:
assert evaluation_condition.wait_for(
lambda: evaluation_completed, timeout=5
), "Timeout waiting for evaluation completion"
results = agent_evaluator.get_evaluation_results()
assert isinstance(results, dict)
(agent,) = mock_crew.agents
(task,) = mock_crew.tasks
assert len(mock_crew.agents) == 1
assert agent.role in results
assert len(results[agent.role]) == 1
(result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id)
assert result.task_id == str(task.id)
(goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 5.0
expected_feedback = "The agent's output demonstrates an understanding of the need for a comprehensive document outlining task"
assert expected_feedback in goal_alignment.feedback
assert goal_alignment.raw_response is not None
assert '"score": 5' in goal_alignment.raw_response
def test_create_default_evaluator(self, mock_crew):
agent_evaluator = create_default_evaluator(agents=mock_crew.agents)
assert isinstance(agent_evaluator, AgentEvaluator)
assert agent_evaluator.agents == mock_crew.agents
expected_types = [
GoalAlignmentEvaluator,
SemanticQualityEvaluator,
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator,
]
assert len(agent_evaluator.evaluators) == len(expected_types)
for evaluator, expected_type in zip(
agent_evaluator.evaluators, expected_types, strict=False
):
assert isinstance(evaluator, expected_type)
    @pytest.mark.vcr()
    def test_eval_specific_agents_from_crew(self, mock_crew):
        """Evaluating a single agent from a crew emits started/completed events
        only for that agent and records exactly one result.

        Uses a VCR cassette for the LLM interaction; the expected feedback text
        below must match the recorded response.
        """
        with crewai_event_bus.scoped_handlers():
            agent = Agent(
                role="Test Agent Eval",
                goal="Complete test tasks successfully",
                backstory="An agent created for testing purposes",
            )
            task = Task(
                description="Test task description",
                agent=agent,
                expected_output="Expected test output",
            )
            mock_crew.agents.append(agent)
            mock_crew.tasks.append(task)

            events = {}
            # Condition guards cross-thread access to completed_event_received,
            # since evaluation events may arrive on a different thread.
            results_condition = threading.Condition()
            completed_event_received = False

            # Evaluate ONLY the newly added agent, not the whole crew.
            agent_evaluator = AgentEvaluator(
                agents=[agent], evaluators=[GoalAlignmentEvaluator()]
            )

            @crewai_event_bus.on(AgentEvaluationStartedEvent)
            async def capture_started(source, event):
                # Only record events for the agent under test.
                if event.agent_id == str(agent.id):
                    events["started"] = event

            @crewai_event_bus.on(AgentEvaluationCompletedEvent)
            async def capture_completed(source, event):
                nonlocal completed_event_received
                if event.agent_id == str(agent.id):
                    events["completed"] = event
                    with results_condition:
                        completed_event_received = True
                        results_condition.notify()

            @crewai_event_bus.on(AgentEvaluationFailedEvent)
            def capture_failed(source, event):
                # Any failure event would make the key-set assertion below fail.
                events["failed"] = event

            mock_crew.kickoff()

            # Events are emitted asynchronously; block until completion or timeout.
            with results_condition:
                assert results_condition.wait_for(
                    lambda: completed_event_received, timeout=5
                ), "Timeout waiting for evaluation completed event"

            # Exactly started + completed; no failure event expected.
            assert events.keys() == {"started", "completed"}
            assert events["started"].agent_id == str(agent.id)
            assert events["started"].agent_role == agent.role
            assert events["started"].task_id == str(task.id)
            assert events["started"].iteration == 1

            assert events["completed"].agent_id == str(agent.id)
            assert events["completed"].agent_role == agent.role
            assert events["completed"].task_id == str(task.id)
            assert events["completed"].iteration == 1
            assert events["completed"].metric_category == MetricCategory.GOAL_ALIGNMENT
            assert isinstance(events["completed"].score, EvaluationScore)
            assert events["completed"].score.score == 5.0

            # One agent evaluated -> one entry keyed by role.
            results = agent_evaluator.get_evaluation_results()
            assert isinstance(results, dict)
            assert len(results.keys()) == 1

            (result,) = results[agent.role]
            assert isinstance(result, AgentEvaluationResult)
            assert result.agent_id == str(agent.id)
            assert result.task_id == str(task.id)

            # Single metric from the single evaluator; feedback text comes from
            # the recorded cassette response.
            (goal_alignment,) = result.metrics.values()
            assert goal_alignment.score == 5.0
            expected_feedback = "The agent provided a thorough guide on how to conduct a test task but failed to produce specific expected output"
            assert expected_feedback in goal_alignment.feedback
            assert goal_alignment.raw_response is not None
            assert '"score": 5' in goal_alignment.raw_response
    @pytest.mark.vcr()
    def test_failed_evaluation(self, mock_crew):
        """An evaluator that raises emits started + failed events (no completed)
        and stores a result with an empty metrics dict.
        """
        with crewai_event_bus.scoped_handlers():
            (agent,) = mock_crew.agents
            (task,) = mock_crew.tasks

            events: dict[str, AgentEvaluationStartedEvent | AgentEvaluationCompletedEvent | AgentEvaluationFailedEvent] = {}
            # Single condition shared by all handlers; every event notifies it.
            condition = threading.Condition()

            @crewai_event_bus.on(AgentEvaluationStartedEvent)
            def capture_started(source, event):
                with condition:
                    events["started"] = event
                    condition.notify()

            @crewai_event_bus.on(AgentEvaluationCompletedEvent)
            def capture_completed(source, event):
                with condition:
                    events["completed"] = event
                    condition.notify()

            @crewai_event_bus.on(AgentEvaluationFailedEvent)
            def capture_failed(source, event):
                with condition:
                    events["failed"] = event
                    condition.notify()

            class FailingEvaluator(BaseEvaluator):
                # Always raises, to drive the failure path deterministically.
                metric_category = MetricCategory.GOAL_ALIGNMENT

                def evaluate(self, agent, task, execution_trace, final_output):
                    raise ValueError("Forced evaluation failure")

            agent_evaluator = AgentEvaluator(
                agents=[agent], evaluators=[FailingEvaluator()]
            )

            mock_crew.kickoff()

            # Wait until both the started and failed events have arrived.
            with condition:
                success = condition.wait_for(
                    lambda: "started" in events and "failed" in events,
                    timeout=10,
                )
            assert success, "Timeout waiting for evaluation events"

            # No "completed" event: the evaluator raised before scoring.
            assert events.keys() == {"started", "failed"}
            assert events["started"].agent_id == str(agent.id)
            assert events["started"].agent_role == agent.role
            assert events["started"].task_id == str(task.id)
            assert events["started"].iteration == 1

            assert events["failed"].agent_id == str(agent.id)
            assert events["failed"].agent_role == agent.role
            assert events["failed"].task_id == str(task.id)
            assert events["failed"].iteration == 1
            assert events["failed"].error == "Forced evaluation failure"

            # Wait for results to be stored - the event is emitted before storage
            with condition:
                success = condition.wait_for(
                    lambda: agent.role in agent_evaluator.get_evaluation_results(),
                    timeout=5,
                )
            assert success, "Timeout waiting for evaluation results to be stored"

            results = agent_evaluator.get_evaluation_results()
            (result,) = results[agent.role]
            assert isinstance(result, AgentEvaluationResult)
            assert result.agent_id == str(agent.id)
            assert result.task_id == str(task.id)
            # A failed evaluation leaves the metrics dict empty.
            assert result.metrics == {}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/experimental/evaluation/test_agent_evaluator.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/anthropic/test_anthropic.py | import os
import sys
import types
from unittest.mock import patch, MagicMock
import pytest
from crewai.llm import LLM
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
@pytest.fixture(autouse=True)
def mock_anthropic_api_key():
    """Ensure ANTHROPIC_API_KEY exists for every test in this module.

    If the environment already provides a key, the test runs unmodified;
    otherwise a dummy key is injected for the duration of the test.
    """
    if "ANTHROPIC_API_KEY" in os.environ:
        # A real key is configured; leave the environment alone.
        yield
    else:
        # Temporarily inject a placeholder so client construction succeeds.
        with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
            yield
def test_anthropic_completion_is_used_when_anthropic_provider():
    """
    Test that AnthropicCompletion from completion.py is used when LLM uses provider 'anthropic'
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Class-name check avoids importing the provider module in this test.
    assert llm.__class__.__name__ == "AnthropicCompletion"
    assert llm.provider == "anthropic"
    # The provider prefix is stripped from the stored model identifier.
    assert llm.model == "claude-3-5-sonnet-20241022"
def test_anthropic_completion_is_used_when_claude_provider():
    """
    Test that AnthropicCompletion is used when provider is 'claude'
    """
    llm = LLM(model="claude/claude-3-5-sonnet-20241022")

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    assert isinstance(llm, AnthropicCompletion)
    # The 'claude/' prefix normalizes to the 'anthropic' provider.
    assert llm.provider == "anthropic"
    assert llm.model == "claude-3-5-sonnet-20241022"
def test_anthropic_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Anthropic provider
    """
    module_name = "crewai.llms.providers.anthropic.completion"

    # Drop any cached copy so the LLM constructor must import it afresh.
    sys.modules.pop(module_name, None)

    # Constructing the LLM should trigger the provider import.
    LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # The module must now be back in the import cache ...
    assert module_name in sys.modules
    completion_mod = sys.modules[module_name]
    assert isinstance(completion_mod, types.ModuleType)
    # ... and expose the provider class.
    assert hasattr(completion_mod, 'AnthropicCompletion')
def test_native_anthropic_raises_error_when_initialization_fails():
    """
    Test that LLM raises ImportError when native Anthropic completion fails to initialize.
    This ensures we don't silently fall back when there's a configuration issue.
    """
    # Mock the _get_native_provider to return a failing class
    with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
        class FailingCompletion:
            # Stand-in provider whose constructor always blows up.
            def __init__(self, *args, **kwargs):
                raise Exception("Native Anthropic SDK failed")

        mock_get_provider.return_value = FailingCompletion

        # This should raise ImportError, not fall back to LiteLLM
        with pytest.raises(ImportError) as excinfo:
            LLM(model="anthropic/claude-3-5-sonnet-20241022")

        # The original failure message is surfaced inside the ImportError.
        assert "Error importing native provider" in str(excinfo.value)
        assert "Native Anthropic SDK failed" in str(excinfo.value)
def test_anthropic_completion_initialization_parameters():
    """
    Test that AnthropicCompletion is initialized with correct parameters
    """
    llm = LLM(
        model="anthropic/claude-3-5-sonnet-20241022",
        temperature=0.7,
        max_tokens=2000,
        top_p=0.9,
        api_key="test-key"
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    assert isinstance(llm, AnthropicCompletion)
    # Constructor kwargs are carried through onto the completion instance.
    assert llm.model == "claude-3-5-sonnet-20241022"
    assert llm.temperature == 0.7
    assert llm.max_tokens == 2000
    assert llm.top_p == 0.9
def test_anthropic_specific_parameters():
    """
    Test Anthropic-specific parameters like stop_sequences and streaming
    """
    llm = LLM(
        model="anthropic/claude-3-5-sonnet-20241022",
        stop_sequences=["Human:", "Assistant:"],
        stream=True,
        max_retries=5,
        timeout=60
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    assert isinstance(llm, AnthropicCompletion)
    assert llm.stop_sequences == ["Human:", "Assistant:"]
    # Identity comparison (PEP 8): `stream` must be the bool True, not merely truthy.
    assert llm.stream is True
    # Retry/timeout settings are forwarded to the underlying Anthropic client.
    assert llm.client.max_retries == 5
    assert llm.client.timeout == 60
def test_anthropic_completion_call():
    """
    Test that AnthropicCompletion call method works
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    canned_reply = "Hello! I'm Claude, ready to help."
    prompt = "Hello, how are you?"

    # Patch `call` on this instance only, so no network traffic happens.
    with patch.object(llm, 'call', return_value=canned_reply) as mock_call:
        result = llm.call(prompt)

        assert result == canned_reply
        mock_call.assert_called_once_with(prompt)
def test_anthropic_completion_called_during_crew_execution():
    """
    Test that AnthropicCompletion.call is actually invoked when running a crew
    """
    # Create the LLM instance first
    anthropic_llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the call method on the specific instance
    with patch.object(anthropic_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
        # Create agent with explicit LLM configuration
        agent = Agent(
            role="Research Assistant",
            goal="Find population info",
            backstory="You research populations.",
            llm=anthropic_llm,
        )

        task = Task(
            description="Find Tokyo population",
            expected_output="Population number",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        # Verify mock was called
        assert mock_call.called
        # The crew's final output should contain the mocked LLM reply.
        assert "14 million" in str(result)
def test_anthropic_completion_call_arguments():
    """
    Test that AnthropicCompletion.call is invoked with correct arguments
    """
    # Create LLM instance first
    anthropic_llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the instance method
    with patch.object(anthropic_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed successfully."

        agent = Agent(
            role="Test Agent",
            goal="Complete a simple task",
            backstory="You are a test agent.",
            llm=anthropic_llm  # Use same instance
        )

        task = Task(
            description="Say hello world",
            expected_output="Hello world",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()

        # Verify call was made
        assert mock_call.called

        # Check the arguments passed to the call method
        call_args = mock_call.call_args
        assert call_args is not None

        # The first argument should be the messages
        messages = call_args[0][0]  # First positional argument
        assert isinstance(messages, (str, list))

        # Verify that the task description appears in the messages
        if isinstance(messages, str):
            assert "hello world" in messages.lower()
        elif isinstance(messages, list):
            # Message list: stringify and search case-insensitively.
            message_content = str(messages).lower()
            assert "hello world" in message_content
def test_multiple_anthropic_calls_in_crew():
    """
    Test that AnthropicCompletion.call is invoked multiple times for multiple tasks
    """
    # Create LLM instance first
    anthropic_llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the instance method
    with patch.object(anthropic_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed."

        agent = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=anthropic_llm  # Use same instance
        )

        task1 = Task(
            description="First task",
            expected_output="First result",
            agent=agent,
        )

        task2 = Task(
            description="Second task",
            expected_output="Second result",
            agent=agent,
        )

        crew = Crew(
            agents=[agent],
            tasks=[task1, task2]
        )
        crew.kickoff()

        # Verify multiple calls were made
        assert mock_call.call_count >= 2  # At least one call per task

        # Verify each call had proper arguments
        for call in mock_call.call_args_list:
            assert len(call[0]) > 0  # Has positional arguments
            messages = call[0][0]
            assert messages is not None
def test_anthropic_completion_with_tools():
    """
    Test that AnthropicCompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool

    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"

    # Create LLM instance first
    anthropic_llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the instance method
    with patch.object(anthropic_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed with tools."

        agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=anthropic_llm,  # Use same instance
            tools=[sample_tool]
        )

        task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()

        assert mock_call.called

        call_args = mock_call.call_args
        call_kwargs = call_args[1] if len(call_args) > 1 else {}

        # Tools are only asserted when forwarded as a keyword argument.
        if 'tools' in call_kwargs:
            assert call_kwargs['tools'] is not None
            assert len(call_kwargs['tools']) > 0
def test_anthropic_raises_error_when_model_not_supported():
    """Test that AnthropicCompletion raises ValueError when model not supported"""
    # Mock the Anthropic client to raise an error
    with patch('crewai.llms.providers.anthropic.completion.Anthropic') as mock_anthropic_class:
        mock_client = MagicMock()
        mock_anthropic_class.return_value = mock_client

        # Mock the error that Anthropic would raise for unsupported models
        from anthropic import NotFoundError
        mock_client.messages.create.side_effect = NotFoundError(
            message="The model `model-doesnt-exist` does not exist",
            response=MagicMock(),
            body={}
        )

        llm = LLM(model="anthropic/model-doesnt-exist")

        # Broad on purpose: the provider may wrap NotFoundError in its own type.
        with pytest.raises(Exception):  # Should raise some error for unsupported model
            llm.call("Hello")
def test_anthropic_client_params_setup():
    """
    Test that client_params are properly merged with default client parameters
    """
    # Use only valid Anthropic client parameters
    custom_client_params = {
        "default_headers": {"X-Custom-Header": "test-value"},
    }

    with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
        llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022",
            api_key="test-key",
            base_url="https://custom-api.com",
            timeout=45,
            max_retries=5,
            client_params=custom_client_params
        )

        from crewai.llms.providers.anthropic.completion import AnthropicCompletion

        assert isinstance(llm, AnthropicCompletion)
        # client_params are stored verbatim on the instance.
        assert llm.client_params == custom_client_params

        # Merged view = explicit kwargs + custom client params.
        merged_params = llm._get_client_params()
        assert merged_params["api_key"] == "test-key"
        assert merged_params["base_url"] == "https://custom-api.com"
        assert merged_params["timeout"] == 45
        assert merged_params["max_retries"] == 5
        assert merged_params["default_headers"] == {"X-Custom-Header": "test-value"}
def test_anthropic_client_params_override_defaults():
    """
    Test that client_params can override default client parameters
    """
    override_client_params = {
        "timeout": 120,  # Override the timeout parameter
        "max_retries": 10,  # Override the max_retries parameter
        "default_headers": {"X-Override": "true"}  # Valid custom parameter
    }

    with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
        llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022",
            api_key="test-key",
            timeout=30,
            max_retries=3,
            client_params=override_client_params
        )

        # Verify this is actually AnthropicCompletion, not LiteLLM fallback
        from crewai.llms.providers.anthropic.completion import AnthropicCompletion
        assert isinstance(llm, AnthropicCompletion)

        merged_params = llm._get_client_params()

        # client_params should override the individual parameters
        # (120/10 win over the constructor's timeout=30 / max_retries=3).
        assert merged_params["timeout"] == 120
        assert merged_params["max_retries"] == 10
        assert merged_params["default_headers"] == {"X-Override": "true"}
def test_anthropic_client_params_none():
    """
    Test that client_params=None works correctly (no additional parameters)
    """
    with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
        llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022",
            api_key="test-key",
            base_url="https://api.anthropic.com",
            timeout=60,
            max_retries=2,
            client_params=None
        )

        from crewai.llms.providers.anthropic.completion import AnthropicCompletion
        assert isinstance(llm, AnthropicCompletion)
        assert llm.client_params is None

        merged_params = llm._get_client_params()
        # Only the explicitly-passed constructor kwargs should be present.
        expected_keys = {"api_key", "base_url", "timeout", "max_retries"}
        assert set(merged_params.keys()) == expected_keys

        # Fixed assertions - all should be inside the with block and use correct values
        assert merged_params["api_key"] == "test-key"  # Not "test-anthropic-key"
        assert merged_params["base_url"] == "https://api.anthropic.com"
        assert merged_params["timeout"] == 60
        assert merged_params["max_retries"] == 2
def test_anthropic_client_params_empty_dict():
    """
    Test that client_params={} works correctly (empty additional parameters)
    """
    with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
        llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022",
            api_key="test-key",
            client_params={}
        )

        from crewai.llms.providers.anthropic.completion import AnthropicCompletion
        assert isinstance(llm, AnthropicCompletion)
        # An empty dict is preserved as-is (distinct from None).
        assert llm.client_params == {}

        merged_params = llm._get_client_params()
        assert "api_key" in merged_params
        assert merged_params["api_key"] == "test-key"
def test_anthropic_model_detection():
    """
    Test that various Anthropic model formats are properly detected
    """
    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    # Both the 'anthropic/' and 'claude/' prefixes must route to the
    # native Anthropic provider class.
    for model_name in (
        "anthropic/claude-3-5-sonnet-20241022",
        "claude/claude-3-5-sonnet-20241022",
    ):
        llm = LLM(model=model_name)
        assert isinstance(llm, AnthropicCompletion), f"Failed for model: {model_name}"
def test_anthropic_supports_stop_words():
    """
    Test that Anthropic models support stop sequences
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Identity comparison (PEP 8): the API must return an actual bool True.
    assert llm.supports_stop_words() is True
def test_anthropic_context_window_size():
    """
    Test that Anthropic models return correct context window sizes
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
    context_size = llm.get_context_window_size()

    # Should return a reasonable context window size (Claude 3.5 has 200k tokens)
    assert context_size > 100000  # Should be substantial
    assert context_size <= 200000  # But not exceed the actual limit
def test_anthropic_message_formatting():
    """
    Test that messages are properly formatted for Anthropic API
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Test message formatting
    test_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"},
        {"role": "user", "content": "How are you?"}
    ]

    # Anthropic takes the system prompt as a separate top-level parameter,
    # so the formatter returns (messages-without-system, system_text).
    formatted_messages, system_message = llm._format_messages_for_anthropic(test_messages)

    # System message should be extracted
    assert system_message == "You are a helpful assistant."

    # Remaining messages should start with user
    assert formatted_messages[0]["role"] == "user"
    assert len(formatted_messages) >= 3  # Should have user, assistant, user messages
def test_anthropic_streaming_parameter():
    """
    Test that streaming parameter is properly handled
    """
    # Non-streaming configuration is preserved on the instance.
    # Identity comparisons (PEP 8): the attribute must be a real bool.
    llm_no_stream = LLM(model="anthropic/claude-3-5-sonnet-20241022", stream=False)
    assert llm_no_stream.stream is False

    # Streaming configuration is preserved on the instance.
    llm_stream = LLM(model="anthropic/claude-3-5-sonnet-20241022", stream=True)
    assert llm_stream.stream is True
def test_anthropic_tool_conversion():
    """
    Test that tools are properly converted to Anthropic format
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock tool in CrewAI format
    crewai_tools = [{
        "type": "function",
        "function": {
            "name": "test_tool",
            "description": "A test tool",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    }]

    # Test tool conversion
    # NOTE(review): method name reads "interference" — presumably a typo of
    # "inference" in the provider API; keep in sync with the implementation.
    anthropic_tools = llm._convert_tools_for_interference(crewai_tools)

    assert len(anthropic_tools) == 1
    assert anthropic_tools[0]["name"] == "test_tool"
    assert anthropic_tools[0]["description"] == "A test tool"
    # Anthropic's schema key is "input_schema", not OpenAI-style "parameters".
    assert "input_schema" in anthropic_tools[0]
def test_anthropic_environment_variable_api_key():
    """
    Test that Anthropic API key is properly loaded from environment
    """
    env_override = {"ANTHROPIC_API_KEY": "test-anthropic-key"}
    with patch.dict(os.environ, env_override):
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

        # A constructed client with a messages attribute implies the key
        # was picked up from the environment without being passed explicitly.
        assert llm.client is not None
        assert hasattr(llm.client, 'messages')
def test_anthropic_token_usage_tracking():
    """
    Test that token usage is properly tracked for Anthropic responses
    """
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the Anthropic response with usage information
    with patch.object(llm.client.messages, 'create') as mock_create:
        mock_response = MagicMock()
        mock_response.content = [MagicMock(text="test response")]
        mock_response.usage = MagicMock(input_tokens=50, output_tokens=25)
        mock_create.return_value = mock_response

        result = llm.call("Hello")

        # Verify the response
        assert result == "test response"

        # Verify token usage was extracted
        usage = llm._extract_anthropic_token_usage(mock_response)
        assert usage["input_tokens"] == 50
        assert usage["output_tokens"] == 25
        # Total is the sum of input and output tokens (50 + 25).
        assert usage["total_tokens"] == 75
def test_anthropic_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Test setting stop as a list
    llm.stop = ["\nObservation:", "\nThought:"]
    assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
    assert llm.stop == ["\nObservation:", "\nThought:"]

    # Test setting stop as a string — normalized to a single-element list
    llm.stop = "\nFinal Answer:"
    assert llm.stop_sequences == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]

    # Test setting stop as None — normalized to an empty list
    llm.stop = None
    assert llm.stop_sequences == []
    assert llm.stop == []
@pytest.mark.vcr()
def test_anthropic_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Anthropic API."""
    llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
    llm.stop = ["\nObservation:", "\nThought:"]

    # VCR replays the recorded interaction; the call must succeed with
    # the stop sequences configured.
    result = llm.call("Say hello in one word")

    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_thinking():
    """Test that thinking is properly handled and thinking params are passed to messages.create"""
    from unittest.mock import patch
    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    llm = LLM(
        model="anthropic/claude-sonnet-4-5",
        thinking={"type": "enabled", "budget_tokens": 5000},
        max_tokens=10000
    )
    assert isinstance(llm, AnthropicCompletion)

    # Wrap messages.create so we can inspect the exact kwargs it receives
    # while still exercising the real (VCR-recorded) request path.
    original_create = llm.client.messages.create
    captured_params = {}

    def capture_and_call(**kwargs):
        captured_params.update(kwargs)
        return original_create(**kwargs)

    with patch.object(llm.client.messages, 'create', side_effect=capture_and_call):
        result = llm.call("What is the weather in Tokyo?")

    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0

    # The thinking config must be forwarded verbatim to the API call.
    assert "thinking" in captured_params
    assert captured_params["thinking"] == {"type": "enabled", "budget_tokens": 5000}
    assert captured_params["model"] == "claude-sonnet-4-5"
    assert captured_params["max_tokens"] == 10000
    assert "messages" in captured_params
    assert len(captured_params["messages"]) > 0
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_thinking_blocks_preserved_across_turns():
    """Test that thinking blocks are stored and included in subsequent API calls across turns"""
    from unittest.mock import patch
    from crewai.llms.providers.anthropic.completion import AnthropicCompletion

    llm = LLM(
        model="anthropic/claude-sonnet-4-5",
        thinking={"type": "enabled", "budget_tokens": 5000},
        max_tokens=10000
    )
    assert isinstance(llm, AnthropicCompletion)

    # Capture all messages.create calls to verify thinking blocks are included
    original_create = llm.client.messages.create
    captured_calls = []

    def capture_and_call(**kwargs):
        captured_calls.append(kwargs)
        return original_create(**kwargs)

    with patch.object(llm.client.messages, 'create', side_effect=capture_and_call):
        # First call - establishes context and generates thinking blocks
        messages = [{"role": "user", "content": "What is 2+2?"}]
        first_result = llm.call(messages)

        # Verify first call completed
        assert first_result is not None
        assert isinstance(first_result, str)
        assert len(first_result) > 0

        # Verify thinking blocks were stored after first response
        assert len(llm.previous_thinking_blocks) > 0, "No thinking blocks stored after first call"
        first_thinking = llm.previous_thinking_blocks[0]
        assert first_thinking["type"] == "thinking"
        assert "thinking" in first_thinking
        assert "signature" in first_thinking

        # Store the thinking block content for comparison
        stored_thinking_content = first_thinking["thinking"]
        stored_signature = first_thinking["signature"]

        # Second call - should include thinking blocks from first call
        messages.append({"role": "assistant", "content": first_result})
        messages.append({"role": "user", "content": "Now what is 3+3?"})
        second_result = llm.call(messages)

        # Verify second call completed
        assert second_result is not None
        assert isinstance(second_result, str)

        # Verify at least 2 API calls were made
        assert len(captured_calls) >= 2, f"Expected at least 2 API calls, got {len(captured_calls)}"

        # Verify second call includes thinking blocks in assistant message
        second_call_messages = captured_calls[1]["messages"]

        # Should have: user message + assistant message (with thinking blocks) + follow-up user message
        assert len(second_call_messages) >= 2

        # Find the assistant message in the second call
        assistant_message = None
        for msg in second_call_messages:
            # List-valued content is how Anthropic represents structured
            # blocks (thinking + text) rather than a plain string.
            if msg["role"] == "assistant" and isinstance(msg.get("content"), list):
                assistant_message = msg
                break

        assert assistant_message is not None, "Assistant message with list content not found in second call"
        assert isinstance(assistant_message["content"], list)

        # Verify thinking block is included in assistant message content
        thinking_found = False
        for block in assistant_message["content"]:
            if isinstance(block, dict) and block.get("type") == "thinking":
                thinking_found = True
                assert "thinking" in block
                assert "signature" in block
                # Verify it matches what was stored from the first call
                assert block["thinking"] == stored_thinking_content
                assert block["signature"] == stored_signature
                break

        assert thinking_found, "Thinking block not found in assistant message content in second call"
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_function_calling():
    """Test that function calling is properly handled"""
    llm = LLM(model="anthropic/claude-sonnet-4-5")

    def get_weather(location: str) -> str:
        """Return a canned weather report for the given location."""
        return f"The weather in {location} is sunny and 72°F"

    # Tool definition already in Anthropic's native schema ("input_schema").
    tools = [
        {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The unit of temperature"
                    }
                },
                "required": ["location"]
            }
        }
    ]

    result = llm.call(
        "What is the weather in Tokyo? Use the get_weather tool.",
        tools=tools,
        available_functions={"get_weather": get_weather}
    )

    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0

    # Verify the response includes information about Tokyo's weather
    assert "tokyo" in result.lower() or "72" in result
# =============================================================================
# Agent Kickoff Structured Output Tests
# =============================================================================
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_tool_execution_with_available_functions():
    """
    Test that Anthropic provider correctly executes tools when available_functions is provided.

    This specifically tests the fix for double llm_call_completed emission - when
    available_functions is provided, _handle_tool_execution is called which already
    emits llm_call_completed, so the caller should not emit it again.

    The test verifies:
    1. The tool is called with correct arguments
    2. The tool result is returned directly (not wrapped in conversation)
    3. The result is valid JSON matching the tool output format
    """
    import json

    llm = LLM(model="anthropic/claude-3-5-haiku-20241022")

    # Simple tool that returns a formatted string
    def create_reasoning_plan(plan: str, steps: list, ready: bool) -> str:
        """Create a reasoning plan with steps."""
        return json.dumps({"plan": plan, "steps": steps, "ready": ready})

    tools = [
        {
            "name": "create_reasoning_plan",
            "description": "Create a structured reasoning plan for completing a task",
            "input_schema": {
                "type": "object",
                "properties": {
                    "plan": {
                        "type": "string",
                        "description": "High-level plan description"
                    },
                    "steps": {
                        "type": "array",
                        "items": {"type": "object"},
                        "description": "List of steps to execute"
                    },
                    "ready": {
                        "type": "boolean",
                        "description": "Whether the plan is ready to execute"
                    }
                },
                "required": ["plan", "steps", "ready"]
            }
        }
    ]

    result = llm.call(
        messages=[{"role": "user", "content": "Create a simple plan to say hello. Use the create_reasoning_plan tool."}],
        tools=tools,
        available_functions={"create_reasoning_plan": create_reasoning_plan}
    )

    # Verify result is valid JSON from the tool
    assert result is not None
    assert isinstance(result, str)

    # Parse the result to verify it's valid JSON
    # (would raise json.JSONDecodeError if the tool output were wrapped in prose).
    parsed_result = json.loads(result)
    assert "plan" in parsed_result
    assert "steps" in parsed_result
    assert "ready" in parsed_result
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_tool_execution_returns_tool_result_directly():
    """
    Test that when available_functions is provided, the tool result is returned directly
    without additional LLM conversation (matching OpenAI behavior for reasoning_handler).
    """
    llm = LLM(model="anthropic/claude-3-5-haiku-20241022")

    # Counter tracks how many times the tool actually runs.
    call_count = 0

    def simple_calculator(operation: str, a: int, b: int) -> str:
        """Perform a simple calculation."""
        nonlocal call_count
        call_count += 1
        if operation == "add":
            return str(a + b)
        elif operation == "multiply":
            return str(a * b)
        return "Unknown operation"

    tools = [
        {
            "name": "simple_calculator",
            "description": "Perform simple math operations",
            "input_schema": {
                "type": "object",
                "properties": {
                    "operation": {
                        "type": "string",
                        "enum": ["add", "multiply"],
                        "description": "The operation to perform"
                    },
                    "a": {"type": "integer", "description": "First number"},
                    "b": {"type": "integer", "description": "Second number"}
                },
                "required": ["operation", "a", "b"]
            }
        }
    ]

    result = llm.call(
        messages=[{"role": "user", "content": "Calculate 5 + 3 using the simple_calculator tool with operation 'add'."}],
        tools=tools,
        available_functions={"simple_calculator": simple_calculator}
    )

    # Tool should have been called exactly once
    assert call_count == 1, f"Expected tool to be called once, got {call_count}"

    # Result should be the direct tool output
    assert result == "8", f"Expected '8' but got '{result}'"
@pytest.mark.vcr()
def test_anthropic_agent_kickoff_structured_output_without_tools():
    """
    Test that agent kickoff returns structured output without tools.
    This tests native structured output handling for Anthropic models.
    """
    from pydantic import BaseModel, Field

    class AnalysisResult(BaseModel):
        """Structured output for analysis results."""
        topic: str = Field(description="The topic analyzed")
        key_points: list[str] = Field(description="Key insights from the analysis")
        summary: str = Field(description="Brief summary of findings")

    agent = Agent(
        role="Analyst",
        goal="Provide structured analysis on topics",
        backstory="You are an expert analyst who provides clear, structured insights.",
        llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
        tools=[],  # deliberately empty: exercise the no-tools path
        verbose=True,
    )

    result = agent.kickoff(
        messages="Analyze the benefits of remote work briefly. Keep it concise.",
        response_format=AnalysisResult,
    )

    # The response must parse into the requested pydantic model with
    # all fields populated.
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, AnalysisResult), f"Expected AnalysisResult but got {type(result.pydantic)}"
    assert result.pydantic.topic, "Topic should not be empty"
    assert len(result.pydantic.key_points) > 0, "Should have at least one key point"
    assert result.pydantic.summary, "Summary should not be empty"
@pytest.mark.vcr()
def test_anthropic_agent_kickoff_structured_output_with_tools():
    """
    Test that agent kickoff returns structured output after using tools.
    This tests post-tool-call structured output handling for Anthropic models.
    """
    from pydantic import BaseModel, Field
    from crewai.tools import tool

    class CalculationResult(BaseModel):
        """Structured output for calculation results."""
        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")

    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b

    agent = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
        tools=[add_numbers],
        verbose=True,
    )

    result = agent.kickoff(
        messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        response_format=CalculationResult,
    )

    # After the tool round-trip, the final answer must still parse into
    # the requested pydantic model, with the correct sum (15 + 27 = 42).
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
    assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
    assert result.pydantic.operation, "Operation should not be empty"
    assert result.pydantic.explanation, "Explanation should not be empty"
@pytest.mark.vcr()
def test_anthropic_cached_prompt_tokens():
    """
    Test that Anthropic correctly extracts and tracks cached_prompt_tokens
    from cache_read_input_tokens. Uses cache_control to enable prompt caching
    and sends the same large prompt twice so the second call hits the cache.
    """
    # Anthropic requires cache_control blocks and >=1024 tokens for caching
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = LLM(model="anthropic/claude-sonnet-4-5-20250929")

    def _ephemeral_user(text: str):
        # Wrap user text in a content block carrying the ephemeral
        # cache_control marker that triggers Anthropic prompt caching.
        return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]

    # First call: creates the cache
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": _ephemeral_user("Say hello in one word.")},
    ])
    # Second call: same system prompt should hit the cache
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": _ephemeral_user("Say goodbye in one word.")},
    ])
    usage = llm.get_token_usage_summary()
    # Aggregate usage across both calls must be recorded.
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_anthropic_streaming_cached_prompt_tokens():
    """
    Test that Anthropic streaming correctly extracts and tracks cached_prompt_tokens.
    """
    # Same caching setup as the non-streaming test, but with stream=True so
    # the usage extraction from streamed events is exercised.
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = LLM(model="anthropic/claude-sonnet-4-5-20250929", stream=True)

    def _ephemeral_user(text: str):
        # Content block with the ephemeral cache_control marker.
        return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]

    # First call: creates the cache
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": _ephemeral_user("Say hello in one word.")},
    ])
    # Second call: same system prompt should hit the cache
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": _ephemeral_user("Say goodbye in one word.")},
    ])
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_anthropic_cached_prompt_tokens_with_tools():
    """
    Test that Anthropic correctly tracks cached_prompt_tokens when tools are used.
    The large system prompt should be cached across tool-calling requests.
    """
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {padding}"

    def get_weather(location: str) -> str:
        # Deterministic stub executed when the model issues a tool call.
        return f"The weather in {location} is sunny and 72°F"

    # Tool schema in Anthropic's native format (name / description / input_schema).
    tools = [
        {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name"
                    }
                },
                "required": ["location"],
            },
        }
    ]
    llm = LLM(model="anthropic/claude-sonnet-4-5-20250929")

    def _ephemeral_user(text: str):
        # Content block with the ephemeral cache_control marker.
        return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]

    # First call with tool: creates the cache
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": _ephemeral_user("What is the weather in Tokyo?")},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    # Second call with same system prompt + tools: should hit the cache
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": _ephemeral_user("What is the weather in Paris?")},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/anthropic/test_anthropic.py",
"license": "MIT License",
"lines": 888,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/azure/test_azure.py | import os
import sys
import types
from unittest.mock import patch, MagicMock, Mock
import pytest
from crewai.llm import LLM
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
@pytest.fixture
def mock_azure_credentials():
    """Yield with fake Azure credentials patched into the environment."""
    fake_env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com",
    }
    with patch.dict(os.environ, fake_env):
        yield
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_is_used_when_azure_provider():
    """
    Test that AzureCompletion from completion.py is used when LLM uses provider 'azure'
    """
    instance = LLM(model="azure/gpt-4")
    # Resolved class and provider/model attributes identify the native backend.
    assert type(instance).__name__ == "AzureCompletion"
    assert (instance.provider, instance.model) == ("azure", "gpt-4")
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_is_used_when_azure_openai_provider():
    """
    Test that AzureCompletion is used when provider is 'azure_openai'
    """
    from crewai.llms.providers.azure.completion import AzureCompletion

    instance = LLM(model="azure_openai/gpt-4")
    # The 'azure_openai' prefix is normalized to the 'azure' provider.
    assert isinstance(instance, AzureCompletion)
    assert (instance.provider, instance.model) == ("azure", "gpt-4")
def test_azure_tool_use_conversation_flow():
    """
    Test that the Azure completion properly handles tool use conversation flow
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from azure.ai.inference.models import ChatCompletionsToolCall

    # Create AzureCompletion instance
    completion = AzureCompletion(
        model="gpt-4",
        api_key="test-key",
        endpoint="https://test.openai.azure.com"
    )

    # Mock tool function
    def mock_weather_tool(location: str) -> str:
        return f"The weather in {location} is sunny and 75°F"

    available_functions = {"get_weather": mock_weather_tool}

    # Mock the Azure client responses
    with patch.object(completion.client, 'complete') as mock_complete:
        # Mock tool call in response with proper type; spec= matters because the
        # completion code type-checks tool calls against ChatCompletionsToolCall.
        mock_tool_call = MagicMock(spec=ChatCompletionsToolCall)
        mock_tool_call.function.name = "get_weather"
        mock_tool_call.function.arguments = '{"location": "San Francisco"}'
        # Assistant message carrying only the tool call (no text content).
        mock_message = MagicMock()
        mock_message.content = None
        mock_message.tool_calls = [mock_tool_call]
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = MagicMock(
            prompt_tokens=100,
            completion_tokens=50,
            total_tokens=150
        )
        mock_complete.return_value = mock_response

        # Test the call
        messages = [{"role": "user", "content": "What's the weather like in San Francisco?"}]
        result = completion.call(
            messages=messages,
            available_functions=available_functions
        )
        # Verify the tool was executed and returned the result
        assert result == "The weather in San Francisco is sunny and 75°F"
        # Verify that the API was called
        assert mock_complete.called
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Azure provider
    """
    module_name = "crewai.llms.providers.azure.completion"
    # Remove module from cache if it exists, so constructing the LLM below
    # must perform a fresh import rather than reuse a cached module.
    if module_name in sys.modules:
        del sys.modules[module_name]
    # Create LLM instance - this should trigger the import
    LLM(model="azure/gpt-4")
    # Verify the module was imported
    assert module_name in sys.modules
    completion_mod = sys.modules[module_name]
    assert isinstance(completion_mod, types.ModuleType)
    # Verify the class exists in the module
    assert hasattr(completion_mod, 'AzureCompletion')
def test_native_azure_raises_error_when_initialization_fails():
    """
    Test that LLM raises ImportError when native Azure completion fails to initialize.
    This ensures we don't silently fall back when there's a configuration issue.
    """
    # Mock the _get_native_provider to return a failing class
    with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
        class FailingCompletion:
            # Constructor always fails, simulating a broken native SDK setup.
            def __init__(self, *args, **kwargs):
                raise Exception("Native Azure AI Inference SDK failed")
        mock_get_provider.return_value = FailingCompletion

        # This should raise ImportError, not fall back to LiteLLM
        with pytest.raises(ImportError) as excinfo:
            LLM(model="azure/gpt-4")
        # The original failure reason must be surfaced in the error message.
        assert "Error importing native provider" in str(excinfo.value)
        assert "Native Azure AI Inference SDK failed" in str(excinfo.value)
def test_azure_completion_initialization_parameters():
    """
    Test that AzureCompletion is initialized with correct parameters
    """
    llm = LLM(
        model="azure/gpt-4",
        temperature=0.7,
        max_tokens=2000,
        top_p=0.9,
        frequency_penalty=0.5,
        presence_penalty=0.3,
        api_key="test-key",
        endpoint="https://test.openai.azure.com"
    )
    from crewai.llms.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)
    # Every sampling parameter passed to the constructor must be stored verbatim.
    assert llm.model == "gpt-4"
    assert llm.temperature == 0.7
    assert llm.max_tokens == 2000
    assert llm.top_p == 0.9
    assert llm.frequency_penalty == 0.5
    assert llm.presence_penalty == 0.3
def test_azure_specific_parameters():
    """
    Test Azure-specific parameters like stop sequences, streaming, and API version
    """
    from crewai.llms.providers.azure.completion import AzureCompletion

    stop_words = ["Human:", "Assistant:"]
    instance = LLM(
        model="azure/gpt-4",
        stop=stop_words,
        stream=True,
        api_version="2024-02-01",
        endpoint="https://test.openai.azure.com",
    )
    # All Azure-specific options must round-trip onto the instance.
    assert isinstance(instance, AzureCompletion)
    assert instance.stop == stop_words
    assert instance.stream == True
    assert instance.api_version == "2024-02-01"
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_call():
    """
    Test that AzureCompletion call method works
    """
    llm = LLM(model="azure/gpt-4")
    # Mock the call method on the instance
    with patch.object(llm, 'call', return_value="Hello! I'm Azure OpenAI, ready to help.") as mock_call:
        result = llm.call("Hello, how are you?")
        assert result == "Hello! I'm Azure OpenAI, ready to help."
        # Exactly one invocation, with the prompt passed through unchanged.
        mock_call.assert_called_once_with("Hello, how are you?")
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_called_during_crew_execution():
    """
    Test that AzureCompletion.call is actually invoked when running a crew
    """
    # Create the LLM instance first
    azure_llm = LLM(model="azure/gpt-4")
    # Mock the call method on the specific instance so no network is hit.
    with patch.object(azure_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
        # Create agent with explicit LLM configuration
        agent = Agent(
            role="Research Assistant",
            goal="Find population info",
            backstory="You research populations.",
            llm=azure_llm,
        )
        task = Task(
            description="Find Tokyo population",
            expected_output="Population number",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        # Verify mock was called and its canned answer surfaced in the crew output.
        assert mock_call.called
        assert "14 million" in str(result)
@pytest.mark.usefixtures("mock_azure_credentials")
def test_azure_completion_call_arguments():
    """
    Test that AzureCompletion.call is invoked with correct arguments
    """
    # Create LLM instance first
    azure_llm = LLM(model="azure/gpt-4")
    # Mock the instance method
    with patch.object(azure_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed successfully."
        agent = Agent(
            role="Test Agent",
            goal="Complete a simple task",
            backstory="You are a test agent.",
            llm=azure_llm  # Use same instance
        )
        task = Task(
            description="Say hello world",
            expected_output="Hello world",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        # Verify call was made
        assert mock_call.called
        # Check the arguments passed to the call method
        call_args = mock_call.call_args
        assert call_args is not None
        # The first argument should be the messages
        messages = call_args[0][0]  # First positional argument
        assert isinstance(messages, (str, list))
        # Verify that the task description appears in the messages,
        # whichever shape (prompt string or message list) the agent used.
        if isinstance(messages, str):
            assert "hello world" in messages.lower()
        elif isinstance(messages, list):
            message_content = str(messages).lower()
            assert "hello world" in message_content
def test_multiple_azure_calls_in_crew():
    """
    Test that AzureCompletion.call is invoked multiple times for multiple tasks
    """
    # Create LLM instance first
    azure_llm = LLM(model="azure/gpt-4")
    # Mock the instance method
    with patch.object(azure_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed."
        agent = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=azure_llm  # Use same instance
        )
        task1 = Task(
            description="First task",
            expected_output="First result",
            agent=agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second result",
            agent=agent,
        )
        crew = Crew(
            agents=[agent],
            tasks=[task1, task2]
        )
        crew.kickoff()
        # Verify multiple calls were made
        assert mock_call.call_count >= 2  # At least one call per task
        # Verify each call had proper arguments
        for call in mock_call.call_args_list:
            assert len(call[0]) > 0  # Has positional arguments
            messages = call[0][0]
            assert messages is not None
def test_azure_completion_with_tools():
    """
    Test that AzureCompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool

    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"

    # Create LLM instance first
    azure_llm = LLM(model="azure/gpt-4")
    # Mock the instance method
    with patch.object(azure_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed with tools."
        agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=azure_llm,  # Use same instance
            tools=[sample_tool]
        )
        task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        assert mock_call.called
        call_args = mock_call.call_args
        call_kwargs = call_args[1] if len(call_args) > 1 else {}
        # Tools are only asserted when the agent actually forwarded them;
        # if present they must be a non-empty collection.
        if 'tools' in call_kwargs:
            assert call_kwargs['tools'] is not None
            assert len(call_kwargs['tools']) > 0
def test_azure_raises_error_when_endpoint_missing():
    """Test that AzureCompletion raises ValueError when endpoint is missing"""
    from crewai.llms.providers.azure.completion import AzureCompletion

    # With a fully cleared environment, constructing without an endpoint must fail.
    with patch.dict(os.environ, {}, clear=True), pytest.raises(
        ValueError, match="Azure endpoint is required"
    ):
        AzureCompletion(model="gpt-4", api_key="test-key")
def test_azure_raises_error_when_api_key_missing():
    """Test that AzureCompletion raises ValueError when API key is missing"""
    from crewai.llms.providers.azure.completion import AzureCompletion

    # With a fully cleared environment, constructing without a key must fail.
    with patch.dict(os.environ, {}, clear=True), pytest.raises(
        ValueError, match="Azure API key is required"
    ):
        AzureCompletion(model="gpt-4", endpoint="https://test.openai.azure.com")
def test_azure_endpoint_configuration():
    """
    Test that Azure endpoint configuration works with multiple environment variable names
    """
    # Test with AZURE_ENDPOINT
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test1.openai.azure.com"
    }):
        llm = LLM(model="azure/gpt-4")
        from crewai.llms.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion)
        # Deployment path is appended automatically for Azure OpenAI hosts.
        assert llm.endpoint == "https://test1.openai.azure.com/openai/deployments/gpt-4"
    # Test with AZURE_OPENAI_ENDPOINT; clear=True ensures AZURE_ENDPOINT
    # from the first block cannot leak into this one.
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_OPENAI_ENDPOINT": "https://test2.openai.azure.com"
    }, clear=True):
        llm = LLM(model="azure/gpt-4")
        assert isinstance(llm, AzureCompletion)
        # Endpoint should be auto-constructed for Azure OpenAI
        assert llm.endpoint == "https://test2.openai.azure.com/openai/deployments/gpt-4"
def test_azure_api_key_configuration():
    """
    Test that API key configuration works from AZURE_API_KEY environment variable
    """
    from crewai.llms.providers.azure.completion import AzureCompletion

    env = {
        "AZURE_API_KEY": "test-azure-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com",
    }
    with patch.dict(os.environ, env):
        instance = LLM(model="azure/gpt-4")
        # The key must be read from the environment and stored on the instance.
        assert isinstance(instance, AzureCompletion)
        assert instance.api_key == "test-azure-key"
def test_azure_model_capabilities():
    """
    Test that model capabilities are correctly identified
    """
    # Test GPT-4 model (supports function calling)
    llm_gpt4 = LLM(model="azure/gpt-4")
    from crewai.llms.providers.azure.completion import AzureCompletion
    assert isinstance(llm_gpt4, AzureCompletion)
    assert llm_gpt4.is_openai_model == True
    assert llm_gpt4.supports_function_calling() == True
    # Test GPT-3.5 model (Azure deployment name uses "gpt-35-turbo")
    llm_gpt35 = LLM(model="azure/gpt-35-turbo")
    assert isinstance(llm_gpt35, AzureCompletion)
    assert llm_gpt35.is_openai_model == True
    assert llm_gpt35.supports_function_calling() == True
def test_azure_completion_params_preparation():
    """
    Test that completion parameters are properly prepared
    """
    # Non-Azure-OpenAI inference endpoint, so the model name stays in params.
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        llm = LLM(
            model="azure/gpt-4",
            temperature=0.7,
            top_p=0.9,
            frequency_penalty=0.5,
            presence_penalty=0.3,
            max_tokens=1000
        )
        from crewai.llms.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion)
        messages = [{"role": "user", "content": "Hello"}]
        params = llm._prepare_completion_params(messages)
        # Every configured sampling parameter must be forwarded to the API payload.
        assert params["model"] == "gpt-4"
        assert params["temperature"] == 0.7
        assert params["top_p"] == 0.9
        assert params["frequency_penalty"] == 0.5
        assert params["presence_penalty"] == 0.3
        assert params["max_tokens"] == 1000
def test_azure_model_detection():
    """
    Test that various Azure model formats are properly detected
    """
    from crewai.llms.providers.azure.completion import AzureCompletion

    # Both 'azure/' and 'azure_openai/' prefixes must route to AzureCompletion.
    for model_name in (
        "azure/gpt-4",
        "azure_openai/gpt-4",
        "azure/gpt-4o",
        "azure/gpt-35-turbo",
    ):
        candidate = LLM(model=model_name)
        assert isinstance(candidate, AzureCompletion), f"Failed for model: {model_name}"
def test_azure_supports_stop_words():
    """
    Test that Azure models support stop sequences
    """
    # GPT-4 uses the chat completions API, which accepts stop sequences.
    assert LLM(model="azure/gpt-4").supports_stop_words() == True
def test_azure_gpt5_models_do_not_support_stop_words():
    """
    Test that GPT-5 family models do not support stop words.
    GPT-5 models use the Responses API which doesn't support stop sequences.
    See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
    """
    gpt5_family = (
        # GPT-5 base models
        "azure/gpt-5",
        "azure/gpt-5-mini",
        "azure/gpt-5-nano",
        "azure/gpt-5-chat",
        # GPT-5.1 series
        "azure/gpt-5.1",
        "azure/gpt-5.1-chat",
        "azure/gpt-5.1-codex",
        "azure/gpt-5.1-codex-mini",
        # GPT-5.2 series
        "azure/gpt-5.2",
        "azure/gpt-5.2-chat",
    )
    for model_name in gpt5_family:
        candidate = LLM(model=model_name)
        assert candidate.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_o_series_models_do_not_support_stop_words():
    """
    Test that o-series reasoning models do not support stop words.
    """
    # Covers all o1/o3/o4 reasoning variants.
    for model_name in (
        "azure/o1",
        "azure/o1-mini",
        "azure/o3",
        "azure/o3-mini",
        "azure/o4",
        "azure/o4-mini",
    ):
        candidate = LLM(model=model_name)
        assert candidate.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_responses_api_models_do_not_support_stop_words():
    """
    Test that models using the Responses API do not support stop words.
    """
    # Currently only one Responses-API-only model is checked here.
    for model_name in ("azure/computer-use-preview",):
        candidate = LLM(model=model_name)
        assert candidate.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_stop_words_not_included_for_unsupported_models():
    """
    Test that stop words are not included in completion params for models that don't support them.
    """
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        # Test GPT-5 model - stop should NOT be included even if set
        llm_gpt5 = LLM(
            model="azure/gpt-5-nano",
            stop=["STOP", "END"]
        )
        params = llm_gpt5._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        # The configured stop list must be silently dropped for GPT-5.
        assert "stop" not in params, "stop should not be included for GPT-5 models"
        # Test regular model - stop SHOULD be included
        llm_gpt4 = LLM(
            model="azure/gpt-4",
            stop=["STOP", "END"]
        )
        params = llm_gpt4._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        assert "stop" in params, "stop should be included for GPT-4 models"
        assert params["stop"] == ["STOP", "END"]
def test_azure_context_window_size():
    """
    Test that Azure models return correct context window sizes
    """
    # GPT-4 must report a positive window size.
    window_gpt4 = LLM(model="azure/gpt-4").get_context_window_size()
    assert window_gpt4 > 0  # Should return valid context size
    # GPT-4o's window must be strictly larger than GPT-4's.
    window_gpt4o = LLM(model="azure/gpt-4o").get_context_window_size()
    assert window_gpt4o > window_gpt4  # GPT-4o has larger context
def test_azure_message_formatting():
    """
    Test that messages are properly formatted for Azure API
    """
    llm = LLM(model="azure/gpt-4")
    # Test message formatting across all three roles.
    test_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"},
        {"role": "user", "content": "How are you?"}
    ]
    formatted_messages = llm._format_messages_for_azure(test_messages)
    # All messages should be formatted as dictionaries with content,
    # and none may be dropped or merged.
    assert len(formatted_messages) == 4
    # Verify each message is a dict with content
    for msg in formatted_messages:
        assert isinstance(msg, dict)
        assert "content" in msg
def test_azure_streaming_parameter():
    """
    Test that streaming parameter is properly handled
    """
    # The stream flag must round-trip unchanged for both settings.
    for flag in (False, True):
        instance = LLM(model="azure/gpt-4", stream=flag)
        assert instance.stream == flag
def test_azure_tool_conversion():
    """
    Test that tools are properly converted to Azure OpenAI format
    """
    llm = LLM(model="azure/gpt-4")
    # Mock tool in CrewAI format
    crewai_tools = [{
        "type": "function",
        "function": {
            "name": "test_tool",
            "description": "A test tool",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    }]
    # Test tool conversion.
    # NOTE(review): the method name says "interference" — presumably a typo
    # for "inference" in the provider API; confirm against completion.py.
    azure_tools = llm._convert_tools_for_interference(crewai_tools)
    assert len(azure_tools) == 1
    # Azure tools should maintain the function calling format
    assert azure_tools[0]["type"] == "function"
    assert azure_tools[0]["function"]["name"] == "test_tool"
    assert azure_tools[0]["function"]["description"] == "A test tool"
    assert "parameters" in azure_tools[0]["function"]
def test_azure_environment_variable_endpoint():
    """
    Test that Azure endpoint is properly loaded from environment
    """
    env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com",
    }
    with patch.dict(os.environ, env):
        instance = LLM(model="azure/gpt-4")
        # A client must be constructed and the deployment path auto-appended.
        assert instance.client is not None
        assert instance.endpoint == "https://test.openai.azure.com/openai/deployments/gpt-4"
def test_azure_token_usage_tracking():
    """
    Test that token usage is properly tracked for Azure responses
    """
    llm = LLM(model="azure/gpt-4")
    # Mock the Azure response with usage information
    with patch.object(llm.client, 'complete') as mock_complete:
        # Plain-text assistant message with no tool calls.
        mock_message = MagicMock()
        mock_message.content = "test response"
        mock_message.tool_calls = None
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.usage = MagicMock(
            prompt_tokens=50,
            completion_tokens=25,
            total_tokens=75
        )
        mock_complete.return_value = mock_response
        result = llm.call("Hello")
        # Verify the response
        assert result == "test response"
        # Verify token usage was extracted from the mocked usage object.
        usage = llm._extract_azure_token_usage(mock_response)
        assert usage["prompt_tokens"] == 50
        assert usage["completion_tokens"] == 25
        assert usage["total_tokens"] == 75
def test_azure_http_error_handling():
    """
    Test that Azure HTTP errors are properly handled
    """
    from azure.core.exceptions import HttpResponseError

    llm = LLM(model="azure/gpt-4")
    # Mock an HTTP error (429 rate limit) raised by the underlying client.
    with patch.object(llm.client, 'complete') as mock_complete:
        mock_complete.side_effect = HttpResponseError(message="Rate limit exceeded", response=MagicMock(status_code=429))
        # The error must propagate to the caller rather than being swallowed.
        with pytest.raises(HttpResponseError):
            llm.call("Hello")
@pytest.mark.vcr()
def test_azure_streaming_completion():
    """
    Test that streaming completions work properly
    """
    # Recorded via VCR: streamed chunks must be assembled into one string.
    llm = LLM(model="azure/gpt-4o-mini", stream=True)
    result = llm.call("Say hello")
    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0
def test_azure_api_version_default():
    """
    Test that Azure API version defaults correctly
    """
    from crewai.llms.providers.azure.completion import AzureCompletion

    instance = LLM(model="azure/gpt-4")
    assert isinstance(instance, AzureCompletion)
    # Should use default or environment variable
    assert instance.api_version is not None
def test_azure_function_calling_support():
    """
    Test that function calling is supported for OpenAI models
    """
    # Both GPT-4 and GPT-3.5 deployments support function calling.
    for model_name in ("azure/gpt-4", "azure/gpt-35-turbo"):
        instance = LLM(model=model_name)
        assert instance.supports_function_calling() == True
def test_azure_openai_endpoint_url_construction():
    """
    Test that Azure OpenAI endpoint URLs are automatically constructed correctly:
    the deployment path is appended to a bare resource URL.
    """
    # Fix: dropped the unused local import of AzureCompletion (the test only
    # inspects instance attributes, never the class itself).
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test-resource.openai.azure.com"
    }):
        llm = LLM(model="azure/gpt-4o-mini")
        assert "/openai/deployments/gpt-4o-mini" in llm.endpoint
        assert llm.endpoint == "https://test-resource.openai.azure.com/openai/deployments/gpt-4o-mini"
        assert llm.is_azure_openai_endpoint == True
def test_azure_openai_endpoint_url_with_trailing_slash():
    """
    Test that trailing slashes are handled correctly in endpoint URLs:
    no double slash may appear in the constructed deployment URL.
    """
    # Fix: dropped the unused local import of AzureCompletion.
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test-resource.openai.azure.com/"  # trailing slash
    }):
        llm = LLM(model="azure/gpt-4o")
        assert llm.endpoint == "https://test-resource.openai.azure.com/openai/deployments/gpt-4o"
        assert not llm.endpoint.endswith("//")
def test_azure_openai_endpoint_already_complete():
    """
    Test that already complete Azure OpenAI endpoint URLs are not modified
    """
    env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test-resource.openai.azure.com/openai/deployments/my-deployment",
    }
    with patch.dict(os.environ, env):
        instance = LLM(model="azure/gpt-4")
        # A URL that already contains the deployment path is kept verbatim.
        assert instance.endpoint == "https://test-resource.openai.azure.com/openai/deployments/my-deployment"
        assert instance.is_azure_openai_endpoint == True
def test_non_azure_openai_endpoint_unchanged():
    """
    Test that non-Azure OpenAI endpoints are not modified
    """
    env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com",
    }
    with patch.dict(os.environ, env):
        instance = LLM(model="azure/mistral-large")
        # Generic inference endpoints get no deployment path appended.
        assert instance.endpoint == "https://models.inference.ai.azure.com"
        assert instance.is_azure_openai_endpoint == False
def test_azure_openai_model_parameter_excluded():
    """
    Test that model parameter is NOT included for Azure OpenAI endpoints
    """
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com/openai/deployments/gpt-4"
    }):
        llm = LLM(model="azure/gpt-4")
        # Prepare params to check model parameter handling
        params = llm._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        # Model parameter should NOT be included for Azure OpenAI endpoints
        # (the deployment in the URL already selects the model).
        assert "model" not in params
        assert "messages" in params
        assert params["stream"] == False
def test_non_azure_openai_model_parameter_included():
    """
    Test that model parameter IS included for non-Azure OpenAI endpoints,
    where the request body (not the URL) selects the model.
    """
    # Fix: dropped the unused local import of AzureCompletion.
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        llm = LLM(model="azure/mistral-large")
        params = llm._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        assert "model" in params
        assert params["model"] == "mistral-large"
def test_azure_message_formatting_with_role():
    """
    Test that messages are formatted with both 'role' and 'content' fields,
    whether the input is a bare string or a list of message dicts.
    """
    # Fix: dropped the unused local import of AzureCompletion.
    llm = LLM(model="azure/gpt-4")
    # Test with string message: it must be wrapped into at least one
    # role/content dict.
    formatted = llm._format_messages_for_azure("Hello world")
    assert isinstance(formatted, list)
    assert len(formatted) > 0
    assert "role" in formatted[0]
    assert "content" in formatted[0]
    # Test with a full conversation: every message keeps a valid role.
    messages = [
        {"role": "system", "content": "You are helpful"},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there"}
    ]
    formatted = llm._format_messages_for_azure(messages)
    for msg in formatted:
        assert "role" in msg
        assert "content" in msg
        assert msg["role"] in ["system", "user", "assistant"]
def test_azure_message_formatting_default_role():
    """
    Test that messages without a role default to 'user'
    """
    instance = LLM(model="azure/gpt-4")
    # Test with message that has role but tests default behavior
    formatted = instance._format_messages_for_azure([{"role": "user", "content": "test message"}])
    first = formatted[0]
    assert first["role"] == "user"
    assert first["content"] == "test message"
def test_azure_endpoint_detection_flags():
    """
    Test that is_azure_openai_endpoint flag is set correctly
    """
    # Azure OpenAI deployment URL -> flag True; generic inference URL -> False.
    cases = [
        ("https://test.openai.azure.com/openai/deployments/gpt-4", "azure/gpt-4", True),
        ("https://models.inference.ai.azure.com", "azure/mistral-large", False),
    ]
    for endpoint, model_name, expected_flag in cases:
        env = {"AZURE_API_KEY": "test-key", "AZURE_ENDPOINT": endpoint}
        with patch.dict(os.environ, env):
            instance = LLM(model=model_name)
            assert instance.is_azure_openai_endpoint == expected_flag
def test_azure_improved_error_messages():
    """
    Test that improved error messages are provided for common HTTP errors
    (401 unauthorized, 404 not found, 429 rate limited): each must still
    surface as an HttpResponseError to the caller.
    """
    # Fix: dropped the unused local import of AzureCompletion
    # (HttpResponseError is the only name this test needs).
    from azure.core.exceptions import HttpResponseError

    llm = LLM(model="azure/gpt-4")
    with patch.object(llm.client, 'complete') as mock_complete:
        # Exercise each common status code in turn.
        for status, message in ((401, "Unauthorized"), (404, "Not Found"), (429, "Rate Limited")):
            error = HttpResponseError(message=message)
            error.status_code = status
            mock_complete.side_effect = error
            with pytest.raises(HttpResponseError):
                llm.call("test")
def test_azure_api_version_properly_passed():
    """
    Test that api_version is properly passed to the client.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    explicit_env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com",
        "AZURE_API_VERSION": ""  # Clear env var to test default
    }
    with patch.dict(os.environ, explicit_env, clear=False):
        # An explicitly supplied api_version wins over the environment.
        llm = LLM(model="azure/gpt-4", api_version="2024-08-01")
        assert llm.api_version == "2024-08-01"
    default_env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com"
    }
    with patch.dict(os.environ, default_env, clear=True):
        # With no version anywhere, the provider's built-in default applies.
        llm_default = LLM(model="azure/gpt-4")
        assert llm_default.api_version == "2024-06-01"  # Current default
def test_azure_timeout_and_max_retries_stored():
    """
    Test that timeout and max_retries parameters are stored.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com"
    }
    with patch.dict(os.environ, env):
        llm = LLM(model="azure/gpt-4", timeout=60.0, max_retries=5)
        # Both knobs should be retained verbatim on the instance.
        assert llm.timeout == 60.0
        assert llm.max_retries == 5
def test_azure_complete_params_include_optional_params():
    """
    Test that optional parameters are included in completion params when set.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        # Constructor kwargs and their expected echoes in the request params.
        expected = {
            "temperature": 0.7,
            "top_p": 0.9,
            "frequency_penalty": 0.5,
            "presence_penalty": 0.3,
            "max_tokens": 1000,
            "stop": ["STOP", "END"],
        }
        llm = LLM(model="azure/gpt-4", **expected)
        params = llm._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        # Every optional knob set above must round-trip into the params.
        for key, value in expected.items():
            assert params[key] == value
def test_azure_endpoint_validation_with_azure_prefix():
    """
    Test that 'azure/' prefix is properly stripped when constructing endpoint.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    env = {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://test.openai.azure.com"
    }
    with patch.dict(os.environ, env):
        llm = LLM(model="azure/gpt-4o-mini")
        # The deployment name appears in the endpoint, the provider prefix does not.
        assert "gpt-4o-mini" in llm.endpoint
        assert "azure/gpt-4o-mini" not in llm.endpoint
def test_azure_message_formatting_preserves_all_roles():
    """
    Test that all message roles (system, user, assistant) are preserved correctly.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    llm = LLM(model="azure/gpt-4")
    conversation = [
        {"role": "system", "content": "System message"},
        {"role": "user", "content": "User message"},
        {"role": "assistant", "content": "Assistant message"},
        {"role": "user", "content": "Another user message"}
    ]
    formatted = llm._format_messages_for_azure(conversation)
    # Formatting must preserve order and the role/content pairing of each turn.
    for idx, msg in enumerate(conversation):
        assert formatted[idx]["role"] == msg["role"]
        assert formatted[idx]["content"] == msg["content"]
def test_azure_deepseek_model_support():
    """
    Test that DeepSeek and other non-OpenAI models work correctly with Azure AI Inference.
    """
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        # Test DeepSeek model
        llm_deepseek = LLM(model="azure/deepseek-chat")
        # Endpoint should not be modified for non-OpenAI endpoints
        assert llm_deepseek.endpoint == "https://models.inference.ai.azure.com"
        # Identity comparison for booleans (PEP 8 / flake8 E712) instead of ==.
        assert llm_deepseek.is_azure_openai_endpoint is False
        # Model parameter should be included in completion params
        params = llm_deepseek._prepare_completion_params(
            messages=[{"role": "user", "content": "test"}]
        )
        assert "model" in params
        assert params["model"] == "deepseek-chat"
        # Should not be detected as OpenAI model (no function calling)
        assert llm_deepseek.is_openai_model is False
        assert llm_deepseek.supports_function_calling() is False
def test_azure_mistral_and_other_models():
    """
    Test that various non-OpenAI models (Mistral, Llama, etc.) work with Azure AI Inference.
    """
    test_models = [
        "mistral-large-latest",
        "llama-3-70b-instruct",
        "cohere-command-r-plus"
    ]
    for model_name in test_models:
        with patch.dict(os.environ, {
            "AZURE_API_KEY": "test-key",
            "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
        }):
            llm = LLM(model=f"azure/{model_name}")
            # Verify endpoint is not modified
            assert llm.endpoint == "https://models.inference.ai.azure.com"
            # Identity comparison for booleans (PEP 8 / flake8 E712) instead of ==.
            assert llm.is_azure_openai_endpoint is False
            # Verify model parameter is included
            params = llm._prepare_completion_params(
                messages=[{"role": "user", "content": "test"}]
            )
            assert "model" in params
            assert params["model"] == model_name
def test_azure_completion_params_preparation_with_drop_params():
    """
    Test that completion parameters are properly prepared with drop parameters attribute respected.
    """
    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
    }):
        llm = LLM(
            model="azure/o4-mini",
            drop_params=True,
            additional_drop_params=["stop"],
            max_tokens=1000
        )
        from crewai.llms.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion)
        messages = [{"role": "user", "content": "Hello"}]
        params = llm._prepare_completion_params(messages)
        # 'stop' was listed in additional_drop_params, so it must be absent.
        # Use `is None` for None comparisons (PEP 8 / flake8 E711).
        assert params.get('stop') is None
@pytest.mark.vcr()
def test_azure_streaming_returns_usage_metrics():
    """
    Test that Azure streaming calls return proper token usage metrics.
    """
    # NOTE(review): replays a recorded VCR cassette; the exact token counts come
    # from the recording, so we only assert presence and positivity below.
    agent = Agent(
        role="Research Assistant",
        goal="Find information about the capital of Spain",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="azure/gpt-4o-mini", stream=True),
        verbose=True,
    )
    task = Task(
        description="What is the capital of Spain?",
        expected_output="The capital of Spain",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    # Streaming responses must still aggregate usage across chunks.
    assert result.token_usage is not None
    assert result.token_usage.total_tokens > 0
    assert result.token_usage.prompt_tokens > 0
    assert result.token_usage.completion_tokens > 0
    assert result.token_usage.successful_requests >= 1
# =============================================================================
# Agent Kickoff Structured Output Tests
# =============================================================================
@pytest.mark.vcr()
def test_azure_agent_kickoff_structured_output_without_tools():
    """
    Test that agent kickoff returns structured output without tools.
    This tests native structured output handling for Azure OpenAI models.
    """
    from pydantic import BaseModel, Field
    class AnalysisResult(BaseModel):
        """Structured output for analysis results."""
        topic: str = Field(description="The topic analyzed")
        key_points: list[str] = Field(description="Key insights from the analysis")
        summary: str = Field(description="Brief summary of findings")
    # tools=[] keeps the agent on the pure structured-output code path.
    agent = Agent(
        role="Analyst",
        goal="Provide structured analysis on topics",
        backstory="You are an expert analyst who provides clear, structured insights.",
        llm=LLM(model="azure/gpt-4o-mini"),
        tools=[],
        verbose=True,
    )
    result = agent.kickoff(
        messages="Analyze the benefits of remote work briefly. Keep it concise.",
        response_format=AnalysisResult,
    )
    # The response must parse into the requested Pydantic model with all fields populated.
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, AnalysisResult), f"Expected AnalysisResult but got {type(result.pydantic)}"
    assert result.pydantic.topic, "Topic should not be empty"
    assert len(result.pydantic.key_points) > 0, "Should have at least one key point"
    assert result.pydantic.summary, "Summary should not be empty"
@pytest.mark.vcr()
def test_azure_agent_kickoff_structured_output_with_tools():
    """
    Test that agent kickoff returns structured output after using tools.
    This tests post-tool-call structured output handling for Azure OpenAI models.
    """
    from pydantic import BaseModel, Field
    from crewai.tools import tool
    class CalculationResult(BaseModel):
        """Structured output for calculation results."""
        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")
    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b
    agent = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="azure/gpt-4o-mini"),
        tools=[add_numbers],
        verbose=True,
    )
    result = agent.kickoff(
        messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        response_format=CalculationResult,
    )
    # After the tool round-trip, the final answer must still be structured
    # and reflect the tool's computed value (15 + 27 = 42).
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
    assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
    assert result.pydantic.operation, "Operation should not be empty"
    assert result.pydantic.explanation, "Explanation should not be empty"
def test_azure_stop_words_not_applied_to_structured_output():
    """
    Test that stop words are NOT applied when response_model is provided.
    This ensures JSON responses containing stop word patterns (like "Observation:")
    are not truncated, which would cause JSON validation to fail.
    """
    from pydantic import BaseModel, Field
    from crewai.llms.providers.azure.completion import AzureCompletion
    class ResearchResult(BaseModel):
        """Research result that may contain stop word patterns in string fields."""
        finding: str = Field(description="The research finding")
        observation: str = Field(description="Observation about the finding")
    # Configure stop words that could collide with JSON string content.
    llm = AzureCompletion(
        model="gpt-4",
        api_key="test-key",
        endpoint="https://test.openai.azure.com",
        stop=["Observation:", "Final Answer:"],  # Common stop words
    )
    # JSON payload whose string field embeds a stop-word pattern; truncating
    # at "Observation:" would leave invalid JSON behind.
    json_response = '{"finding": "The data shows growth", "observation": "Observation: This confirms the hypothesis"}'
    with patch.object(llm.client, 'complete') as mock_complete:
        message = MagicMock()
        message.content = json_response
        message.tool_calls = None
        choice = MagicMock()
        choice.message = message
        response = MagicMock()
        response.choices = [choice]
        response.usage = MagicMock(
            prompt_tokens=100,
            completion_tokens=50,
            total_tokens=150
        )
        mock_complete.return_value = response
        # With response_model supplied, stop-word trimming must be skipped.
        parsed = llm.call(
            messages=[{"role": "user", "content": "Analyze the data"}],
            response_model=ResearchResult,
        )
        assert isinstance(parsed, ResearchResult)
        assert parsed.finding == "The data shows growth"
        # The full text, including "Observation:", must survive parsing.
        assert "Observation:" in parsed.observation
def test_azure_stop_words_still_applied_to_regular_responses():
    """
    Test that stop words ARE still applied for regular (non-structured) responses.
    This ensures the fix didn't break normal stop word behavior.
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    # Stop words configured up front; a plain-text reply hitting one of them
    # should be cut at that point.
    llm = AzureCompletion(
        model="gpt-4",
        api_key="test-key",
        endpoint="https://test.openai.azure.com",
        stop=["Observation:", "Final Answer:"],
    )
    reply_with_stop_word = "I need to search for more information.\n\nAction: search\nObservation: Found results"
    with patch.object(llm.client, 'complete') as mock_complete:
        message = MagicMock()
        message.content = reply_with_stop_word
        message.tool_calls = None
        choice = MagicMock()
        choice.message = message
        response = MagicMock()
        response.choices = [choice]
        response.usage = MagicMock(
            prompt_tokens=100,
            completion_tokens=50,
            total_tokens=150
        )
        mock_complete.return_value = response
        # No response_model here, so stop-word truncation applies as usual.
        result = llm.call(
            messages=[{"role": "user", "content": "Search for something"}],
        )
        # Everything from the stop word onward must be gone.
        assert "Observation:" not in result
        assert "Found results" not in result
        assert "I need to search for more information" in result
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/azure/test_azure.py",
"license": "MIT License",
"lines": 1114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/bedrock/test_bedrock.py | import os
import sys
import types
from unittest.mock import patch, MagicMock
import pytest
from crewai.llm import LLM
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
def _create_bedrock_mocks():
"""Helper to create Bedrock mocks."""
mock_session_class = MagicMock()
mock_session_instance = MagicMock()
mock_client = MagicMock()
# Set up default mock responses to prevent hanging
default_response = {
'output': {
'message': {
'role': 'assistant',
'content': [
{'text': 'Test response'}
]
}
},
'usage': {
'inputTokens': 10,
'outputTokens': 5,
'totalTokens': 15
}
}
mock_client.converse.return_value = default_response
mock_client.converse_stream.return_value = {'stream': []}
# Configure the mock session instance to return the mock client
mock_session_instance.client.return_value = mock_client
# Configure the mock Session class to return the mock session instance
mock_session_class.return_value = mock_session_instance
return mock_session_class, mock_client
@pytest.fixture(autouse=True)
def mock_aws_credentials():
    """Mock AWS credentials and boto3 Session for tests only if real credentials are not set.

    Yields:
        (mock Session class, mock client) when mocking is active, or
        (None, None) when real AWS credentials are present and real calls are allowed.
    """
    # If real AWS credentials exist, don't mock - allow real API calls
    if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
        yield None, None
        return
    with patch.dict(os.environ, {
        "AWS_ACCESS_KEY_ID": "test-access-key",
        "AWS_SECRET_ACCESS_KEY": "test-secret-key",
        "AWS_DEFAULT_REGION": "us-east-1"
    }):
        # Reuse the shared helper instead of duplicating the mock wiring,
        # and install its Session class as the patched replacement so no
        # actual AWS connection is ever attempted.
        mock_session_class, mock_client = _create_bedrock_mocks()
        with patch('crewai.llms.providers.bedrock.completion.Session', mock_session_class):
            yield mock_session_class, mock_client
@pytest.fixture
def bedrock_mocks():
    """Fixture that always provides Bedrock mocks, regardless of real credentials.

    Use this fixture for tests that explicitly need to test mock behavior.

    Yields:
        (mock Session class, mock client) with canned converse responses.
    """
    with patch.dict(os.environ, {
        "AWS_ACCESS_KEY_ID": "test-access-key",
        "AWS_SECRET_ACCESS_KEY": "test-secret-key",
        "AWS_DEFAULT_REGION": "us-east-1"
    }):
        # Delegate mock construction to the shared helper to avoid drift
        # between this fixture and mock_aws_credentials.
        mock_session_class, mock_client = _create_bedrock_mocks()
        with patch('crewai.llms.providers.bedrock.completion.Session', mock_session_class):
            yield mock_session_class, mock_client
def test_bedrock_completion_is_used_when_bedrock_provider():
    """
    Test that BedrockCompletion from completion.py is used when LLM uses provider 'bedrock'
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # The factory should route to the native Bedrock implementation and
    # strip the provider prefix from the model id.
    assert type(llm).__name__ == "BedrockCompletion"
    assert llm.provider == "bedrock"
    assert llm.model == "anthropic.claude-3-5-sonnet-20241022-v2:0"
def test_bedrock_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Bedrock provider
    """
    module_name = "crewai.llms.providers.bedrock.completion"
    # Evict any cached copy so instantiating the LLM must re-import it.
    sys.modules.pop(module_name, None)
    LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # The lazy import should have registered a real module exposing the class.
    assert module_name in sys.modules
    completion_mod = sys.modules[module_name]
    assert isinstance(completion_mod, types.ModuleType)
    assert hasattr(completion_mod, 'BedrockCompletion')
def test_native_bedrock_raises_error_when_initialization_fails():
    """
    Test that LLM raises ImportError when native Bedrock completion fails.
    With the new behavior, when a native provider is in SUPPORTED_NATIVE_PROVIDERS
    but fails to instantiate, we raise an ImportError instead of silently falling back.
    This provides clearer error messages to users about missing dependencies.
    """
    class FailingCompletion:
        # Stand-in native provider whose construction always blows up.
        def __init__(self, *args, **kwargs):
            raise Exception("Native AWS Bedrock SDK failed")
    with patch('crewai.llm.LLM._get_native_provider', return_value=FailingCompletion):
        # Instantiation should surface an ImportError, not fall back silently.
        with pytest.raises(ImportError) as excinfo:
            LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
        # The message should identify both the failure kind and its root cause.
        error_text = str(excinfo.value)
        assert "Error importing native provider" in error_text
        assert "Native AWS Bedrock SDK failed" in error_text
def test_bedrock_completion_initialization_parameters():
    """
    Test that BedrockCompletion is initialized with correct parameters
    """
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    llm = LLM(
        model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
        temperature=0.7,
        max_tokens=2000,
        top_p=0.9,
        top_k=40,
        region_name="us-west-2"
    )
    assert isinstance(llm, BedrockCompletion)
    # Each constructor argument should be stored verbatim on the instance
    # (the model id loses its provider prefix).
    expected_attrs = {
        "model": "anthropic.claude-3-5-sonnet-20241022-v2:0",
        "temperature": 0.7,
        "max_tokens": 2000,
        "top_p": 0.9,
        "top_k": 40,
        "region_name": "us-west-2",
    }
    for attr, value in expected_attrs.items():
        assert getattr(llm, attr) == value
def test_bedrock_specific_parameters():
    """
    Test Bedrock-specific parameters like stop_sequences and streaming
    """
    llm = LLM(
        model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
        stop_sequences=["Human:", "Assistant:"],
        stream=True,
        region_name="us-east-1"
    )
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    assert isinstance(llm, BedrockCompletion)
    assert llm.stop_sequences == ["Human:", "Assistant:"]
    # Identity comparison for booleans (PEP 8 / flake8 E712) instead of ==.
    assert llm.stream is True
    assert llm.region_name == "us-east-1"
def test_bedrock_completion_call():
    """
    Test that BedrockCompletion call method works
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    canned = "Hello! I'm Claude on Bedrock, ready to help."
    # Stub the instance's call method and verify the passthrough behavior.
    with patch.object(llm, 'call', return_value=canned) as mock_call:
        assert llm.call("Hello, how are you?") == canned
        mock_call.assert_called_once_with("Hello, how are you?")
def test_bedrock_completion_called_during_crew_execution():
    """
    Test that BedrockCompletion.call is actually invoked when running a crew
    """
    # Create the LLM instance first
    bedrock_llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock the call method on the specific instance
    with patch.object(bedrock_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
        # Create agent with explicit LLM configuration
        agent = Agent(
            role="Research Assistant",
            goal="Find population info",
            backstory="You research populations.",
            llm=bedrock_llm,
        )
        task = Task(
            description="Find Tokyo population",
            expected_output="Population number",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        # Verify mock was called
        assert mock_call.called
        # The canned LLM reply should flow through to the crew's final result.
        assert "14 million" in str(result)
@pytest.mark.skip(reason="Crew execution test - may hang, needs investigation")
def test_bedrock_completion_call_arguments():
    """
    Test that BedrockCompletion.call is invoked with correct arguments
    """
    # Create LLM instance first
    bedrock_llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock the instance method
    with patch.object(bedrock_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed successfully."
        agent = Agent(
            role="Test Agent",
            goal="Complete a simple task",
            backstory="You are a test agent.",
            llm=bedrock_llm  # Use same instance
        )
        task = Task(
            description="Say hello world",
            expected_output="Hello world",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        # Verify call was made
        assert mock_call.called
        # Check the arguments passed to the call method
        call_args = mock_call.call_args
        assert call_args is not None
        # The first argument should be the messages
        messages = call_args[0][0]  # First positional argument
        # Messages may arrive as a prompt string or a chat-message list.
        assert isinstance(messages, (str, list))
        # Verify that the task description appears in the messages
        if isinstance(messages, str):
            assert "hello world" in messages.lower()
        elif isinstance(messages, list):
            message_content = str(messages).lower()
            assert "hello world" in message_content
def test_multiple_bedrock_calls_in_crew():
    """
    Test that BedrockCompletion.call is invoked multiple times for multiple tasks
    """
    # Create LLM instance first
    bedrock_llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock the instance method
    with patch.object(bedrock_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed."
        agent = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=bedrock_llm  # Use same instance
        )
        task1 = Task(
            description="First task",
            expected_output="First result",
            agent=agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second result",
            agent=agent,
        )
        crew = Crew(
            agents=[agent],
            tasks=[task1, task2]
        )
        crew.kickoff()
        # Verify multiple calls were made
        assert mock_call.call_count >= 2  # At least one call per task
        # Verify each call had proper arguments
        for call in mock_call.call_args_list:
            assert len(call[0]) > 0  # Has positional arguments
            # First positional argument is the messages payload; it must exist.
            messages = call[0][0]
            assert messages is not None
def test_bedrock_completion_with_tools():
    """
    Test that BedrockCompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool
    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"
    # Create LLM instance first
    bedrock_llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock the instance method
    with patch.object(bedrock_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed with tools."
        agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=bedrock_llm,  # Use same instance
            tools=[sample_tool]
        )
        task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        assert mock_call.called
        # If the executor forwarded tools, they arrive as a non-empty kwarg;
        # the check is conditional because forwarding depends on the agent path.
        call_args = mock_call.call_args
        call_kwargs = call_args[1] if len(call_args) > 1 else {}
        if 'tools' in call_kwargs:
            assert call_kwargs['tools'] is not None
            assert len(call_kwargs['tools']) > 0
def test_bedrock_raises_error_when_model_not_found(bedrock_mocks):
    """Test that BedrockCompletion raises appropriate error when model not found"""
    from botocore.exceptions import ClientError
    # Get the mock client from the fixture
    _, mock_client = bedrock_mocks
    # Simulate AWS rejecting the model identifier at converse time.
    not_found = ClientError(
        {
            'Error': {
                'Code': 'ResourceNotFoundException',
                'Message': 'Could not resolve the foundation model from the model identifier'
            }
        },
        'converse',
    )
    mock_client.converse.side_effect = not_found
    llm = LLM(model="bedrock/model-doesnt-exist")
    with pytest.raises(Exception):  # Should raise some error for unsupported model
        llm.call("Hello")
def test_bedrock_aws_credentials_configuration():
    """
    Test that AWS credentials configuration works properly
    """
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    key_id = "test-access-key"
    secret = "test-secret-key"
    region = "us-east-1"
    model_id = "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0"
    # Credentials supplied via standard AWS environment variables.
    with patch.dict(os.environ, {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": secret,
        "AWS_DEFAULT_REGION": region
    }):
        llm = LLM(model=model_id)
        assert isinstance(llm, BedrockCompletion)
        assert llm.region_name == region
        assert llm.aws_access_key_id == key_id
        assert llm.aws_secret_access_key == secret
    # Region supplied via the litellm-style AWS_REGION_NAME variable.
    with patch.dict(os.environ, {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": secret,
        "AWS_REGION_NAME": region
    }):
        llm = LLM(model=model_id)
        assert isinstance(llm, BedrockCompletion)
        assert llm.region_name == region
    # Credentials passed explicitly as constructor arguments.
    llm_explicit = LLM(
        model=model_id,
        aws_access_key_id="explicit-key",
        aws_secret_access_key="explicit-secret",
        region_name="us-west-2"
    )
    assert isinstance(llm_explicit, BedrockCompletion)
    assert llm_explicit.region_name == "us-west-2"
def test_bedrock_model_capabilities():
    """
    Test that model capabilities are correctly identified
    """
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    # Test Claude model
    llm_claude = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    assert isinstance(llm_claude, BedrockCompletion)
    # Identity comparison for booleans (PEP 8 / flake8 E712) instead of ==.
    assert llm_claude.is_claude_model is True
    assert llm_claude.supports_tools is True
    # Test other Bedrock model
    llm_titan = LLM(model="bedrock/amazon.titan-text-express-v1")
    assert isinstance(llm_titan, BedrockCompletion)
    assert llm_titan.supports_tools is True
def test_bedrock_inference_config():
    """
    Test that inference config is properly prepared
    """
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    llm = LLM(
        model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
        temperature=0.7,
        top_p=0.9,
        top_k=40,
        max_tokens=1000
    )
    assert isinstance(llm, BedrockCompletion)
    config = llm._get_inference_config()
    # The config maps snake_case constructor args onto Converse camelCase keys.
    expected = {
        'temperature': 0.7,
        'topP': 0.9,
        'maxTokens': 1000,
        'topK': 40,
    }
    for key, value in expected.items():
        assert key in config
        assert config[key] == value
def test_bedrock_model_detection():
    """
    Test that various Bedrock model formats are properly detected
    """
    # Hoisted out of the loop: the import is loop-invariant.
    from crewai.llms.providers.bedrock.completion import BedrockCompletion
    # Test Bedrock model naming patterns
    bedrock_test_cases = [
        "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/amazon.titan-text-express-v1",
        "bedrock/meta.llama3-70b-instruct-v1:0"
    ]
    for model_name in bedrock_test_cases:
        llm = LLM(model=model_name)
        assert isinstance(llm, BedrockCompletion), f"Failed for model: {model_name}"
def test_bedrock_supports_stop_words():
    """
    Test that Bedrock models support stop sequences
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Identity comparison for booleans (PEP 8 / flake8 E712) instead of ==.
    assert llm.supports_stop_words() is True
def test_bedrock_context_window_size():
    """
    Test that Bedrock models return correct context window sizes
    """
    # Claude 3.5 Sonnet advertises a large (200K-token-class) window.
    claude = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    assert claude.get_context_window_size() > 150000
    # Titan Text Express has a much smaller (8K-class) window.
    titan = LLM(model="bedrock/amazon.titan-text-express-v1")
    assert titan.get_context_window_size() > 5000
def test_bedrock_message_formatting():
    """
    Test that messages are properly formatted for Bedrock Converse API
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"},
        {"role": "user", "content": "How are you?"}
    ]
    converse_messages, system_message = llm._format_messages_for_converse(conversation)
    # The system prompt is pulled out of the message list entirely.
    assert system_message == "You are a helpful assistant."
    # The remaining user/assistant/user turns stay in their original order.
    assert len(converse_messages) >= 3
    assert converse_messages[0]["role"] == "user"
    assert converse_messages[1]["role"] == "assistant"
    # Converse format wraps content in a list of {"text": ...} parts.
    first_content = converse_messages[0]["content"]
    assert isinstance(first_content, list)
    assert "text" in first_content[0]
def test_bedrock_streaming_parameter():
    """
    Test that streaming parameter is properly handled
    """
    # Identity comparisons for booleans (PEP 8 / flake8 E712) instead of ==.
    # Test non-streaming
    llm_no_stream = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", stream=False)
    assert llm_no_stream.stream is False
    # Test streaming
    llm_stream = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", stream=True)
    assert llm_stream.stream is True
def test_bedrock_tool_conversion():
    """
    Test that tools are properly converted to Bedrock Converse format
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # A tool definition in the OpenAI-style function format CrewAI uses.
    openai_style_tools = [{
        "type": "function",
        "function": {
            "name": "test_tool",
            "description": "A test tool",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    }]
    converted = llm._format_tools_for_converse(openai_style_tools)
    assert len(converted) == 1
    # Converse expects each tool wrapped in a toolSpec envelope.
    assert "toolSpec" in converted[0]
    spec = converted[0]["toolSpec"]
    assert spec["name"] == "test_tool"
    assert spec["description"] == "A test tool"
    assert "inputSchema" in spec
def test_bedrock_environment_variable_credentials(bedrock_mocks):
    """
    Test that AWS credentials are properly loaded from environment
    """
    mock_session_class, _ = bedrock_mocks
    # Clear earlier recorded calls so only this test's Session use is inspected.
    mock_session_class.reset_mock()
    env = {
        "AWS_ACCESS_KEY_ID": "test-access-key-123",
        "AWS_SECRET_ACCESS_KEY": "test-secret-key-456"
    }
    with patch.dict(os.environ, env):
        LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
        # Session(...) should have received the env-provided credentials.
        assert mock_session_class.called
        kwargs = mock_session_class.call_args[1] if mock_session_class.call_args else {}
        assert kwargs.get('aws_access_key_id') == "test-access-key-123"
        assert kwargs.get('aws_secret_access_key') == "test-secret-key-456"
def test_bedrock_token_usage_tracking():
    """
    Test that token usage is properly tracked for Bedrock responses
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock the Bedrock response with usage information
    with patch.object(llm.client, 'converse') as mock_converse:
        # Payload mirrors the Bedrock Converse API response shape.
        mock_response = {
            'output': {
                'message': {
                    'role': 'assistant',
                    'content': [
                        {'text': 'test response'}
                    ]
                }
            },
            'usage': {
                'inputTokens': 50,
                'outputTokens': 25,
                'totalTokens': 75
            }
        }
        mock_converse.return_value = mock_response
        result = llm.call("Hello")
        # Verify the response
        assert result == "test response"
        # Verify token usage was tracked (Bedrock names mapped to OpenAI-style keys)
        assert llm._token_usage['prompt_tokens'] == 50
        assert llm._token_usage['completion_tokens'] == 25
        assert llm._token_usage['total_tokens'] == 75
def test_bedrock_tool_use_conversation_flow():
    """
    Test that the Bedrock completion properly handles tool use conversation flow
    """
    from unittest.mock import Mock
    # Create BedrockCompletion instance
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Mock tool function
    def mock_weather_tool(location: str) -> str:
        return f"The weather in {location} is sunny and 75°F"
    available_functions = {"get_weather": mock_weather_tool}
    # Mock the Bedrock client responses
    with patch.object(llm.client, 'converse') as mock_converse:
        # First response: tool use request
        tool_use_response = {
            'output': {
                'message': {
                    'role': 'assistant',
                    'content': [
                        {
                            'toolUse': {
                                'toolUseId': 'tool-123',
                                'name': 'get_weather',
                                'input': {'location': 'San Francisco'}
                            }
                        }
                    ]
                }
            },
            'usage': {
                'inputTokens': 100,
                'outputTokens': 50,
                'totalTokens': 150
            }
        }
        # Second response: final answer after tool execution
        final_response = {
            'output': {
                'message': {
                    'role': 'assistant',
                    'content': [
                        {'text': 'Based on the weather data, it is sunny and 75°F in San Francisco.'}
                    ]
                }
            },
            'usage': {
                'inputTokens': 120,
                'outputTokens': 30,
                'totalTokens': 150
            }
        }
        # Configure mock to return different responses on successive calls;
        # the ordering here IS the behavior under test.
        mock_converse.side_effect = [tool_use_response, final_response]
        # Test the call
        messages = [{"role": "user", "content": "What's the weather like in San Francisco?"}]
        result = llm.call(
            messages=messages,
            available_functions=available_functions
        )
        # Verify the final response contains the weather information
        assert "sunny" in result.lower() or "75" in result
        # Verify that the API was called twice (once for tool use, once for final answer)
        assert mock_converse.call_count == 2
def test_bedrock_handles_cohere_conversation_requirements():
    """
    Test that Bedrock properly handles Cohere model's requirement for user message at end
    """
    llm = LLM(model="bedrock/cohere.command-r-plus-v1:0")
    # A conversation ending on an assistant turn, which Cohere rejects as-is.
    history = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"}
    ]
    formatted, _system = llm._format_messages_for_converse(history)
    # A synthetic trailing user message should have been appended.
    last_message = formatted[-1]
    assert last_message["role"] == "user"
    assert "continue" in last_message["content"][0]["text"].lower()
def test_bedrock_client_error_handling():
    """
    Test that Bedrock properly handles various AWS client errors
    """
    from botocore.exceptions import ClientError
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Test ValidationException -> surfaced to callers as ValueError
    with patch.object(llm.client, 'converse') as mock_converse:
        error_response = {
            'Error': {
                'Code': 'ValidationException',
                'Message': 'Invalid request format'
            }
        }
        mock_converse.side_effect = ClientError(error_response, 'converse')
        with pytest.raises(ValueError) as exc_info:
            llm.call("Hello")
        assert "validation" in str(exc_info.value).lower()
    # Test ThrottlingException -> surfaced to callers as RuntimeError
    with patch.object(llm.client, 'converse') as mock_converse:
        error_response = {
            'Error': {
                'Code': 'ThrottlingException',
                'Message': 'Rate limit exceeded'
            }
        }
        mock_converse.side_effect = ClientError(error_response, 'converse')
        with pytest.raises(RuntimeError) as exc_info:
            llm.call("Hello")
        assert "throttled" in str(exc_info.value).lower()
def test_bedrock_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    expected = ["\nObservation:", "\nThought:"]
    # Assigning a list keeps both attributes in lockstep.
    llm.stop = expected
    assert list(llm.stop_sequences) == expected
    assert llm.stop == expected
    # A bare string is normalized to a one-element list.
    llm.stop = "\nFinal Answer:"
    assert list(llm.stop_sequences) == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]
    # None clears both attributes.
    llm.stop = None
    assert list(llm.stop_sequences) == []
    assert llm.stop == []
def test_bedrock_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Bedrock API."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
    llm.stop = ["\nObservation:", "\nThought:"]
    # Patch the API call to capture parameters without making real call
    with patch.object(llm.client, 'converse') as mock_converse:
        mock_response = {
            'output': {
                'message': {
                    'role': 'assistant',
                    'content': [{'text': 'Hello'}]
                }
            },
            'usage': {
                'inputTokens': 10,
                'outputTokens': 5,
                'totalTokens': 15
            }
        }
        mock_converse.return_value = mock_response
        llm.call("Say hello in one word")
        # Verify stop_sequences were passed to the API in the inference config
        # (Converse API nests them under inferenceConfig.stopSequences).
        call_kwargs = mock_converse.call_args[1]
        assert "inferenceConfig" in call_kwargs
        assert "stopSequences" in call_kwargs["inferenceConfig"]
        assert call_kwargs["inferenceConfig"]["stopSequences"] == ["\nObservation:", "\nThought:"]
# =============================================================================
# Agent Kickoff Structured Output Tests
# =============================================================================
@pytest.mark.vcr()
def test_bedrock_agent_kickoff_structured_output_without_tools():
    """
    Test that agent kickoff returns structured output without tools.
    This tests native structured output handling for Bedrock models.
    """
    from pydantic import BaseModel, Field
    class AnalysisResult(BaseModel):
        """Structured output for analysis results."""
        topic: str = Field(description="The topic analyzed")
        key_points: list[str] = Field(description="Key insights from the analysis")
        summary: str = Field(description="Brief summary of findings")
    agent = Agent(
        role="Analyst",
        goal="Provide structured analysis on topics",
        backstory="You are an expert analyst who provides clear, structured insights.",
        llm=LLM(model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"),
        tools=[],
        verbose=True,
    )
    # Replayed via VCR cassette; no live Bedrock call is made on CI.
    result = agent.kickoff(
        messages="Analyze the benefits of remote work briefly. Keep it concise.",
        response_format=AnalysisResult,
    )
    # The structured result must parse into the requested Pydantic model.
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, AnalysisResult), f"Expected AnalysisResult but got {type(result.pydantic)}"
    assert result.pydantic.topic, "Topic should not be empty"
    assert len(result.pydantic.key_points) > 0, "Should have at least one key point"
    assert result.pydantic.summary, "Summary should not be empty"
@pytest.mark.vcr()
def test_bedrock_agent_kickoff_structured_output_with_tools():
    """
    Test that agent kickoff returns structured output after using tools.
    This tests post-tool-call structured output handling for Bedrock models.
    """
    from pydantic import BaseModel, Field
    from crewai.tools import tool
    class CalculationResult(BaseModel):
        """Structured output for calculation results."""
        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")
    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b
    agent = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"),
        tools=[add_numbers],
        verbose=True,
    )
    # Replayed via VCR cassette; the 15 + 27 = 42 flow is pinned by the recording.
    result = agent.kickoff(
        messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        response_format=CalculationResult,
    )
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
    assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
    assert result.pydantic.operation, "Operation should not be empty"
    assert result.pydantic.explanation, "Explanation should not be empty"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/bedrock/test_bedrock.py",
"license": "MIT License",
"lines": 779,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/google/test_google.py | import os
import sys
import types
from unittest.mock import patch, MagicMock
import pytest
from crewai.llm import LLM
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
@pytest.fixture(autouse=True)
def mock_google_api_key():
    """Mock GOOGLE_API_KEY for tests only if real keys are not set."""
    has_real_key = "GOOGLE_API_KEY" in os.environ or "GEMINI_API_KEY" in os.environ
    if has_real_key:
        yield
    else:
        # Inject a placeholder key only for the duration of the test.
        with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
            yield
def test_gemini_completion_is_used_when_google_provider():
    """
    Test that GeminiCompletion from completion.py is used when LLM uses provider 'google'
    """
    llm = LLM(model="google/gemini-2.0-flash-001")
    # The 'google/' prefix should route to the native Gemini implementation,
    # normalizing the provider name and stripping the prefix from the model id.
    assert type(llm).__name__ == "GeminiCompletion"
    assert llm.provider == "gemini"
    assert llm.model == "gemini-2.0-flash-001"
def test_gemini_completion_is_used_when_gemini_provider():
    """
    Test that GeminiCompletion is used when provider is 'gemini'
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    llm = LLM(model="gemini/gemini-2.0-flash-001")
    # The 'gemini/' prefix routes to the same native class as 'google/'.
    assert isinstance(llm, GeminiCompletion)
    assert llm.provider == "gemini"
    assert llm.model == "gemini-2.0-flash-001"
def test_gemini_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Google provider
    """
    module_name = "crewai.llms.providers.gemini.completion"
    # Drop any cached copy so the import below must happen afresh.
    sys.modules.pop(module_name, None)
    # Instantiating the LLM should trigger the lazy provider import.
    LLM(model="google/gemini-2.0-flash-001")
    assert module_name in sys.modules
    completion_mod = sys.modules[module_name]
    assert isinstance(completion_mod, types.ModuleType)
    # The provider class must be exposed by the module.
    assert hasattr(completion_mod, 'GeminiCompletion')
def test_native_gemini_raises_error_when_initialization_fails():
    """
    Test that LLM raises ImportError when native Gemini completion fails.
    With the new behavior, when a native provider is in SUPPORTED_NATIVE_PROVIDERS
    but fails to instantiate, we raise an ImportError instead of silently falling back.
    This provides clearer error messages to users about missing dependencies.
    """
    # Mock the _get_native_provider to return a failing class
    with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
        # Stand-in provider whose constructor always blows up.
        class FailingCompletion:
            def __init__(self, *args, **kwargs):
                raise Exception("Native Google Gen AI SDK failed")
        mock_get_provider.return_value = FailingCompletion
        # This should raise ImportError with clear message
        with pytest.raises(ImportError) as excinfo:
            LLM(model="google/gemini-2.0-flash-001")
        # Verify the error message is helpful (wraps the original cause text)
        assert "Error importing native provider" in str(excinfo.value)
        assert "Native Google Gen AI SDK failed" in str(excinfo.value)
def test_gemini_completion_initialization_parameters():
    """
    Test that GeminiCompletion is initialized with correct parameters
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    llm = LLM(
        model="google/gemini-2.0-flash-001",
        temperature=0.7,
        max_output_tokens=2000,
        top_p=0.9,
        top_k=40,
        api_key="test-key",
    )
    assert isinstance(llm, GeminiCompletion)
    # Every constructor argument should round-trip onto the instance.
    assert llm.model == "gemini-2.0-flash-001"
    assert llm.temperature == 0.7
    assert llm.max_output_tokens == 2000
    assert llm.top_p == 0.9
    assert llm.top_k == 40
def test_gemini_specific_parameters():
    """
    Test Gemini-specific parameters like stop_sequences, streaming, and safety settings
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    safety = {
        "HARM_CATEGORY_HARASSMENT": "BLOCK_MEDIUM_AND_ABOVE",
        "HARM_CATEGORY_HATE_SPEECH": "BLOCK_MEDIUM_AND_ABOVE"
    }
    llm = LLM(
        model="google/gemini-2.0-flash-001",
        stop_sequences=["Human:", "Assistant:"],
        stream=True,
        safety_settings=safety,
        project="test-project",
        location="us-central1",
    )
    assert isinstance(llm, GeminiCompletion)
    # All provider-specific knobs should be preserved verbatim.
    assert llm.stop_sequences == ["Human:", "Assistant:"]
    assert llm.stream == True
    assert llm.safety_settings == safety
    assert llm.project == "test-project"
    assert llm.location == "us-central1"
def test_gemini_completion_call():
    """
    Test that GeminiCompletion call method works
    """
    llm = LLM(model="google/gemini-2.0-flash-001")
    canned_reply = "Hello! I'm Gemini, ready to help."
    # Stub out the network call on this specific instance only.
    with patch.object(llm, 'call', return_value=canned_reply) as mock_call:
        response = llm.call("Hello, how are you?")
    assert response == canned_reply
    mock_call.assert_called_once_with("Hello, how are you?")
def test_gemini_completion_called_during_crew_execution():
    """
    Test that GeminiCompletion.call is actually invoked when running a crew
    """
    # Create the LLM instance first so the same object can be patched and shared.
    gemini_llm = LLM(model="google/gemini-2.0-flash-001")
    # Mock the call method on the specific instance
    with patch.object(gemini_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
        # Create agent with explicit LLM configuration
        agent = Agent(
            role="Research Assistant",
            goal="Find population info",
            backstory="You research populations.",
            llm=gemini_llm,
        )
        task = Task(
            description="Find Tokyo population",
            expected_output="Population number",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        # Verify mock was called and the canned reply flowed into the crew result
        assert mock_call.called
        assert "14 million" in str(result)
def test_gemini_completion_call_arguments():
    """
    Test that GeminiCompletion.call is invoked with correct arguments
    """
    # Create LLM instance first
    gemini_llm = LLM(model="google/gemini-2.0-flash-001")
    # Mock the instance method
    with patch.object(gemini_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed successfully."
        agent = Agent(
            role="Test Agent",
            goal="Complete a simple task",
            backstory="You are a test agent.",
            llm=gemini_llm  # Use same instance
        )
        task = Task(
            description="Say hello world",
            expected_output="Hello world",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        # Verify call was made
        assert mock_call.called
        # Check the arguments passed to the call method
        call_args = mock_call.call_args
        assert call_args is not None
        # The first argument should be the messages
        messages = call_args[0][0]  # First positional argument
        assert isinstance(messages, (str, list))
        # Verify that the task description appears in the messages,
        # regardless of whether messages is a plain string or a message list.
        if isinstance(messages, str):
            assert "hello world" in messages.lower()
        elif isinstance(messages, list):
            message_content = str(messages).lower()
            assert "hello world" in message_content
def test_multiple_gemini_calls_in_crew():
    """
    Test that GeminiCompletion.call is invoked multiple times for multiple tasks
    """
    # Create LLM instance first
    gemini_llm = LLM(model="google/gemini-2.0-flash-001")
    # Mock the instance method
    with patch.object(gemini_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed."
        agent = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=gemini_llm  # Use same instance
        )
        task1 = Task(
            description="First task",
            expected_output="First result",
            agent=agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second result",
            agent=agent,
        )
        crew = Crew(
            agents=[agent],
            tasks=[task1, task2]
        )
        crew.kickoff()
        # Verify multiple calls were made
        assert mock_call.call_count >= 2  # At least one call per task
        # Verify each call had proper arguments
        for call in mock_call.call_args_list:
            assert len(call[0]) > 0  # Has positional arguments
            messages = call[0][0]
            assert messages is not None
def test_gemini_completion_with_tools():
    """
    Test that GeminiCompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool
    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"
    # Create LLM instance first
    gemini_llm = LLM(model="google/gemini-2.0-flash-001")
    # Mock the instance method
    with patch.object(gemini_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed with tools."
        agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=gemini_llm,  # Use same instance
            tools=[sample_tool]
        )
        task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        assert mock_call.called
        # Tools are passed as a keyword argument when present; the check is
        # conditional because the executor may omit it on some call paths.
        call_args = mock_call.call_args
        call_kwargs = call_args[1] if len(call_args) > 1 else {}
        if 'tools' in call_kwargs:
            assert call_kwargs['tools'] is not None
            assert len(call_kwargs['tools']) > 0
def test_gemini_raises_error_when_model_not_supported():
    """Test that GeminiCompletion raises ValueError when model not supported"""
    # Mock the Google client to raise an error
    with patch('crewai.llms.providers.gemini.completion.genai') as mock_genai:
        mock_client = MagicMock()
        mock_genai.Client.return_value = mock_client
        from google.genai.errors import ClientError  # type: ignore
        # Build a fake 404 response matching the google-genai error payload shape.
        mock_response = MagicMock()
        mock_response.body_segments = [{
            'error': {
                'code': 404,
                'message': 'models/model-doesnt-exist is not found for API version v1beta, or is not supported for generateContent.',
                'status': 'NOT_FOUND'
            }
        }]
        mock_response.status_code = 404
        mock_client.models.generate_content.side_effect = ClientError(404, mock_response)
        llm = LLM(model="google/model-doesnt-exist")
        with pytest.raises(Exception):  # Should raise some error for unsupported model
            llm.call("Hello")
def test_gemini_vertex_ai_setup():
    """
    Test that Vertex AI configuration is properly handled
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    vertex_env = {
        "GOOGLE_CLOUD_PROJECT": "test-project",
        "GOOGLE_CLOUD_LOCATION": "us-west1"
    }
    with patch.dict(os.environ, vertex_env):
        llm = LLM(
            model="google/gemini-2.0-flash-001",
            project="test-project",
            location="us-west1",
        )
        assert isinstance(llm, GeminiCompletion)
        # Explicit project/location arguments should land on the instance.
        assert llm.project == "test-project"
        assert llm.location == "us-west1"
def test_gemini_api_key_configuration():
    """
    Test that API key configuration works for both GOOGLE_API_KEY and GEMINI_API_KEY
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    # GOOGLE_API_KEY is picked up when present.
    with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
        llm = LLM(model="google/gemini-2.0-flash-001")
        assert isinstance(llm, GeminiCompletion)
        assert llm.api_key == "test-google-key"
    # GEMINI_API_KEY is honored when it is the only key in the environment.
    with patch.dict(os.environ, {"GEMINI_API_KEY": "test-gemini-key"}, clear=True):
        llm = LLM(model="google/gemini-2.0-flash-001")
        assert isinstance(llm, GeminiCompletion)
        assert llm.api_key == "test-gemini-key"
def test_gemini_model_capabilities():
    """
    Test that model capabilities are correctly identified
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    # Both the 2.0 and 1.5 generations advertise tool support.
    for model_id in ("google/gemini-2.0-flash-001", "google/gemini-1.5-pro"):
        llm = LLM(model=model_id)
        assert isinstance(llm, GeminiCompletion)
        assert llm.supports_tools == True
def test_gemini_generation_config():
    """
    Test that generation config is properly prepared
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    llm = LLM(
        model="google/gemini-2.0-flash-001",
        temperature=0.7,
        top_p=0.9,
        top_k=40,
        max_output_tokens=1000,
    )
    assert isinstance(llm, GeminiCompletion)
    config = llm._prepare_generation_config()
    # Each tuning knob should surface either as a config attribute or in its repr.
    for param in ("temperature", "top_p", "top_k", "max_output_tokens"):
        assert hasattr(config, param) or param in str(config)
def test_gemini_model_detection():
    """
    Test that various Gemini model formats are properly detected
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    # Both 'google/' and 'gemini/' prefixes must route to the native class.
    for model_name in (
        "google/gemini-2.0-flash-001",
        "gemini/gemini-2.0-flash-001",
        "google/gemini-1.5-pro",
        "gemini/gemini-1.5-flash",
    ):
        llm = LLM(model=model_name)
        assert isinstance(llm, GeminiCompletion), f"Failed for model: {model_name}"
def test_gemini_supports_stop_words():
    """
    Test that Gemini models support stop sequences
    """
    # Any Gemini model should report stop-word support.
    assert LLM(model="google/gemini-2.0-flash-001").supports_stop_words() == True
def test_gemini_context_window_size():
    """
    Test that Gemini models return correct context window sizes
    """
    # Gemini 2.0 Flash advertises roughly a 1M-token window.
    flash = LLM(model="google/gemini-2.0-flash-001")
    assert flash.get_context_window_size() > 500000
    # Gemini 1.5 Pro advertises roughly a 2M-token window.
    pro = LLM(model="google/gemini-1.5-pro")
    assert pro.get_context_window_size() > 1000000
def test_gemini_message_formatting():
    """
    Test that messages are properly formatted for Gemini API
    """
    llm = LLM(model="google/gemini-2.0-flash-001")
    # Test message formatting: OpenAI-style dicts in, Gemini Content objects out.
    test_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"},
        {"role": "user", "content": "How are you?"}
    ]
    formatted_contents, system_instruction = llm._format_messages_for_gemini(test_messages)
    # System message should be extracted into a separate instruction
    assert system_instruction == "You are a helpful assistant."
    # Remaining messages should be Content objects
    assert len(formatted_contents) >= 3  # Should have user, model, user messages
    # First content should be user role
    assert formatted_contents[0].role == "user"
    # Second should be model (Gemini's name for the assistant role)
    assert formatted_contents[1].role == "model"
def test_gemini_streaming_parameter():
    """
    Test that streaming parameter is properly handled
    """
    # stream=False is preserved verbatim on the instance.
    non_streaming = LLM(model="google/gemini-2.0-flash-001", stream=False)
    assert non_streaming.stream == False
    # stream=True is preserved verbatim on the instance.
    streaming = LLM(model="google/gemini-2.0-flash-001", stream=True)
    assert streaming.stream == True
def test_gemini_tool_conversion():
    """
    Test that tools are properly converted to Gemini format
    """
    llm = LLM(model="google/gemini-2.0-flash-001")
    # Mock tool in CrewAI format
    crewai_tools = [{
        "type": "function",
        "function": {
            "name": "test_tool",
            "description": "A test tool",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    }]
    # Test tool conversion
    # NOTE(review): the provider method is named '_convert_tools_for_interference';
    # likely a typo for 'inference' — confirm with the provider API before renaming.
    gemini_tools = llm._convert_tools_for_interference(crewai_tools)
    assert len(gemini_tools) == 1
    # Gemini tools are Tool objects with function_declarations
    assert hasattr(gemini_tools[0], 'function_declarations')
    assert len(gemini_tools[0].function_declarations) == 1
    func_decl = gemini_tools[0].function_declarations[0]
    assert func_decl.name == "test_tool"
    assert func_decl.description == "A test tool"
def test_gemini_environment_variable_api_key():
    """
    Test that Google API key is properly loaded from environment
    """
    with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
        llm = LLM(model="google/gemini-2.0-flash-001")
        # A client should have been constructed and expose the models surface.
        assert llm.client is not None
        assert hasattr(llm.client, 'models')
        assert llm.api_key == "test-google-key"
@pytest.mark.vcr()
def test_gemini_token_usage_tracking():
    """
    Test that token usage is properly tracked for Gemini responses
    """
    # Replayed via VCR cassette; the exact reply text is pinned by the recording.
    llm = LLM(model="google/gemini-2.0-flash-001")
    result = llm.call("Hello")
    assert result.strip() == "Hi there! How can I help you today?"
    usage = llm.get_token_usage_summary()
    assert usage.successful_requests == 1
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.total_tokens > 0
@pytest.mark.vcr()
def test_gemini_tool_returning_float():
    """
    Test that Gemini properly handles tools that return non-dict values like floats.
    This is an end-to-end test that verifies the agent can use a tool that returns
    a float (which gets wrapped in {"result": value} for Gemini's FunctionResponse).
    """
    from pydantic import BaseModel, Field
    from typing import Type
    from crewai.tools import BaseTool
    class SumNumbersToolInput(BaseModel):
        a: float = Field(..., description="The first number to add")
        b: float = Field(..., description="The second number to add")
    class SumNumbersTool(BaseTool):
        name: str = "sum_numbers"
        description: str = "Add two numbers together and return the result"
        args_schema: Type[BaseModel] = SumNumbersToolInput
        def _run(self, a: float, b: float) -> float:
            # Returns a bare float deliberately — the wrapping is what's under test.
            return a + b
    sum_tool = SumNumbersTool()
    agent = Agent(
        role="Calculator",
        goal="Calculate numbers accurately",
        backstory="You are a calculator that adds numbers.",
        llm=LLM(model="google/gemini-2.0-flash-001"),
        tools=[sum_tool],
        verbose=True,
    )
    task = Task(
        description="What is 10000 + 20000? Use the sum_numbers tool to calculate this.",
        expected_output="The sum of the two numbers",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task], verbose=True)
    result = crew.kickoff()
    # The result should contain 30000 (the sum)
    assert "30000" in result.raw
def test_gemini_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="google/gemini-2.0-flash-001")
    markers = ["\nObservation:", "\nThought:"]
    # List assignment mirrors straight into stop_sequences.
    llm.stop = markers
    assert llm.stop_sequences == markers
    assert llm.stop == markers
    # A plain string becomes a single-element list.
    llm.stop = "\nFinal Answer:"
    assert llm.stop_sequences == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]
    # None empties both attributes.
    llm.stop = None
    assert llm.stop_sequences == []
    assert llm.stop == []
def test_gemini_stop_sequences_sent_to_api():
    """Test that stop_sequences are properly sent to the Gemini API."""
    llm = LLM(model="google/gemini-2.0-flash-001")
    # Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
    llm.stop = ["\nObservation:", "\nThought:"]
    # Patch the API call to capture parameters without making real call
    with patch.object(llm.client.models, 'generate_content') as mock_generate:
        mock_response = MagicMock()
        mock_response.text = "Hello"
        mock_response.candidates = []
        mock_response.usage_metadata = MagicMock(
            prompt_token_count=10,
            candidates_token_count=5,
            total_token_count=15
        )
        mock_generate.return_value = mock_response
        llm.call("Say hello in one word")
        # Verify stop_sequences were passed to the API in the config
        call_kwargs = mock_generate.call_args[1]
        assert "config" in call_kwargs
        # The config object should have stop_sequences set
        config = call_kwargs["config"]
        # Check if the config has stop_sequences attribute
        assert hasattr(config, 'stop_sequences') or 'stop_sequences' in config.__dict__
        if hasattr(config, 'stop_sequences'):
            assert config.stop_sequences == ["\nObservation:", "\nThought:"]
@pytest.mark.vcr()
@pytest.mark.skip(reason="VCR cannot replay SSE streaming responses")
def test_google_streaming_returns_usage_metrics():
    """
    Test that Google Gemini streaming calls return proper token usage metrics.
    """
    # Skipped on CI: VCR cannot replay server-sent-event streams (see skip marker).
    agent = Agent(
        role="Research Assistant",
        goal="Find information about the capital of Japan",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="gemini/gemini-2.0-flash-exp", stream=True),
        verbose=True,
    )
    task = Task(
        description="What is the capital of Japan?",
        expected_output="The capital of Japan",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    # Streaming must still aggregate usage across chunks.
    assert result.token_usage is not None
    assert result.token_usage.total_tokens > 0
    assert result.token_usage.prompt_tokens > 0
    assert result.token_usage.completion_tokens > 0
    assert result.token_usage.successful_requests >= 1
@pytest.mark.vcr()
def test_google_express_mode_works() -> None:
    """
    Test Google Vertex AI Express mode with API key authentication.
    This tests Vertex AI Express mode (aiplatform.googleapis.com) with API key
    authentication.
    """
    # GOOGLE_GENAI_USE_VERTEXAI switches the google-genai SDK to the Vertex endpoint.
    with patch.dict(os.environ, {"GOOGLE_GENAI_USE_VERTEXAI": "true"}):
        agent = Agent(
            role="Research Assistant",
            goal="Find information about the capital of Japan",
            backstory="You are a helpful research assistant.",
            llm=LLM(
                model="gemini/gemini-2.0-flash-exp",
            ),
            verbose=True,
        )
        task = Task(
            description="What is the capital of Japan?",
            expected_output="The capital of Japan",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        assert result.token_usage is not None
        assert result.token_usage.total_tokens > 0
        assert result.token_usage.prompt_tokens > 0
        assert result.token_usage.completion_tokens > 0
        assert result.token_usage.successful_requests >= 1
def test_gemini_2_0_model_detection():
    """Test that Gemini 2.0 models are properly detected."""
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    # 2.x generations should flag is_gemini_2_0; the 1.5 generation should not.
    expectations = {
        "google/gemini-2.0-flash-001": True,
        "google/gemini-2.5-flash": True,
        "google/gemini-1.5-pro": False,
    }
    for model_id, expected in expectations.items():
        llm = LLM(model=model_id)
        assert isinstance(llm, GeminiCompletion)
        assert llm.is_gemini_2_0 is expected
def test_add_property_ordering_to_schema():
    """Test that _add_property_ordering correctly adds propertyOrdering to schemas."""
    from crewai.llms.providers.gemini.completion import GeminiCompletion
    # Flat object: ordering mirrors the property insertion order.
    flat_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "email": {"type": "string"},
        },
    }
    ordered = GeminiCompletion._add_property_ordering(flat_schema)
    assert "propertyOrdering" in ordered
    assert ordered["propertyOrdering"] == ["name", "age", "email"]
    # Nested objects: propertyOrdering is applied recursively at every level.
    nested_schema = {
        "type": "object",
        "properties": {
            "user": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "contact": {
                        "type": "object",
                        "properties": {
                            "email": {"type": "string"},
                            "phone": {"type": "string"},
                        },
                    },
                },
            },
            "id": {"type": "integer"},
        },
    }
    ordered = GeminiCompletion._add_property_ordering(nested_schema)
    assert ordered["propertyOrdering"] == ["user", "id"]
    user_schema = ordered["properties"]["user"]
    assert user_schema["propertyOrdering"] == ["name", "contact"]
    contact_schema = user_schema["properties"]["contact"]
    assert contact_schema["propertyOrdering"] == ["email", "phone"]
def test_gemini_2_0_response_model_with_property_ordering():
    """Test that Gemini 2.0 models include propertyOrdering in response schemas."""
    from pydantic import BaseModel, Field
    class TestResponse(BaseModel):
        """Test response model."""
        name: str = Field(..., description="The name")
        age: int = Field(..., description="The age")
        email: str = Field(..., description="The email")
    llm = LLM(model="google/gemini-2.0-flash-001")
    # Prepare generation config with response model
    config = llm._prepare_generation_config(response_model=TestResponse)
    # Verify that the config has response_json_schema (the 2.0-era parameter)
    assert hasattr(config, 'response_json_schema') or 'response_json_schema' in config.__dict__
    # Get the schema
    if hasattr(config, 'response_json_schema'):
        schema = config.response_json_schema
    else:
        schema = config.__dict__.get('response_json_schema', {})
    # Verify propertyOrdering is present for Gemini 2.0
    assert "propertyOrdering" in schema
    assert "name" in schema["propertyOrdering"]
    assert "age" in schema["propertyOrdering"]
    assert "email" in schema["propertyOrdering"]
def test_gemini_1_5_response_model_uses_response_schema():
    """Test that Gemini 1.5 models use response_schema parameter (not response_json_schema)."""
    from pydantic import BaseModel, Field

    class TestResponse(BaseModel):
        """Test response model."""
        name: str = Field(..., description="The name")
        age: int = Field(..., description="The age")

    llm = LLM(model="google/gemini-1.5-pro")
    # Prepare generation config with response model
    config = llm._prepare_generation_config(response_model=TestResponse)
    # Verify that the config uses response_schema (not response_json_schema)
    assert hasattr(config, 'response_schema') or 'response_schema' in config.__dict__
    # response_json_schema must be absent or explicitly None for 1.5 models.
    assert not (hasattr(config, 'response_json_schema') and config.response_json_schema is not None)
    # Get the schema
    if hasattr(config, 'response_schema'):
        schema = config.response_schema
    else:
        schema = config.__dict__.get('response_schema')
    # For Gemini 1.5, response_schema should be the Pydantic model itself
    # The SDK handles conversion internally
    assert schema is TestResponse or isinstance(schema, type)
# =============================================================================
# Agent Kickoff Structured Output Tests
# =============================================================================
@pytest.mark.vcr()
def test_gemini_agent_kickoff_structured_output_without_tools():
    """
    Test that agent kickoff returns structured output without tools.
    This tests native structured output handling for Gemini models.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    from pydantic import BaseModel, Field

    class AnalysisResult(BaseModel):
        """Structured output for analysis results."""
        topic: str = Field(description="The topic analyzed")
        key_points: list[str] = Field(description="Key insights from the analysis")
        summary: str = Field(description="Brief summary of findings")

    agent = Agent(
        role="Analyst",
        goal="Provide structured analysis on topics",
        backstory="You are an expert analyst who provides clear, structured insights.",
        llm=LLM(model="google/gemini-2.0-flash-001"),
        tools=[],
        verbose=True,
    )
    result = agent.kickoff(
        messages="Analyze the benefits of remote work briefly. Keep it concise.",
        response_format=AnalysisResult,
    )
    # The raw model reply must have been parsed into the requested model.
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, AnalysisResult), f"Expected AnalysisResult but got {type(result.pydantic)}"
    assert result.pydantic.topic, "Topic should not be empty"
    assert len(result.pydantic.key_points) > 0, "Should have at least one key point"
    assert result.pydantic.summary, "Summary should not be empty"
@pytest.mark.vcr()
def test_gemini_agent_kickoff_structured_output_with_tools():
    """
    Test that agent kickoff returns structured output after using tools.
    This tests post-tool-call structured output handling for Gemini models.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    from pydantic import BaseModel, Field
    from crewai.tools import tool

    class CalculationResult(BaseModel):
        """Structured output for calculation results."""
        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")

    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b

    agent = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="google/gemini-2.0-flash-001"),
        tools=[add_numbers],
        verbose=True,
    )
    result = agent.kickoff(
        messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        response_format=CalculationResult,
    )
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
    # 15 + 27 == 42: proves the tool result flowed into the structured reply.
    assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
    assert result.pydantic.operation, "Operation should not be empty"
    assert result.pydantic.explanation, "Explanation should not be empty"
@pytest.mark.vcr()
def test_gemini_crew_structured_output_with_tools():
    """
    Test that a crew with Gemini can use both tools and output_pydantic on a task.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    from pydantic import BaseModel, Field
    from crewai.tools import tool

    class CalculationResult(BaseModel):
        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")

    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b

    agent = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="google/gemini-2.0-flash-001"),
        tools=[add_numbers],
    )
    task = Task(
        description="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        expected_output="A structured calculation result",
        output_pydantic=CalculationResult,
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    # output_pydantic on the task should yield a typed crew result.
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult)
    assert result.pydantic.result == 42, f"Expected 42 but got {result.pydantic.result}"
def test_gemini_stop_words_not_applied_to_structured_output():
    """Structured-output validation must ignore configured stop sequences.

    If stop words were applied, a JSON payload containing a pattern such as
    "Observation:" inside a string field would be truncated mid-document and
    fail Pydantic validation.
    """
    from pydantic import BaseModel, Field
    from crewai.llms.providers.gemini.completion import GeminiCompletion

    class ResearchResult(BaseModel):
        """Research result that may contain stop word patterns in string fields."""
        finding: str = Field(description="The research finding")
        observation: str = Field(description="Observation about the finding")

    # Gemini's stop-word knob is `stop_sequences` (not `stop`).
    completion = GeminiCompletion(
        model="gemini-2.0-flash-001",
        stop_sequences=["Observation:", "Final Answer:"],  # Common stop words
    )

    # A stop-sequence pattern embedded inside a JSON string value; without
    # the fix this would be cut at "Observation:" and break the JSON.
    raw_json = '{"finding": "The data shows growth", "observation": "Observation: This confirms the hypothesis"}'
    parsed = completion._validate_structured_output(raw_json, ResearchResult)

    # The full document must survive untruncated.
    assert isinstance(parsed, ResearchResult)
    assert parsed.finding == "The data shows growth"
    assert "Observation:" in parsed.observation
def test_gemini_stop_words_still_applied_to_regular_responses():
    """Plain (non-structured) responses must still honor stop sequences.

    Guards against the structured-output fix disabling stop words globally.
    """
    from crewai.llms.providers.gemini.completion import GeminiCompletion

    # Gemini's stop-word knob is `stop_sequences` (not `stop`).
    completion = GeminiCompletion(
        model="gemini-2.0-flash-001",
        stop_sequences=["Observation:", "Final Answer:"],
    )

    # Regular text containing a configured stop sequence.
    raw_text = "I need to search for more information.\n\nAction: search\nObservation: Found results"
    truncated = completion._apply_stop_words(raw_text)

    # Everything from the first stop sequence onward must be removed.
    assert "Observation:" not in truncated
    assert "Found results" not in truncated
    assert "I need to search for more information" in truncated
def test_gemini_structured_output_preserves_json_with_stop_word_patterns():
    """Structured-output validation keeps JSON intact even when every string
    field embeds a configured stop-sequence pattern.
    """
    from pydantic import BaseModel, Field
    from crewai.llms.providers.gemini.completion import GeminiCompletion

    class AgentObservation(BaseModel):
        """Model with fields that might contain stop word-like text."""
        action_taken: str = Field(description="What action was taken")
        observation_result: str = Field(description="The observation result")
        final_answer: str = Field(description="The final answer")

    # Gemini's stop-word knob is `stop_sequences` (not `stop`).
    completion = GeminiCompletion(
        model="gemini-2.0-flash-001",
        stop_sequences=["Observation:", "Final Answer:", "Action:"],
    )

    # Every field value deliberately starts with a stop-sequence pattern.
    payload = '''{
        "action_taken": "Action: Searched the database",
        "observation_result": "Observation: Found 5 relevant results",
        "final_answer": "Final Answer: The data shows positive growth"
    }'''

    # Structured output must NOT be truncated at any of those patterns.
    parsed = completion._validate_structured_output(payload, AgentObservation)
    assert isinstance(parsed, AgentObservation)
    assert "Action:" in parsed.action_taken
    assert "Observation:" in parsed.observation_result
    assert "Final Answer:" in parsed.final_answer
@pytest.mark.vcr()
def test_gemini_cached_prompt_tokens():
    """
    Test that Gemini correctly extracts and tracks cached_prompt_tokens
    from cached_content_token_count in the usage metadata.
    Sends two calls with the same large prompt to trigger caching.
    """
    # Large repeated prefix so the provider's implicit caching can kick in.
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = LLM(model="google/gemini-2.5-flash")
    # First call
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": "Say hello in one word."},
    ])
    # Second call: same system prompt
    llm.call([
        {"role": "system", "content": system_msg},
        {"role": "user", "content": "Say goodbye in one word."},
    ])
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    # cached_prompt_tokens should be populated (may be 0 if Gemini
    # doesn't cache for this particular request, but the field should exist)
    assert usage.cached_prompt_tokens >= 0
@pytest.mark.vcr()
def test_gemini_cached_prompt_tokens_with_tools():
    """
    Test that Gemini correctly tracks cached_prompt_tokens when tools are used.
    The large system prompt should be cached across tool-calling requests.
    """
    # Large repeated prefix so the provider's implicit caching can kick in.
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {padding}"

    def get_weather(location: str) -> str:
        # Stub tool implementation; only its invocation matters here.
        return f"The weather in {location} is sunny and 72°F"

    # Plain function-schema dict (provider-agnostic tool format).
    tools = [
        {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name"
                    }
                },
                "required": ["location"],
            },
        }
    ]
    llm = LLM(model="google/gemini-2.5-flash")
    # First call with tool
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Tokyo?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    # Second call with same system prompt + tools
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Paris?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    # cached_prompt_tokens should be populated (may be 0 if Gemini
    # doesn't cache for this particular request, but the field should exist)
    assert usage.cached_prompt_tokens >= 0
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/google/test_google.py",
"license": "MIT License",
"lines": 942,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py | import os
import sys
import types
from typing import Any
from unittest.mock import patch, MagicMock
import openai
import pytest
from crewai.llm import LLM
from crewai.llms.providers.openai.completion import OpenAICompletion, ResponsesAPIResult
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
from crewai.cli.constants import DEFAULT_LLM_MODEL
def test_openai_completion_is_used_when_openai_provider():
    """A bare OpenAI model name resolves to the native OpenAICompletion class."""
    llm = LLM(model="gpt-4o")
    # Class-name comparison avoids importing the provider module here.
    assert type(llm).__name__ == "OpenAICompletion"
    assert llm.provider == "openai"
    assert llm.model == "gpt-4o"
def test_openai_completion_is_used_when_no_provider_prefix():
    """Without a provider prefix the LLM factory defaults to OpenAI."""
    from crewai.llms.providers.openai.completion import OpenAICompletion

    llm = LLM(model="gpt-4o")
    assert isinstance(llm, OpenAICompletion)
    assert llm.provider == "openai"
    assert llm.model == "gpt-4o"
@pytest.mark.vcr()
def test_openai_is_default_provider_without_explicit_llm_set_on_agent():
    """
    Test that OpenAI is the default provider when no explicit LLM is set on the agent
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    agent = Agent(
        role="Research Assistant",
        goal="Find information about the population of Tokyo",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="gpt-4o-mini"),
    )
    task = Task(
        description="Find information about the population of Tokyo",
        expected_output="The population of Tokyo is 10 million",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    crew.kickoff()
    # The plain model name should have resolved to the native OpenAI class.
    assert crew.agents[0].llm.__class__.__name__ == "OpenAICompletion"
    assert crew.agents[0].llm.model == "gpt-4o-mini"
def test_openai_completion_module_is_imported():
    """Creating an OpenAI LLM lazily imports the native completion module."""
    module_name = "crewai.llms.providers.openai.completion"

    # Drop any cached copy so the (re)import is observable.
    sys.modules.pop(module_name, None)

    # Instantiating the LLM should trigger the import.
    LLM(model="gpt-4o")

    assert module_name in sys.modules
    completion_mod = sys.modules[module_name]
    assert isinstance(completion_mod, types.ModuleType)
    # The module must expose the provider class.
    assert hasattr(completion_mod, 'OpenAICompletion')
def test_native_openai_raises_error_when_initialization_fails():
    """LLM must surface an ImportError when the native provider class fails
    to initialize, instead of silently falling back to LiteLLM and masking
    a configuration problem.
    """
    with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:

        class BrokenCompletion:
            # Simulates a native SDK whose constructor blows up.
            def __init__(self, *args, **kwargs):
                raise Exception("Native SDK failed")

        mock_get_provider.return_value = BrokenCompletion

        # Expect a hard failure, not a fallback.
        with pytest.raises(ImportError) as excinfo:
            LLM(model="gpt-4o")

        message = str(excinfo.value)
        assert "Error importing native provider" in message
        assert "Native SDK failed" in message
def test_openai_completion_initialization_parameters():
    """OpenAICompletion retains the model parameters it was constructed with."""
    from crewai.llms.providers.openai.completion import OpenAICompletion

    completion = LLM(
        model="gpt-4o",
        temperature=0.7,
        max_tokens=1000,
        api_key="test-key"
    )
    assert isinstance(completion, OpenAICompletion)
    assert completion.model == "gpt-4o"
    assert completion.temperature == 0.7
    assert completion.max_tokens == 1000
def test_openai_completion_call():
    """A mocked OpenAICompletion.call round-trips its canned reply."""
    completion = LLM(model="openai/gpt-4o")
    # Stub the network-facing method on this specific instance.
    with patch.object(
        completion, 'call', return_value="Hello! I'm ready to help."
    ) as stubbed_call:
        reply = completion.call("Hello, how are you?")
        assert reply == "Hello! I'm ready to help."
        stubbed_call.assert_called_once_with("Hello, how are you?")
def test_openai_completion_called_during_crew_execution():
    """
    Test that OpenAICompletion.call is actually invoked when running a crew
    """
    # Create the LLM instance first
    openai_llm = LLM(model="openai/gpt-4o")
    # Mock the call method on the specific instance
    # (patching the instance, not the class, so only this agent is stubbed).
    with patch.object(openai_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
        # Create agent with explicit LLM configuration
        agent = Agent(
            role="Research Assistant",
            goal="Find population info",
            backstory="You research populations.",
            llm=openai_llm,
        )
        task = Task(
            description="Find Tokyo population",
            expected_output="Population number",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        # Verify mock was called
        assert mock_call.called
        # The stubbed reply should flow through to the crew output.
        assert "14 million" in str(result)
def test_openai_completion_call_arguments():
    """
    Test that OpenAICompletion.call is invoked with correct arguments
    """
    # Create LLM instance first (like working tests)
    openai_llm = LLM(model="openai/gpt-4o")
    # Mock the instance method (like working tests)
    with patch.object(openai_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed successfully."
        agent = Agent(
            role="Test Agent",
            goal="Complete a simple task",
            backstory="You are a test agent.",
            llm=openai_llm  # Use same instance
        )
        task = Task(
            description="Say hello world",
            expected_output="Hello world",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        # Verify call was made
        assert mock_call.called
        # Check the arguments passed to the call method
        call_args = mock_call.call_args
        assert call_args is not None
        # The first argument should be the messages
        messages = call_args[0][0]  # First positional argument
        # Messages may be a raw prompt string or a chat-message list
        # depending on how the executor formats the request.
        assert isinstance(messages, (str, list))
        # Verify that the task description appears in the messages
        if isinstance(messages, str):
            assert "hello world" in messages.lower()
        elif isinstance(messages, list):
            message_content = str(messages).lower()
            assert "hello world" in message_content
def test_multiple_openai_calls_in_crew():
    """
    Test that OpenAICompletion.call is invoked multiple times for multiple tasks
    """
    # Create LLM instance first
    openai_llm = LLM(model="openai/gpt-4o")
    # Mock the instance method
    with patch.object(openai_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed."
        agent = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=openai_llm  # Use same instance
        )
        task1 = Task(
            description="First task",
            expected_output="First result",
            agent=agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second result",
            agent=agent,
        )
        crew = Crew(
            agents=[agent],
            tasks=[task1, task2]
        )
        crew.kickoff()
        # Verify multiple calls were made
        assert mock_call.call_count >= 2  # At least one call per task
        # Verify each call had proper arguments
        for call in mock_call.call_args_list:
            assert len(call[0]) > 0  # Has positional arguments
            messages = call[0][0]
            assert messages is not None
def test_openai_completion_with_tools():
    """
    Test that OpenAICompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool

    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"

    # Create LLM instance first
    openai_llm = LLM(model="openai/gpt-4o")
    # Mock the instance method (not the class method)
    with patch.object(openai_llm, 'call') as mock_call:
        mock_call.return_value = "Task completed with tools."
        agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=openai_llm,  # Use same instance
            tools=[sample_tool]
        )
        task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()
        assert mock_call.called
        call_args = mock_call.call_args
        call_kwargs = call_args[1] if len(call_args) > 1 else {}
        # Tools are only forwarded when the executor decides to expose them;
        # assert on them when present rather than requiring the kwarg.
        if 'tools' in call_kwargs:
            assert call_kwargs['tools'] is not None
            assert len(call_kwargs['tools']) > 0
@pytest.mark.vcr()
def test_openai_completion_call_returns_usage_metrics():
    """
    Test that OpenAICompletion.call returns usage metrics
    """
    # NOTE: replayed through a recorded VCR cassette; the exact token counts
    # below come from the recorded response and will change if re-recorded.
    agent = Agent(
        role="Research Assistant",
        goal="Find information about the population of Tokyo",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="gpt-4o"),
        verbose=True,
    )
    task = Task(
        description="Find information about the population of Tokyo",
        expected_output="The population of Tokyo is 10 million",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    assert result.token_usage is not None
    assert result.token_usage.total_tokens == 289
    assert result.token_usage.prompt_tokens == 173
    assert result.token_usage.completion_tokens == 116
    assert result.token_usage.successful_requests == 1
    assert result.token_usage.cached_prompt_tokens == 0
@pytest.mark.skip(reason="Allow for litellm")
def test_openai_raises_error_when_model_not_supported():
    """Test that OpenAICompletion raises ValueError when model not supported"""
    # Skipped while unknown models are allowed to fall through to LiteLLM.
    with patch('crewai.llms.providers.openai.completion.OpenAI') as mock_openai_class:
        mock_client = MagicMock()
        mock_openai_class.return_value = mock_client
        # Simulate the SDK's 404 for a nonexistent model.
        mock_client.chat.completions.create.side_effect = openai.NotFoundError(
            message="The model `model-doesnt-exist` does not exist",
            response=MagicMock(),
            body={}
        )
        llm = LLM(model="openai/model-doesnt-exist")
        with pytest.raises(ValueError, match="Model.*not found"):
            llm.call("Hello")
def test_openai_client_setup_with_extra_arguments():
    """
    Test that OpenAICompletion is initialized with correct parameters
    """
    llm = LLM(
        model="gpt-4o",
        temperature=0.7,
        max_tokens=1000,
        top_p=0.5,
        max_retries=3,
        timeout=30
    )
    # Check that model parameters are stored on the LLM instance
    assert llm.temperature == 0.7
    assert llm.max_tokens == 1000
    assert llm.top_p == 0.5
    # Check that client parameters are properly configured
    # (max_retries/timeout belong to the OpenAI client, not the model call).
    assert llm.client.max_retries == 3
    assert llm.client.timeout == 30
    # Test that parameters are properly used in API calls
    with patch.object(llm.client.chat.completions, 'create') as mock_create:
        mock_create.return_value = MagicMock(
            choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
            usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30)
        )
        llm.call("Hello")
        # Verify the API was called with the right parameters
        call_args = mock_create.call_args[1]  # keyword arguments
        assert call_args['temperature'] == 0.7
        assert call_args['max_tokens'] == 1000
        assert call_args['top_p'] == 0.5
        assert call_args['model'] == 'gpt-4o'
def test_extra_arguments_are_passed_to_openai_completion():
    """Model parameters given to LLM() are forwarded into the create() call."""
    llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)

    # Canned completion response so no network traffic occurs.
    fake_response = MagicMock(
        choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
        usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30),
    )
    with patch.object(
        llm.client.chat.completions, 'create', return_value=fake_response
    ) as mock_create:
        llm.call("Hello, how are you?")
        assert mock_create.called
        sent_kwargs = mock_create.call_args[1]
        assert sent_kwargs['temperature'] == 0.7
        assert sent_kwargs['max_tokens'] == 1000
        assert sent_kwargs['top_p'] == 0.5
        assert sent_kwargs['model'] == 'gpt-4o'
def test_openai_get_client_params_with_api_base():
    """`api_base` alone is translated into the client's `base_url` param."""
    completion = OpenAICompletion(
        model="gpt-4o",
        api_base="https://custom.openai.com/v1",
    )
    params = completion._get_client_params()
    assert params["base_url"] == "https://custom.openai.com/v1"
def test_openai_get_client_params_with_base_url_priority():
    """When both are supplied, `base_url` wins over `api_base`."""
    completion = OpenAICompletion(
        model="gpt-4o",
        base_url="https://priority.openai.com/v1",
        api_base="https://fallback.openai.com/v1",
    )
    params = completion._get_client_params()
    assert params["base_url"] == "https://priority.openai.com/v1"
def test_openai_get_client_params_with_env_var():
    """OPENAI_BASE_URL is used when no explicit base URL is configured."""
    env_override = {"OPENAI_BASE_URL": "https://env.openai.com/v1"}
    with patch.dict(os.environ, env_override):
        completion = OpenAICompletion(model="gpt-4o")
        params = completion._get_client_params()
    assert params["base_url"] == "https://env.openai.com/v1"
def test_openai_get_client_params_priority_order():
    """Resolution order is: base_url > api_base > OPENAI_BASE_URL env var."""
    with patch.dict(os.environ, {
        "OPENAI_BASE_URL": "https://env.openai.com/v1",
    }):
        # base_url beats both api_base and the env var.
        explicit = OpenAICompletion(
            model="gpt-4o",
            base_url="https://base-url.openai.com/v1",
            api_base="https://api-base.openai.com/v1",
        )
        assert explicit._get_client_params()["base_url"] == "https://base-url.openai.com/v1"

        # api_base beats the env var when base_url is absent.
        legacy = OpenAICompletion(
            model="gpt-4o",
            api_base="https://api-base.openai.com/v1",
        )
        assert legacy._get_client_params()["base_url"] == "https://api-base.openai.com/v1"

        # The env var is the last resort.
        from_env = OpenAICompletion(model="gpt-4o")
        assert from_env._get_client_params()["base_url"] == "https://env.openai.com/v1"
def test_openai_get_client_params_no_base_url(monkeypatch):
    """Without any configuration, no base_url entry reaches the client."""
    # Remove every env var that could inject a base URL.
    monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
    monkeypatch.delenv("OPENAI_API_BASE", raising=False)

    completion = OpenAICompletion(model="gpt-4o")
    params = completion._get_client_params()

    # None values are filtered out of the client params.
    assert params.get("base_url") is None
def test_openai_streaming_with_response_model():
    """
    Test that streaming with response_model works correctly and doesn't call invalid API methods.
    This test verifies the fix for the bug where streaming with response_model attempted to call
    self.client.responses.stream() with invalid parameters (input, text_format).
    """
    from pydantic import BaseModel

    class TestResponse(BaseModel):
        """Test response model."""
        answer: str
        confidence: float

    llm = LLM(model="openai/gpt-4o", stream=True)
    # The correct path is the beta structured-streaming helper, so that is
    # what gets patched here.
    with patch.object(llm.client.beta.chat.completions, "stream") as mock_stream:
        # Create mock chunks with content.delta event structure
        mock_chunk1 = MagicMock()
        mock_chunk1.type = "content.delta"
        mock_chunk1.delta = '{"answer": "test", '
        mock_chunk1.id = "response-1"
        # Second chunk
        mock_chunk2 = MagicMock()
        mock_chunk2.type = "content.delta"
        mock_chunk2.delta = '"confidence": 0.95}'
        mock_chunk2.id = "response-2"
        # Create mock final completion with parsed result
        mock_parsed = TestResponse(answer="test", confidence=0.95)
        mock_message = MagicMock()
        mock_message.parsed = mock_parsed
        mock_choice = MagicMock()
        mock_choice.message = mock_message
        mock_final_completion = MagicMock()
        mock_final_completion.choices = [mock_choice]
        # Create mock stream context manager
        # (the SDK stream is used as `with ... as s: iterate; get_final_completion()`).
        mock_stream_obj = MagicMock()
        mock_stream_obj.__enter__ = MagicMock(return_value=mock_stream_obj)
        mock_stream_obj.__exit__ = MagicMock(return_value=None)
        mock_stream_obj.__iter__ = MagicMock(return_value=iter([mock_chunk1, mock_chunk2]))
        mock_stream_obj.get_final_completion = MagicMock(return_value=mock_final_completion)
        mock_stream.return_value = mock_stream_obj
        result = llm.call("Test question", response_model=TestResponse)
        assert result is not None
        assert isinstance(result, TestResponse)
        assert result.answer == "test"
        assert result.confidence == 0.95
        assert mock_stream.called
        call_kwargs = mock_stream.call_args[1]
        assert call_kwargs["model"] == "gpt-4o"
        assert call_kwargs["response_format"] == TestResponse
        # Regression guard: these kwargs belong to the Responses API and must
        # not leak into the chat-completions streaming call.
        assert "input" not in call_kwargs
        assert "text_format" not in call_kwargs
@pytest.mark.vcr()
def test_openai_response_format_with_pydantic_model():
    """
    Test that response_format with a Pydantic BaseModel returns structured output.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    from pydantic import BaseModel, Field

    class AnswerResponse(BaseModel):
        """Response model with structured fields."""
        answer: str = Field(description="The answer to the question")
        confidence: float = Field(description="Confidence score between 0 and 1")

    llm = LLM(model="gpt-4o", response_format=AnswerResponse)
    result = llm.call("What is the capital of France? Be concise.")
    # call() should return the parsed model, not a raw string.
    assert isinstance(result, AnswerResponse)
    assert result.answer is not None
    assert 0 <= result.confidence <= 1
@pytest.mark.vcr()
def test_openai_response_format_with_dict():
    """
    Test that response_format with a dict returns JSON output.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    import json
    llm = LLM(model="gpt-4o", response_format={"type": "json_object"})
    result = llm.call("Return a JSON object with a 'status' field set to 'success'")
    # json_object mode returns a JSON string; parse it to verify.
    parsed = json.loads(result)
    assert "status" in parsed
@pytest.mark.vcr()
def test_openai_response_format_none():
    """
    Test that when response_format is None, the API returns plain text.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    llm = LLM(model="gpt-4o", response_format=None)
    result = llm.call("Say hello in one word")
    assert isinstance(result, str)
    assert len(result) > 0
@pytest.mark.vcr()
def test_openai_streaming_returns_usage_metrics():
    """
    Test that OpenAI streaming calls return proper token usage metrics.
    """
    # NOTE: replayed through a recorded VCR cassette — no live API call.
    # Streaming responses report usage differently, hence the dedicated test.
    agent = Agent(
        role="Research Assistant",
        goal="Find information about the capital of France",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="gpt-4o-mini", stream=True),
        verbose=True,
    )
    task = Task(
        description="What is the capital of France?",
        expected_output="The capital of France",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    assert result.token_usage is not None
    assert result.token_usage.total_tokens > 0
    assert result.token_usage.prompt_tokens > 0
    assert result.token_usage.completion_tokens > 0
    assert result.token_usage.successful_requests >= 1
def test_openai_responses_api_initialization():
    """Responses API mode stores api/instructions/store settings verbatim."""
    completion = OpenAICompletion(
        model="gpt-5",
        api="responses",
        instructions="You are a helpful assistant.",
        store=True,
    )
    assert completion.api == "responses"
    assert completion.instructions == "You are a helpful assistant."
    assert completion.store is True
    assert completion.model == "gpt-5"
def test_openai_responses_api_default_is_completions():
    """Backward compatibility: the API mode defaults to 'completions'."""
    completion = OpenAICompletion(model="gpt-4o")
    assert completion.api == "completions"
def test_openai_responses_api_prepare_params():
    """System messages fold into `instructions`; the rest becomes `input`."""
    completion = OpenAICompletion(
        model="gpt-5",
        api="responses",
        instructions="Base instructions.",
        store=True,
        temperature=0.7,
    )
    conversation = [
        {"role": "system", "content": "System message."},
        {"role": "user", "content": "Hello!"},
    ]
    params = completion._prepare_responses_params(conversation)

    assert params["model"] == "gpt-5"
    # The configured base instructions and the system message are merged.
    assert "Base instructions." in params["instructions"]
    assert "System message." in params["instructions"]
    assert params["store"] is True
    assert params["temperature"] == 0.7
    # Only the non-system messages remain as input.
    assert params["input"] == [{"role": "user", "content": "Hello!"}]
def test_openai_responses_api_tool_format():
    """Responses API tools are internally tagged: name/description at top level."""
    llm = OpenAICompletion(model="gpt-5", api="responses")
    weather_tool = {
        "name": "get_weather",
        "description": "Get the weather for a location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    }
    converted = llm._convert_tools_for_responses([weather_tool])
    assert len(converted) == 1
    entry = converted[0]
    assert entry["type"] == "function"
    assert entry["name"] == "get_weather"
    assert entry["description"] == "Get the weather for a location"
    assert "parameters" in entry
    # Unlike the Chat Completions shape, there is no nested "function" wrapper.
    assert "function" not in entry
def test_openai_completions_api_tool_format():
    """Chat Completions tools are externally tagged: payload nested under 'function'."""
    llm = OpenAICompletion(model="gpt-4o", api="completions")
    weather_tool = {
        "name": "get_weather",
        "description": "Get the weather for a location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    }
    # NOTE(review): "_convert_tools_for_interference" looks like a typo of
    # "inference", but it is the project's actual method name — left as-is.
    converted = llm._convert_tools_for_interference([weather_tool])
    assert len(converted) == 1
    entry = converted[0]
    assert entry["type"] == "function"
    assert "function" in entry
    assert entry["function"]["name"] == "get_weather"
    assert entry["function"]["description"] == "Get the weather for a location"
def test_openai_responses_api_structured_output_format():
    """Structured outputs map to text.format json_schema in the Responses API."""
    from pydantic import BaseModel

    class Person(BaseModel):
        name: str
        age: int

    llm = OpenAICompletion(model="gpt-5", api="responses")
    params = llm._prepare_responses_params(
        [{"role": "user", "content": "Extract: Jane, 25"}], response_model=Person
    )
    assert "text" in params
    assert "format" in params["text"]
    fmt = params["text"]["format"]
    assert fmt["type"] == "json_schema"
    assert fmt["name"] == "Person"
    assert fmt["strict"] is True
def test_openai_responses_api_with_previous_response_id():
    """An explicit previous_response_id is forwarded for multi-turn chaining."""
    chained_llm = OpenAICompletion(
        model="gpt-5",
        api="responses",
        previous_response_id="resp_abc123",
        store=True,
    )
    params = chained_llm._prepare_responses_params(
        [{"role": "user", "content": "Continue our conversation."}]
    )
    assert params["previous_response_id"] == "resp_abc123"
    assert params["store"] is True
def test_openai_responses_api_call_routing():
    """call() dispatches to _call_completions or _call_responses based on api."""
    from unittest.mock import patch

    completions_llm = OpenAICompletion(model="gpt-4o", api="completions")
    with patch.object(
        completions_llm, "_call_completions", return_value="completions result"
    ) as completions_spy:
        completions_out = completions_llm.call("Hello")
        completions_spy.assert_called_once()
        assert completions_out == "completions result"

    responses_llm = OpenAICompletion(model="gpt-5", api="responses")
    with patch.object(
        responses_llm, "_call_responses", return_value="responses result"
    ) as responses_spy:
        responses_out = responses_llm.call("Hello")
        responses_spy.assert_called_once()
        assert responses_out == "responses result"
# =============================================================================
# VCR Integration Tests for Responses API
# =============================================================================
@pytest.mark.vcr()
def test_openai_responses_api_basic_call():
    """A plain text call through the Responses API returns a string answer."""
    llm = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
        instructions="You are a helpful assistant. Be concise.",
    )
    answer = llm.call("What is 2 + 2? Answer with just the number.")
    assert isinstance(answer, str)
    assert "4" in answer
@pytest.mark.vcr()
def test_openai_responses_api_with_structured_output():
    """The Responses API returns a parsed Pydantic model when one is requested."""
    from pydantic import BaseModel, Field

    class MathAnswer(BaseModel):
        """Structured math answer."""

        result: int = Field(description="The numerical result")
        explanation: str = Field(description="Brief explanation")

    llm = OpenAICompletion(model="gpt-4o-mini", api="responses")
    answer = llm.call("What is 5 * 7?", response_model=MathAnswer)
    assert isinstance(answer, MathAnswer)
    assert answer.result == 35
@pytest.mark.vcr()
def test_openai_responses_api_with_system_message_extraction():
    """System messages become instructions and still steer the reply."""
    llm = OpenAICompletion(model="gpt-4o-mini", api="responses")
    reply = llm.call(
        [
            {"role": "system", "content": "You always respond in uppercase letters only."},
            {"role": "user", "content": "Say hello"},
        ]
    )
    assert isinstance(reply, str)
    # Loose check: model either shouts everything or at least says HELLO.
    assert reply.isupper() or "HELLO" in reply.upper()
@pytest.mark.vcr()
def test_openai_responses_api_streaming():
    """Streaming over the Responses API accumulates the full text result."""
    streaming_llm = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
        stream=True,
        instructions="Be very concise.",
    )
    text = streaming_llm.call("Count from 1 to 3, separated by commas.")
    assert isinstance(text, str)
    for digit in ("1", "2", "3"):
        assert digit in text
@pytest.mark.vcr()
def test_openai_responses_api_returns_usage_metrics():
    """Token usage is recorded for Responses API calls."""
    llm = OpenAICompletion(model="gpt-4o-mini", api="responses")
    llm.call("Say hello")
    summary = llm.get_token_usage_summary()
    assert summary.total_tokens > 0
    assert summary.prompt_tokens > 0
    assert summary.completion_tokens > 0
def test_openai_responses_api_builtin_tools_param():
    """builtin_tools names expand into Responses API tool entries."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        builtin_tools=["web_search", "code_interpreter"],
    )
    assert llm.builtin_tools == ["web_search", "code_interpreter"]
    params = llm._prepare_responses_params([{"role": "user", "content": "Test"}])
    assert "tools" in params
    kinds = [entry["type"] for entry in params["tools"]]
    # "web_search" maps to the preview tool type.
    assert "web_search_preview" in kinds
    assert "code_interpreter" in kinds
def test_openai_responses_api_builtin_tools_with_custom_tools():
    """Built-in and custom function tools are merged into a single tools list."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        builtin_tools=["web_search"],
    )
    weather_fn = {
        "name": "get_weather",
        "description": "Get weather for a location",
        "parameters": {"type": "object", "properties": {}},
    }
    params = llm._prepare_responses_params(
        [{"role": "user", "content": "Test"}], tools=[weather_fn]
    )
    assert len(params["tools"]) == 2
    kinds = [entry.get("type") for entry in params["tools"]]
    assert "web_search_preview" in kinds
    assert "function" in kinds
@pytest.mark.vcr()
def test_openai_responses_api_with_web_search():
    """End-to-end call using the web_search built-in tool returns text."""
    llm = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
        builtin_tools=["web_search"],
    )
    answer = llm.call("What is the current population of Tokyo? Be brief.")
    assert isinstance(answer, str)
    assert len(answer) > 0
def test_responses_api_result_dataclass():
    """ResponsesAPIResult defaults every tool/reasoning bucket to an empty list."""
    outcome = ResponsesAPIResult(text="Hello, world!", response_id="resp_123")
    assert outcome.text == "Hello, world!"
    assert outcome.response_id == "resp_123"
    empty_buckets = (
        outcome.web_search_results,
        outcome.file_search_results,
        outcome.code_interpreter_results,
        outcome.computer_use_results,
        outcome.reasoning_summaries,
        outcome.function_calls,
    )
    for bucket in empty_buckets:
        assert bucket == []
    assert not outcome.has_tool_outputs()
    assert not outcome.has_reasoning()
def test_responses_api_result_has_tool_outputs():
    """has_tool_outputs() is true when any search result bucket is populated."""
    web_hit = ResponsesAPIResult(
        text="Test",
        web_search_results=[{"id": "ws_1", "status": "completed", "type": "web_search_call"}],
    )
    assert web_hit.has_tool_outputs()

    file_hit = ResponsesAPIResult(
        text="Test",
        file_search_results=[{"id": "fs_1", "status": "completed", "type": "file_search_call", "queries": [], "results": []}],
    )
    assert file_hit.has_tool_outputs()
def test_responses_api_result_has_reasoning():
    """has_reasoning() reflects whether reasoning summaries were captured."""
    with_summaries = ResponsesAPIResult(
        text="Test",
        reasoning_summaries=[{"id": "r_1", "type": "reasoning", "summary": []}],
    )
    without_summaries = ResponsesAPIResult(text="Test")
    assert with_summaries.has_reasoning()
    assert not without_summaries.has_reasoning()
def test_openai_responses_api_parse_tool_outputs_param():
    """parse_tool_outputs=True is stored on the instance."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", parse_tool_outputs=True)
    assert llm.parse_tool_outputs is True
def test_openai_responses_api_parse_tool_outputs_default_false():
    """parse_tool_outputs is off unless explicitly requested."""
    llm = OpenAICompletion(model="gpt-4o", api="responses")
    assert llm.parse_tool_outputs is False
@pytest.mark.vcr()
def test_openai_responses_api_with_parse_tool_outputs():
    """With parse_tool_outputs on, call() yields a ResponsesAPIResult with tool data."""
    llm = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
        builtin_tools=["web_search"],
        parse_tool_outputs=True,
    )
    outcome = llm.call("What is the current population of Tokyo? Be very brief.")
    assert isinstance(outcome, ResponsesAPIResult)
    assert len(outcome.text) > 0
    assert outcome.response_id is not None
    # The model should have invoked web search to answer this question.
    assert len(outcome.web_search_results) > 0
    assert outcome.has_tool_outputs()
@pytest.mark.vcr()
def test_openai_responses_api_parse_tool_outputs_basic_call():
    """parse_tool_outputs without built-in tools still wraps the plain result."""
    llm = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
        parse_tool_outputs=True,
    )
    outcome = llm.call("Say hello in exactly 3 words.")
    assert isinstance(outcome, ResponsesAPIResult)
    assert len(outcome.text) > 0
    assert outcome.response_id is not None
    # No built-in tools were configured, so no tool buckets should be filled.
    assert not outcome.has_tool_outputs()
# ============================================================================
# Auto-Chaining Tests (Responses API)
# ============================================================================
def test_openai_responses_api_auto_chain_param():
    """auto_chain=True is stored and starts with no tracked response ID."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain=True)
    assert llm.auto_chain is True
    assert llm._last_response_id is None
def test_openai_responses_api_auto_chain_default_false():
    """auto_chain is disabled by default."""
    llm = OpenAICompletion(model="gpt-4o", api="responses")
    assert llm.auto_chain is False
def test_openai_responses_api_last_response_id_property():
    """last_response_id mirrors the internal _last_response_id field."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain=True)
    # Fresh instance: nothing tracked yet.
    assert llm.last_response_id is None
    # Poke the private field and read it back through the property.
    llm._last_response_id = "resp_test_123"
    assert llm.last_response_id == "resp_test_123"
def test_openai_responses_api_reset_chain():
    """reset_chain() forgets the tracked response ID."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain=True)
    llm._last_response_id = "resp_test_123"
    assert llm.last_response_id == "resp_test_123"
    llm.reset_chain()
    assert llm.last_response_id is None
def test_openai_responses_api_auto_chain_prepare_params():
    """Auto-chaining injects the tracked ID as previous_response_id."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain=True)
    user_turn = [{"role": "user", "content": "test"}]
    # Nothing tracked yet, so the param must be absent.
    first = llm._prepare_responses_params(messages=user_turn)
    assert "previous_response_id" not in first
    # Once an ID is tracked, it is threaded into the next request.
    llm._last_response_id = "resp_previous_123"
    follow_up = llm._prepare_responses_params(messages=user_turn)
    assert follow_up.get("previous_response_id") == "resp_previous_123"
def test_openai_responses_api_explicit_previous_response_id_takes_precedence():
    """An explicitly configured previous_response_id wins over the auto-chained one."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        auto_chain=True,
        previous_response_id="resp_explicit_456",
    )
    llm._last_response_id = "resp_auto_123"
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert params.get("previous_response_id") == "resp_explicit_456"
def test_openai_responses_api_auto_chain_disabled_no_tracking():
    """With auto_chain off, a lingering internal response ID is never sent."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain=False)
    llm._last_response_id = "resp_should_not_use"
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert "previous_response_id" not in params
@pytest.mark.vcr()
def test_openai_responses_api_auto_chain_integration():
    """Auto-chaining records a fresh response ID after every call."""
    llm = OpenAICompletion(model="gpt-4o-mini", api="responses", auto_chain=True)
    assert llm.last_response_id is None

    first_reply = llm.call("My name is Alice. Remember this.")
    initial_id = llm.last_response_id
    assert initial_id is not None
    assert initial_id.startswith("resp_")

    second_reply = llm.call("What is my name?")
    # Each call yields a new response ID while chaining off the previous one.
    assert llm.last_response_id is not None
    assert llm.last_response_id != initial_id
    assert isinstance(first_reply, str)
    assert isinstance(second_reply, str)
@pytest.mark.vcr()
def test_openai_responses_api_auto_chain_with_reset():
    """reset_chain() starts a brand-new conversation with a new response ID."""
    llm = OpenAICompletion(model="gpt-4o-mini", api="responses", auto_chain=True)

    llm.call("My favorite color is blue.")
    original_chain = llm.last_response_id
    assert original_chain is not None

    llm.reset_chain()
    assert llm.last_response_id is None

    llm.call("Hello!")
    fresh_chain = llm.last_response_id
    assert fresh_chain is not None
    # A fresh conversation gets a different response ID.
    assert fresh_chain != original_chain
# =============================================================================
# Encrypted Reasoning for ZDR (Zero Data Retention) Tests
# =============================================================================
def test_openai_responses_api_auto_chain_reasoning_param():
    """auto_chain_reasoning=True is stored and starts with no reasoning items."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=True)
    assert llm.auto_chain_reasoning is True
    assert llm._last_reasoning_items is None
def test_openai_responses_api_auto_chain_reasoning_default_false():
    """auto_chain_reasoning is disabled by default."""
    llm = OpenAICompletion(model="gpt-4o", api="responses")
    assert llm.auto_chain_reasoning is False
def test_openai_responses_api_last_reasoning_items_property():
    """last_reasoning_items mirrors the internal _last_reasoning_items field."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=True)
    assert llm.last_reasoning_items is None
    stored = [{"id": "rs_test_123", "type": "reasoning"}]
    llm._last_reasoning_items = stored
    assert llm.last_reasoning_items == stored
def test_openai_responses_api_reset_reasoning_chain():
    """reset_reasoning_chain() drops any stored reasoning items."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=True)
    stored = [{"id": "rs_test_123", "type": "reasoning"}]
    llm._last_reasoning_items = stored
    assert llm.last_reasoning_items == stored
    llm.reset_reasoning_chain()
    assert llm.last_reasoning_items is None
def test_openai_responses_api_auto_chain_reasoning_adds_include():
    """auto_chain_reasoning requests encrypted reasoning content via include."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=True)
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert "include" in params
    assert "reasoning.encrypted_content" in params["include"]
def test_openai_responses_api_auto_chain_reasoning_preserves_existing_include():
    """User-supplied include entries survive alongside the reasoning entry."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        auto_chain_reasoning=True,
        include=["file_search_call.results"],
    )
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert "include" in params
    for entry in ("reasoning.encrypted_content", "file_search_call.results"):
        assert entry in params["include"]
def test_openai_responses_api_auto_chain_reasoning_no_duplicate_include():
    """reasoning.encrypted_content appears at most once in include."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        auto_chain_reasoning=True,
        include=["reasoning.encrypted_content"],
    )
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert "include" in params
    # Already present via the user's include list: must not be duplicated.
    assert params["include"].count("reasoning.encrypted_content") == 1
def test_openai_responses_api_auto_chain_reasoning_prepends_to_input():
    """Stored reasoning items are placed ahead of user messages in input."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=True)
    reasoning_item = MagicMock()
    reasoning_item.type = "reasoning"
    reasoning_item.id = "rs_test_123"
    llm._last_reasoning_items = [reasoning_item]
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    # Expected order: replayed reasoning item first, then the user message.
    assert len(params["input"]) == 2
    assert params["input"][0] == reasoning_item
    assert params["input"][1]["role"] == "user"
def test_openai_responses_api_auto_chain_reasoning_disabled_no_include():
    """No reasoning include entry is added when auto_chain_reasoning is off."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=False)
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    # Either include is absent entirely, or it lacks the reasoning entry.
    assert "reasoning.encrypted_content" not in params.get("include", [])
def test_openai_responses_api_auto_chain_reasoning_disabled_no_prepend():
    """Stored reasoning items are ignored when auto_chain_reasoning is off."""
    llm = OpenAICompletion(model="gpt-4o", api="responses", auto_chain_reasoning=False)
    leftover = MagicMock()
    leftover.type = "reasoning"
    llm._last_reasoning_items = [leftover]
    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    # Only the user message survives; no reasoning item is prepended.
    assert len(params["input"]) == 1
    assert params["input"][0]["role"] == "user"
def test_openai_responses_api_both_auto_chains_work_together():
    """auto_chain and auto_chain_reasoning compose within a single request."""
    llm = OpenAICompletion(
        model="gpt-4o",
        api="responses",
        auto_chain=True,
        auto_chain_reasoning=True,
    )
    assert llm.auto_chain is True
    assert llm.auto_chain_reasoning is True
    assert llm._last_response_id is None
    assert llm._last_reasoning_items is None

    llm._last_response_id = "resp_123"
    reasoning_item = MagicMock()
    reasoning_item.type = "reasoning"
    llm._last_reasoning_items = [reasoning_item]

    params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
    assert params.get("previous_response_id") == "resp_123"
    assert "reasoning.encrypted_content" in params["include"]
    # Input holds the replayed reasoning item plus the new user message.
    assert len(params["input"]) == 2
# =============================================================================
# Agent Kickoff Structured Output Tests
# =============================================================================
@pytest.mark.vcr()
def test_openai_agent_kickoff_structured_output_without_tools():
    """Agent.kickoff honors response_format natively when no tools are attached."""
    from pydantic import BaseModel, Field

    class AnalysisResult(BaseModel):
        """Structured output for analysis results."""

        topic: str = Field(description="The topic analyzed")
        key_points: list[str] = Field(description="Key insights from the analysis")
        summary: str = Field(description="Brief summary of findings")

    analyst = Agent(
        role="Analyst",
        goal="Provide structured analysis on topics",
        backstory="You are an expert analyst who provides clear, structured insights.",
        llm=LLM(model="gpt-4o-mini"),
        tools=[],
        verbose=True,
    )
    result = analyst.kickoff(
        messages="Analyze the benefits of remote work briefly. Keep it concise.",
        response_format=AnalysisResult,
    )
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, AnalysisResult), f"Expected AnalysisResult but got {type(result.pydantic)}"
    assert result.pydantic.topic, "Topic should not be empty"
    assert len(result.pydantic.key_points) > 0, "Should have at least one key point"
    assert result.pydantic.summary, "Summary should not be empty"
@pytest.mark.vcr()
def test_openai_agent_kickoff_structured_output_with_tools():
    """Agent.kickoff still honors response_format after tool calls ran."""
    from pydantic import BaseModel, Field
    from crewai.tools import tool

    class CalculationResult(BaseModel):
        """Structured output for calculation results."""

        operation: str = Field(description="The mathematical operation performed")
        result: int = Field(description="The result of the calculation")
        explanation: str = Field(description="Brief explanation of the calculation")

    @tool
    def add_numbers(a: int, b: int) -> int:
        """Add two numbers together and return the sum."""
        return a + b

    calculator = Agent(
        role="Calculator",
        goal="Perform calculations using available tools",
        backstory="You are a calculator assistant that uses tools to compute results.",
        llm=LLM(model="gpt-4o-mini"),
        tools=[add_numbers],
        verbose=True,
    )
    result = calculator.kickoff(
        messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
        response_format=CalculationResult,
    )
    assert result.pydantic is not None, "Expected pydantic output but got None"
    assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
    assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
    assert result.pydantic.operation, "Operation should not be empty"
    assert result.pydantic.explanation, "Explanation should not be empty"
# =============================================================================
# Stop Words with Structured Output Tests
# =============================================================================
def test_openai_stop_words_not_applied_to_structured_output():
    """Stop words must not truncate JSON produced for a response_model.

    A stop-word pattern such as "Observation:" can legitimately occur inside a
    JSON string field; truncating there would break JSON validation.
    """
    from pydantic import BaseModel, Field

    class ResearchResult(BaseModel):
        """Research result that may contain stop word patterns in string fields."""

        finding: str = Field(description="The research finding")
        observation: str = Field(description="Observation about the finding")

    llm = OpenAICompletion(
        model="gpt-4o",
        stop=["Observation:", "Final Answer:"],  # common ReAct-style stop words
    )
    # A JSON payload whose string field embeds a stop-word pattern.
    json_response = '{"finding": "The data shows growth", "observation": "Observation: This confirms the hypothesis"}'
    parsed = llm._validate_structured_output(json_response, ResearchResult)
    # The full JSON must parse without truncation.
    assert isinstance(parsed, ResearchResult)
    assert parsed.finding == "The data shows growth"
    assert "Observation:" in parsed.observation
def test_openai_stop_words_still_applied_to_regular_responses():
    """Plain-text responses are still truncated at configured stop words."""
    llm = OpenAICompletion(
        model="gpt-4o",
        stop=["Observation:", "Final Answer:"],
    )
    raw = "I need to search for more information.\n\nAction: search\nObservation: Found results"
    truncated = llm._apply_stop_words(raw)
    # Everything from the stop word onward must be cut away.
    assert "Observation:" not in truncated
    assert "Found results" not in truncated
    assert "I need to search for more information" in truncated
def test_openai_structured_output_preserves_json_with_stop_word_patterns():
    """Every stop-word-like pattern survives inside structured output fields."""
    from pydantic import BaseModel, Field

    class AgentObservation(BaseModel):
        """Model with fields that might contain stop word-like text."""

        action_taken: str = Field(description="What action was taken")
        observation_result: str = Field(description="The observation result")
        final_answer: str = Field(description="The final answer")

    llm = OpenAICompletion(
        model="gpt-4o",
        stop=["Observation:", "Final Answer:", "Action:"],
    )
    # JSON embedding every configured stop-word pattern inside its values.
    json_with_stop_patterns = '''{
    "action_taken": "Action: Searched the database",
    "observation_result": "Observation: Found 5 relevant results",
    "final_answer": "Final Answer: The data shows positive growth"
}'''
    parsed = llm._validate_structured_output(json_with_stop_patterns, AgentObservation)
    assert isinstance(parsed, AgentObservation)
    assert "Action:" in parsed.action_taken
    assert "Observation:" in parsed.observation_result
    assert "Final Answer:" in parsed.final_answer
@pytest.mark.vcr()
def test_openai_completions_cached_prompt_tokens():
    """Chat Completions usage tracking picks up cached prompt tokens.

    The same large system prompt is sent twice so the second request hits
    OpenAI's prompt cache (prompt_tokens_details.cached_tokens).
    """
    # OpenAI only caches prompts above ~1024 tokens, hence the padding.
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = OpenAICompletion(model="gpt-4.1")
    # Call one primes the cache; call two should hit it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_openai_responses_api_cached_prompt_tokens():
    """Responses API usage tracking picks up cached prompt tokens.

    Tracks input_tokens_details.cached_tokens across two identical-prompt calls.
    """
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = OpenAICompletion(model="gpt-4.1", api="responses")
    # Call one primes the cache; call two should hit it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_openai_streaming_cached_prompt_tokens():
    """Streaming Chat Completions also tracks cached prompt tokens."""
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {padding}"
    llm = OpenAICompletion(model="gpt-4.1", stream=True)
    # Call one primes the cache; call two should hit it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_openai_completions_cached_prompt_tokens_with_tools():
    """Cached prompt tokens are still tracked when tools are in the request."""
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {padding}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    weather_schema = {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name"
                }
            },
            "required": ["location"],
            "additionalProperties": False,
        },
    }
    llm = OpenAICompletion(model="gpt-4.1")
    # Identical system prompt + tool schema: the second call should hit the cache.
    for question in ("What is the weather in Tokyo?", "What is the weather in Paris?"):
        llm.call(
            [
                {"role": "system", "content": system_msg},
                {"role": "user", "content": question},
            ],
            tools=[weather_schema],
            available_functions={"get_weather": get_weather},
        )
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
@pytest.mark.vcr()
def test_openai_responses_api_cached_prompt_tokens_with_tools():
    """
    Test that the Responses API correctly tracks cached_prompt_tokens
    when function tools are used.

    Two calls share the same oversized system prompt and tool schema so the
    second request hits OpenAI's prompt cache.
    """
    # OpenAI only caches prompts above ~1024 tokens, hence the padding.
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {padding}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    tools = [
        {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name"
                    }
                },
                "required": ["location"],
            },
        }
    ]
    # BUG FIX: was api='response' — an invalid value that bypassed the
    # Responses API path this test is meant to exercise; every sibling
    # test uses the canonical "responses" literal.
    llm = OpenAICompletion(model="gpt-4.1", api="responses")
    # First call with tool: primes the cache.
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Tokyo?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    # Second call: same system prompt + tools should hit the cache.
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Paris?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )
    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
def test_openai_streaming_returns_tool_calls_without_available_functions():
    """Test that streaming returns tool calls list when available_functions is None.
    This mirrors the non-streaming path where tool_calls are returned for
    the executor to handle. Reproduces the bug where streaming with tool
    calls would return empty text instead of tool_calls when
    available_functions was not provided (as the crew executor does).
    """
    llm = LLM(model="openai/gpt-4o-mini", stream=True)
    # Chunk 1: opens the tool call (id + function name) and carries the first
    # fragment of the JSON arguments string.
    mock_chunk_1 = MagicMock()
    mock_chunk_1.choices = [MagicMock()]
    mock_chunk_1.choices[0].delta = MagicMock()
    mock_chunk_1.choices[0].delta.content = None
    mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
    mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
    mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
    mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
    mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
    mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
    mock_chunk_1.choices[0].finish_reason = None
    mock_chunk_1.usage = None
    mock_chunk_1.id = "chatcmpl-1"
    # Chunk 2: continuation of the same call (id/name are None); the arguments
    # fragments must be concatenated by tool-call index.
    mock_chunk_2 = MagicMock()
    mock_chunk_2.choices = [MagicMock()]
    mock_chunk_2.choices[0].delta = MagicMock()
    mock_chunk_2.choices[0].delta.content = None
    mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
    mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
    mock_chunk_2.choices[0].delta.tool_calls[0].id = None
    mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
    mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
    mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
    mock_chunk_2.choices[0].finish_reason = None
    mock_chunk_2.usage = None
    mock_chunk_2.id = "chatcmpl-1"
    # Chunk 3: terminal chunk carrying only finish_reason and usage.
    mock_chunk_3 = MagicMock()
    mock_chunk_3.choices = [MagicMock()]
    mock_chunk_3.choices[0].delta = MagicMock()
    mock_chunk_3.choices[0].delta.content = None
    mock_chunk_3.choices[0].delta.tool_calls = None
    mock_chunk_3.choices[0].finish_reason = "tool_calls"
    mock_chunk_3.usage = MagicMock()
    mock_chunk_3.usage.prompt_tokens = 10
    mock_chunk_3.usage.completion_tokens = 5
    mock_chunk_3.id = "chatcmpl-1"
    with patch.object(
        llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3])
    ):
        result = llm.call(
            messages=[{"role": "user", "content": "Calculate 1+1"}],
            tools=[{
                "type": "function",
                "function": {
                    "name": "calculator",
                    "description": "Calculate expression",
                    "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
                },
            }],
            available_functions=None,
        )
    # With no available_functions, the accumulated tool calls must be returned
    # as a list for the caller (the crew executor) to dispatch.
    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
    assert len(result) == 1
    assert result[0]["function"]["name"] == "calculator"
    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
    assert result[0]["id"] == "call_abc123"
    assert result[0]["type"] == "function"
@pytest.mark.asyncio
async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
    """Test that async streaming returns tool calls list when available_functions is None.
    Same as the sync test but for the async path (_ahandle_streaming_completion).
    """
    llm = LLM(model="openai/gpt-4o-mini", stream=True)
    # Chunk 1: opens the tool call (id + name) with the first arguments fragment.
    mock_chunk_1 = MagicMock()
    mock_chunk_1.choices = [MagicMock()]
    mock_chunk_1.choices[0].delta = MagicMock()
    mock_chunk_1.choices[0].delta.content = None
    mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()]
    mock_chunk_1.choices[0].delta.tool_calls[0].index = 0
    mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123"
    mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock()
    mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator"
    mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr'
    mock_chunk_1.choices[0].finish_reason = None
    mock_chunk_1.usage = None
    mock_chunk_1.id = "chatcmpl-1"
    # Chunk 2: continuation fragment for the same tool-call index.
    mock_chunk_2 = MagicMock()
    mock_chunk_2.choices = [MagicMock()]
    mock_chunk_2.choices[0].delta = MagicMock()
    mock_chunk_2.choices[0].delta.content = None
    mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()]
    mock_chunk_2.choices[0].delta.tool_calls[0].index = 0
    mock_chunk_2.choices[0].delta.tool_calls[0].id = None
    mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock()
    mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None
    mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}'
    mock_chunk_2.choices[0].finish_reason = None
    mock_chunk_2.usage = None
    mock_chunk_2.id = "chatcmpl-1"
    # Chunk 3: terminal chunk with finish_reason and usage only.
    mock_chunk_3 = MagicMock()
    mock_chunk_3.choices = [MagicMock()]
    mock_chunk_3.choices[0].delta = MagicMock()
    mock_chunk_3.choices[0].delta.content = None
    mock_chunk_3.choices[0].delta.tool_calls = None
    mock_chunk_3.choices[0].finish_reason = "tool_calls"
    mock_chunk_3.usage = MagicMock()
    mock_chunk_3.usage.prompt_tokens = 10
    mock_chunk_3.usage.completion_tokens = 5
    mock_chunk_3.id = "chatcmpl-1"
    class MockAsyncStream:
        """Async iterator that mimics OpenAI's async streaming response."""
        def __init__(self, chunks: list[Any]) -> None:
            self._chunks = chunks
            self._index = 0
        def __aiter__(self) -> "MockAsyncStream":
            return self
        async def __anext__(self) -> Any:
            if self._index >= len(self._chunks):
                raise StopAsyncIteration
            chunk = self._chunks[self._index]
            self._index += 1
            return chunk
    async def mock_create(**kwargs: Any) -> MockAsyncStream:
        # The real async API returns an awaitable resolving to an async stream.
        return MockAsyncStream([mock_chunk_1, mock_chunk_2, mock_chunk_3])
    with patch.object(
        llm.async_client.chat.completions, "create", side_effect=mock_create
    ):
        result = await llm.acall(
            messages=[{"role": "user", "content": "Calculate 1+1"}],
            tools=[{
                "type": "function",
                "function": {
                    "name": "calculator",
                    "description": "Calculate expression",
                    "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
                },
            }],
            available_functions=None,
        )
    # Same contract as the sync path: tool calls come back for the executor.
    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
    assert len(result) == 1
    assert result[0]["function"]["name"] == "calculator"
    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
    assert result[0]["id"] == "call_abc123"
    assert result[0]["type"] == "function"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/openai/test_openai.py",
"license": "MIT License",
"lines": 1553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_task_guardrails.py | from unittest.mock import Mock, patch
import pytest
from crewai import Agent, Task
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from crewai.llm import LLM
from crewai.tasks.hallucination_guardrail import HallucinationGuardrail
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
def create_smart_task(**kwargs):
    """Task factory that injects a stub agent whenever guardrails are configured.

    Keeps older call sites working unchanged while satisfying the
    agent-required constraint that guardrails introduce.
    """
    configured = kwargs.get("guardrails") or []
    needs_agent = kwargs.get("guardrail") is not None or len(configured) > 0
    if needs_agent and kwargs.get("agent") is None:
        kwargs["agent"] = Agent(
            role="test_agent", goal="test_goal", backstory="test_backstory"
        )
    return Task(**kwargs)
def test_task_without_guardrail():
    """Tasks without any guardrail keep their legacy behavior."""
    mock_agent = Mock()
    mock_agent.role = "test_agent"
    mock_agent.execute_task.return_value = "test result"
    mock_agent.crew = None
    mock_agent.last_messages = []
    plain_task = create_smart_task(description="Test task", expected_output="Output")
    output = plain_task.execute_sync(agent=mock_agent)
    assert isinstance(output, TaskOutput)
    assert output.raw == "test result"
def test_task_with_successful_guardrail_func():
    """A passing guardrail may transform the task result before it is returned."""
    def guardrail(result: TaskOutput):
        # Accept and uppercase the raw output.
        return (True, result.raw.upper())
    mock_agent = Mock()
    mock_agent.role = "test_agent"
    mock_agent.execute_task.return_value = "test result"
    mock_agent.crew = None
    mock_agent.last_messages = []
    guarded_task = create_smart_task(
        description="Test task", expected_output="Output", guardrail=guardrail
    )
    output = guarded_task.execute_sync(agent=mock_agent)
    assert isinstance(output, TaskOutput)
    assert output.raw == "TEST RESULT"
def test_task_with_failing_guardrail():
    """Test that failing guardrail triggers retry with error context.

    The guardrail always rejects, so the single retry is exhausted and the
    task raises with the guardrail's error message in context.
    """
    def guardrail(result: TaskOutput):
        # Always reject so each execution consumes a retry.
        return (False, "Invalid format")
    agent = Mock()
    agent.role = "test_agent"
    # Queue results for the initial attempt and the retry. (The original test
    # assigned this side_effect twice; the duplicate assignment was redundant.)
    agent.execute_task.side_effect = ["bad result", "good result"]
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test task",
        expected_output="Output",
        guardrail=guardrail,
        guardrail_max_retries=1,
    )
    # Both attempts fail validation, so the retry budget is exhausted.
    with pytest.raises(Exception) as exc_info:
        task.execute_sync(agent=agent)
    assert "Task failed guardrail validation" in str(exc_info.value)
    assert task.retry_count == 1
def test_task_with_guardrail_retries():
    """The guardrail retry budget (guardrail_max_retries) is honored exactly."""
    def guardrail(result: TaskOutput):
        # Never passes, so every attempt consumes a retry.
        return (False, "Invalid format")
    mock_agent = Mock()
    mock_agent.role = "test_agent"
    mock_agent.execute_task.return_value = "bad result"
    mock_agent.crew = None
    mock_agent.last_messages = []
    guarded_task = create_smart_task(
        description="Test task",
        expected_output="Output",
        guardrail=guardrail,
        guardrail_max_retries=2,
    )
    with pytest.raises(Exception) as exc_info:
        guarded_task.execute_sync(agent=mock_agent)
    message = str(exc_info.value)
    assert guarded_task.retry_count == 2
    assert "Task failed guardrail validation after 2 retries" in message
    assert "Invalid format" in message
def test_guardrail_error_in_context():
    """The guardrail's error message must surface in the raised exception."""
    def guardrail(result: TaskOutput):
        # Reject every attempt with a descriptive error message.
        return (False, "Expected JSON, got string")
    agent = Mock()
    agent.role = "test_agent"
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test task",
        expected_output="Output",
        guardrail=guardrail,
        guardrail_max_retries=1,
    )
    # Return "invalid" on the first execution and valid JSON afterwards.
    attempts = {"count": 0}
    def execute_task(task, context, tools):
        attempts["count"] += 1
        if attempts["count"] == 1:
            return "invalid"
        return '{"valid": "json"}'
    agent.execute_task.side_effect = execute_task
    with pytest.raises(Exception) as exc_info:
        task.execute_sync(agent=agent)
    message = str(exc_info.value)
    assert "Task failed guardrail validation" in message
    assert "Expected JSON, got string" in message
@pytest.fixture
def sample_agent():
    """Real (non-mock) Agent for tests that exercise LLM-backed guardrails."""
    return Agent(role="Test Agent", goal="Test Goal", backstory="Test Backstory")
@pytest.fixture
def task_output():
    """TaskOutput with a long raw text, used to test word-count guardrails."""
    return TaskOutput(
        raw="""
    Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever
    """,
        description="Test task",
        expected_output="Output",
        agent="Test Agent",
    )
@pytest.mark.vcr()
def test_task_guardrail_process_output(task_output):
    """Test that LLMGuardrail correctly validates task output.
    Note: Due to VCR cassette response ordering issues, the exact results may vary.
    The test verifies that the guardrail returns a tuple with (bool, str) and
    processes the output appropriately.
    """
    # The fixture output is far longer than 10 words, so this should fail.
    guardrail = LLMGuardrail(
        description="Ensure the result has less than 10 words", llm=LLM(model="gpt-4o")
    )
    result = guardrail(task_output)
    assert isinstance(result, tuple)
    assert len(result) == 2
    assert isinstance(result[0], bool)
    assert isinstance(result[1], str)
    assert result[0] is False
    assert result[1] is not None and len(result[1]) > 0
    # Much looser limit: the same output should now be acceptable.
    guardrail = LLMGuardrail(
        description="Ensure the result has less than 500 words", llm=LLM(model="gpt-4o")
    )
    result = guardrail(task_output)
    # Should return a tuple of (bool, str)
    assert isinstance(result, tuple)
    assert len(result) == 2
    assert isinstance(result[0], bool)
    # Note: Due to VCR cassette issues, this may return False with an error message
    # The important thing is that the guardrail returns a valid response
    assert result[1] is not None
@pytest.mark.vcr()
def test_guardrail_emits_events(sample_agent):
    """Guardrail runs emit started/completed events for both string and callable guardrails."""
    import threading
    started_guardrail = []
    completed_guardrail = []
    # Handlers may run on a different thread; a Condition lets the test wait
    # for them without racing on the plain lists.
    condition = threading.Condition()
    @crewai_event_bus.on(LLMGuardrailStartedEvent)
    def handle_guardrail_started(source, event):
        with condition:
            started_guardrail.append(
                {"guardrail": event.guardrail, "retry_count": event.retry_count}
            )
            condition.notify()
    @crewai_event_bus.on(LLMGuardrailCompletedEvent)
    def handle_guardrail_completed(source, event):
        with condition:
            completed_guardrail.append(
                {
                    "success": event.success,
                    "result": event.result,
                    "error": event.error,
                    "retry_count": event.retry_count,
                }
            )
            condition.notify()
    # First task: a string guardrail (converted to an LLM guardrail internally).
    task = create_smart_task(
        description="Gather information about available books on the First World War",
        agent=sample_agent,
        expected_output="A list of available books on the First World War",
        guardrail="Ensure the authors are from Italy",
    )
    result = task.execute_sync(agent=sample_agent)
    crewai_event_bus.flush(timeout=10.0)
    with condition:
        success = condition.wait_for(
            lambda: len(started_guardrail) >= 2 and len(completed_guardrail) >= 2,
            timeout=5
        )
    assert success, f"Timeout waiting for first task events. Started: {len(started_guardrail)}, Completed: {len(completed_guardrail)}"
    # Second task: a plain callable guardrail that always passes.
    def custom_guardrail(result: TaskOutput):
        return (True, "good result from callable function")
    task = create_smart_task(
        description="Test task",
        expected_output="Output",
        guardrail=custom_guardrail,
    )
    task.execute_sync(agent=sample_agent)
    crewai_event_bus.flush(timeout=10.0)
    with condition:
        success = condition.wait_for(
            lambda: len(started_guardrail) >= 3 and len(completed_guardrail) >= 3,
            timeout=5
        )
    assert success, f"Timeout waiting for second task events. Started: {len(started_guardrail)}, Completed: {len(completed_guardrail)}"
    # Partition the captured events per guardrail so each can be asserted on.
    string_guardrail_started = [
        e for e in started_guardrail if e["guardrail"] == "Ensure the authors are from Italy"
    ]
    callable_guardrail_started = [
        e for e in started_guardrail if "custom_guardrail" in e["guardrail"]
    ]
    assert len(string_guardrail_started) >= 2, f"Expected at least 2 string guardrail events, got {len(string_guardrail_started)}"
    assert len(callable_guardrail_started) == 1, f"Expected 1 callable guardrail event, got {len(callable_guardrail_started)}"
    assert callable_guardrail_started[0]["retry_count"] == 0
    string_guardrail_completed = [
        e for e in completed_guardrail if e.get("result") != "good result from callable function"
    ]
    callable_guardrail_completed = [
        e for e in completed_guardrail if e.get("result") == "good result from callable function"
    ]
    assert len(string_guardrail_completed) >= 2
    assert string_guardrail_completed[0]["success"] is False
    assert any(e["success"] for e in string_guardrail_completed), "Expected at least one successful string guardrail completion"
    assert len(callable_guardrail_completed) == 1
    assert callable_guardrail_completed[0]["success"] is True
    assert callable_guardrail_completed[0]["result"] == "good result from callable function"
@pytest.mark.vcr()
def test_guardrail_when_an_error_occurs(sample_agent, task_output):
    """If the guardrail's own LLM call errors, the task surfaces that error."""
    with (
        patch(
            "crewai.Agent.kickoff",
            side_effect=Exception("Unexpected error"),
        ),
        pytest.raises(
            Exception,
            match="Error while validating the task output: Unexpected error",
        ),
    ):
        task = create_smart_task(
            description="Gather information about available books on the First World War",
            agent=sample_agent,
            expected_output="A list of available books on the First World War",
            guardrail="Ensure the authors are from Italy",
            guardrail_max_retries=0,
        )
        task.execute_sync(agent=sample_agent)
def test_hallucination_guardrail_integration():
    """HallucinationGuardrail plugs into the task pipeline without altering output."""
    mock_agent = Mock()
    mock_agent.role = "test_agent"
    mock_agent.execute_task.return_value = "test result"
    mock_agent.crew = None
    mock_agent.last_messages = []
    hallucination_guardrail = HallucinationGuardrail(
        context="Test reference context for validation",
        llm=Mock(spec=LLM),
        threshold=8.0,
    )
    guarded_task = create_smart_task(
        description="Test task with hallucination guardrail",
        expected_output="Valid output",
        guardrail=hallucination_guardrail,
    )
    output = guarded_task.execute_sync(agent=mock_agent)
    assert isinstance(output, TaskOutput)
    assert output.raw == "test result"
def test_hallucination_guardrail_description_in_events():
    """The guardrail's description string is what event payloads carry."""
    guardrail = HallucinationGuardrail(context="Test context", llm=Mock(spec=LLM))
    expected = "HallucinationGuardrail (no-op)"
    assert guardrail.description == expected
    started = LLMGuardrailStartedEvent(guardrail=guardrail, retry_count=0)
    assert started.guardrail == expected
def test_multiple_guardrails_sequential_processing():
    """Each guardrail receives the previous guardrail's transformed output."""
    def add_prefix(result: TaskOutput) -> tuple[bool, str]:
        """Stage 1: prepend a marker."""
        return (True, f"[FIRST] {result.raw}")
    def add_suffix(result: TaskOutput) -> tuple[bool, str]:
        """Stage 2: append a marker."""
        return (True, f"{result.raw} [SECOND]")
    def uppercase(result: TaskOutput) -> tuple[bool, str]:
        """Stage 3: uppercase everything."""
        return (True, result.raw.upper())
    mock_agent = Mock()
    mock_agent.role = "sequential_agent"
    mock_agent.execute_task.return_value = "original text"
    mock_agent.crew = None
    mock_agent.last_messages = []
    pipeline_task = create_smart_task(
        description="Test sequential guardrails",
        expected_output="Processed text",
        guardrails=[add_prefix, add_suffix, uppercase],
    )
    output = pipeline_task.execute_sync(agent=mock_agent)
    assert output.raw == "[FIRST] ORIGINAL TEXT [SECOND]"
def test_multiple_guardrails_with_validation_failure():
    """Test multiple guardrails where one fails validation."""
    def length_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Ensure minimum length."""
        if len(result.raw) < 10:
            return (False, "Text too short")
        return (True, result.raw)
    def format_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Add formatting only if not already formatted."""
        if not result.raw.startswith("Formatted:"):
            return (True, f"Formatted: {result.raw}")
        return (True, result.raw)
    def validation_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Final validation."""
        if "Formatted:" not in result.raw:
            return (False, "Missing formatting")
        return (True, result.raw)
    # Use a callable that tracks calls and returns appropriate values
    call_count = 0
    def mock_execute_task(*args, **kwargs):
        # First attempt is too short (fails length_guardrail); the retried
        # execution produces a long-enough result.
        nonlocal call_count
        call_count += 1
        result = (
            "short"
            if call_count == 1
            else "this is a longer text that meets requirements"
        )
        return result
    agent = Mock()
    agent.role = "validation_agent"
    agent.execute_task = mock_execute_task
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test guardrails with validation",
        expected_output="Valid formatted text",
        guardrails=[length_guardrail, format_guardrail, validation_guardrail],
        guardrail_max_retries=2,
    )
    result = task.execute_sync(agent=agent)
    # The second call should be processed through all guardrails
    assert result.raw == "Formatted: this is a longer text that meets requirements"
    # Guardrail index 0 (length) consumed exactly one retry.
    assert task._guardrail_retry_counts.get(0, 0) == 1
def test_multiple_guardrails_with_mixed_string_and_taskoutput():
    """Test guardrails that return both strings and TaskOutput objects."""
    def string_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Returns a string."""
        return (True, f"String: {result.raw}")
    def taskoutput_guardrail(result: TaskOutput) -> tuple[bool, TaskOutput]:
        """Returns a TaskOutput object."""
        # Rebuild a full TaskOutput so downstream guardrails still receive a
        # complete object rather than a bare string.
        new_output = TaskOutput(
            name=result.name,
            description=result.description,
            expected_output=result.expected_output,
            raw=f"TaskOutput: {result.raw}",
            agent=result.agent,
            output_format=result.output_format,
        )
        return (True, new_output)
    def final_string_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Final string transformation."""
        return (True, f"Final: {result.raw}")
    agent = Mock()
    agent.role = "mixed_agent"
    agent.execute_task.return_value = "original"
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test mixed return types",
        expected_output="Mixed processing",
        guardrails=[string_guardrail, taskoutput_guardrail, final_string_guardrail],
    )
    result = task.execute_sync(agent=agent)
    # Transformations compose in list order regardless of return type.
    assert result.raw == "Final: TaskOutput: String: original"
def test_multiple_guardrails_with_retry_on_middle_guardrail():
    """Test that retry works correctly when a middle guardrail fails."""
    call_count = {"first": 0, "second": 0, "third": 0}
    def first_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Always succeeds."""
        call_count["first"] += 1
        return (True, f"First({call_count['first']}): {result.raw}")
    def second_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Fails on first attempt, succeeds on second."""
        call_count["second"] += 1
        if call_count["second"] == 1:
            return (False, "Second guardrail failed on first attempt")
        return (True, f"Second({call_count['second']}): {result.raw}")
    def third_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Always succeeds."""
        call_count["third"] += 1
        return (True, f"Third({call_count['third']}): {result.raw}")
    agent = Mock()
    agent.role = "retry_agent"
    agent.execute_task.return_value = "base"
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test retry in middle guardrail",
        expected_output="Retry handling",
        guardrails=[first_guardrail, second_guardrail, third_guardrail],
        guardrail_max_retries=2,
    )
    result = task.execute_sync(agent=agent)
    # Only guardrail index 1 consumed a retry; the call counts below show the
    # retry re-ran just the failing guardrail, not the earlier ones.
    assert task._guardrail_retry_counts.get(1, 0) == 1
    assert call_count["first"] == 1
    assert call_count["second"] == 2
    assert call_count["third"] == 1
    assert "Second(2)" in result.raw
def test_multiple_guardrails_with_max_retries_exceeded():
    """An always-failing guardrail must raise once its retry budget is spent."""
    def passing_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Always accepts, with a prefix."""
        return (True, f"Passed: {result.raw}")
    def failing_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Always rejects."""
        return (False, "This guardrail always fails")
    mock_agent = Mock()
    mock_agent.role = "failing_agent"
    mock_agent.execute_task.return_value = "test"
    mock_agent.crew = None
    mock_agent.last_messages = []
    doomed_task = create_smart_task(
        description="Test max retries with multiple guardrails",
        expected_output="Will fail",
        guardrails=[passing_guardrail, failing_guardrail],
        guardrail_max_retries=1,
    )
    with pytest.raises(Exception) as exc_info:
        doomed_task.execute_sync(agent=mock_agent)
    message = str(exc_info.value)
    assert "Task failed guardrail 1 validation after 1 retries" in message
    assert "This guardrail always fails" in message
    assert doomed_task._guardrail_retry_counts.get(1, 0) == 1
def test_multiple_guardrails_empty_list():
    """An empty guardrails list leaves the raw output untouched."""
    mock_agent = Mock()
    mock_agent.role = "empty_agent"
    mock_agent.execute_task.return_value = "no guardrails"
    mock_agent.crew = None
    mock_agent.last_messages = []
    plain_task = create_smart_task(
        description="Test empty guardrails list",
        expected_output="No processing",
        guardrails=[],
    )
    output = plain_task.execute_sync(agent=mock_agent)
    assert output.raw == "no guardrails"
def test_multiple_guardrails_with_llm_guardrails():
    """Callable and string (LLM-backed) guardrails can be mixed in one list."""
    def callable_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Plain callable guardrail."""
        return (True, f"Callable: {result.raw}")
    # Create a proper mock agent without config issues
    from crewai import Agent
    real_agent = Agent(
        role="mixed_guardrail_agent", goal="Test goal", backstory="Test backstory"
    )
    mixed_task = create_smart_task(
        description="Test mixed guardrail types",
        expected_output="Mixed processing",
        guardrails=[callable_guardrail, "Ensure the output is professional"],
        agent=real_agent,
    )
    # String entries are converted to LLMGuardrail instances internally, so
    # every stored guardrail ends up callable.
    assert len(mixed_task._guardrails) == 2
    assert callable(mixed_task._guardrails[0])
    assert callable(mixed_task._guardrails[1])
def test_multiple_guardrails_processing_order():
    """Guardrails run strictly in list order."""
    seen = []
    def make_stage(tag, mark):
        """Build a guardrail that records its tag and prefixes its mark."""
        def stage(result: TaskOutput) -> tuple[bool, str]:
            seen.append(tag)
            return (True, f"{mark}-{result.raw}")
        return stage
    mock_agent = Mock()
    mock_agent.role = "order_agent"
    mock_agent.execute_task.return_value = "base"
    mock_agent.crew = None
    mock_agent.last_messages = []
    ordered_task = create_smart_task(
        description="Test processing order",
        expected_output="Ordered processing",
        guardrails=[
            make_stage("first", "1"),
            make_stage("second", "2"),
            make_stage("third", "3"),
        ],
    )
    output = ordered_task.execute_sync(agent=mock_agent)
    assert seen == ["first", "second", "third"]
    assert output.raw == "3-2-1-base"
def test_multiple_guardrails_with_pydantic_output():
    """Test multiple guardrails with Pydantic output model.

    A JSON-producing guardrail feeds a JSON-validating guardrail, and the
    task also declares a Pydantic output model.
    """
    # Import json once for the whole test; the nested guardrails capture it
    # via closure (the original re-imported it in each inner function).
    import json
    from pydantic import BaseModel, Field
    class TestModel(BaseModel):
        # Expected structure of the final JSON payload.
        content: str = Field(description="The content")
        processed: bool = Field(description="Whether it was processed")
    def json_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Convert the raw output to a JSON payload."""
        data = {"content": result.raw, "processed": True}
        return (True, json.dumps(data))
    def validation_guardrail(result: TaskOutput) -> tuple[bool, str]:
        """Validate the JSON structure produced by the previous guardrail."""
        try:
            data = json.loads(result.raw)
            if "content" not in data or "processed" not in data:
                return (False, "Missing required fields")
            return (True, result.raw)
        except json.JSONDecodeError:
            return (False, "Invalid JSON format")
    agent = Mock()
    agent.role = "pydantic_agent"
    agent.execute_task.return_value = "test content"
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test guardrails with Pydantic",
        expected_output="Structured output",
        guardrails=[json_guardrail, validation_guardrail],
        output_pydantic=TestModel,
    )
    result = task.execute_sync(agent=agent)
    # Verify the result is valid JSON and can be parsed
    parsed = json.loads(result.raw)
    assert parsed["content"] == "test content"
    assert parsed["processed"] is True
def test_guardrails_vs_single_guardrail_mutual_exclusion():
    """Providing a guardrails list overrides (and nulls out) the single guardrail."""
    def single_guardrail(result: TaskOutput) -> tuple[bool, str]:
        # Should never run: the list takes precedence.
        return (True, f"Single: {result.raw}")
    def list_guardrail(result: TaskOutput) -> tuple[bool, str]:
        # The guardrail that actually executes.
        return (True, f"List: {result.raw}")
    mock_agent = Mock()
    mock_agent.role = "exclusion_agent"
    mock_agent.execute_task.return_value = "test"
    mock_agent.crew = None
    mock_agent.last_messages = []
    exclusive_task = create_smart_task(
        description="Test mutual exclusion",
        expected_output="Exclusion test",
        guardrail=single_guardrail,  # This should be ignored
        guardrails=[list_guardrail],  # This should be used
    )
    output = exclusive_task.execute_sync(agent=mock_agent)
    # Only the list entry ran; the single guardrail was nullified.
    assert output.raw == "List: test"
    assert exclusive_task._guardrail is None
def test_per_guardrail_independent_retry_tracking():
    """Test that each guardrail has independent retry tracking."""
    call_counts = {"g1": 0, "g2": 0, "g3": 0}
    def guardrail_1(result: TaskOutput) -> tuple[bool, str]:
        """Fails twice, then succeeds."""
        call_counts["g1"] += 1
        if call_counts["g1"] <= 2:
            return (False, "Guardrail 1 not ready yet")
        return (True, f"G1({call_counts['g1']}): {result.raw}")
    def guardrail_2(result: TaskOutput) -> tuple[bool, str]:
        """Fails once, then succeeds."""
        call_counts["g2"] += 1
        if call_counts["g2"] == 1:
            return (False, "Guardrail 2 not ready yet")
        return (True, f"G2({call_counts['g2']}): {result.raw}")
    def guardrail_3(result: TaskOutput) -> tuple[bool, str]:
        """Always succeeds."""
        call_counts["g3"] += 1
        return (True, f"G3({call_counts['g3']}): {result.raw}")
    agent = Mock()
    agent.role = "independent_retry_agent"
    agent.execute_task.return_value = "base"
    agent.crew = None
    agent.last_messages = []
    task = create_smart_task(
        description="Test independent retry tracking",
        expected_output="Independent retries",
        guardrails=[guardrail_1, guardrail_2, guardrail_3],
        guardrail_max_retries=3,
    )
    result = task.execute_sync(agent=agent)
    # Retry counters are keyed by guardrail index, not shared across the list.
    assert task._guardrail_retry_counts.get(0, 0) == 2
    assert task._guardrail_retry_counts.get(1, 0) == 1
    assert task._guardrail_retry_counts.get(2, 0) == 0
    assert call_counts["g1"] == 3
    assert call_counts["g2"] == 2
    assert call_counts["g3"] == 1
    assert "G3(1)" in result.raw
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_task_guardrails.py",
"license": "MIT License",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_async_event_bus.py | """Tests for async event handling in CrewAI event bus.
This module tests async handler registration, execution, and the aemit method.
"""
import asyncio
import pytest
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class AsyncTestEvent(BaseEvent):
    """Minimal event type used to exercise async handler dispatch."""
    pass
@pytest.mark.asyncio
async def test_async_handler_execution():
    """emit() schedules async handlers fire-and-forget; they run shortly after."""
    received_events = []
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(AsyncTestEvent)
        async def async_handler(source: object, event: BaseEvent) -> None:
            await asyncio.sleep(0.01)
            received_events.append(event)
        event = AsyncTestEvent(type="async_test")
        crewai_event_bus.emit("test_source", event)
        # emit() does not await async handlers; give the scheduled coroutine
        # time to finish before asserting.
        await asyncio.sleep(0.1)
        assert len(received_events) == 1
        assert received_events[0] == event
@pytest.mark.asyncio
async def test_aemit_with_async_handlers():
    """aemit() awaits async handlers, so no extra sleep is needed afterwards."""
    captured = []
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(AsyncTestEvent)
        async def async_handler(source: object, event: BaseEvent) -> None:
            await asyncio.sleep(0.01)
            captured.append(event)
        emitted = AsyncTestEvent(type="async_test")
        await crewai_event_bus.aemit("test_source", emitted)
        # Handler has already completed by the time aemit returns.
        assert captured == [emitted]
@pytest.mark.asyncio
async def test_multiple_async_handlers():
    """All async handlers registered for an event type run on aemit()."""
    received_events_1 = []
    received_events_2 = []
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(AsyncTestEvent)
        async def handler_1(source: object, event: BaseEvent) -> None:
            await asyncio.sleep(0.01)
            received_events_1.append(event)
        @crewai_event_bus.on(AsyncTestEvent)
        async def handler_2(source: object, event: BaseEvent) -> None:
            # Slightly longer delay than handler_1 to vary completion order.
            await asyncio.sleep(0.02)
            received_events_2.append(event)
        event = AsyncTestEvent(type="async_test")
        await crewai_event_bus.aemit("test_source", event)
        assert len(received_events_1) == 1
        assert len(received_events_2) == 1
@pytest.mark.asyncio
async def test_mixed_sync_and_async_handlers():
    """Sync and async handlers can coexist on the same event type."""
    sync_events = []
    async_events = []
    # Completion flags let the test wait deterministically instead of
    # sleeping a fixed amount.
    sync_done = asyncio.Event()
    async_done = asyncio.Event()
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(AsyncTestEvent)
        def sync_handler(source: object, event: BaseEvent) -> None:
            sync_events.append(event)
            sync_done.set()
        @crewai_event_bus.on(AsyncTestEvent)
        async def async_handler(source: object, event: BaseEvent) -> None:
            await asyncio.sleep(0.01)
            async_events.append(event)
            async_done.set()
        event = AsyncTestEvent(type="mixed_test")
        crewai_event_bus.emit("test_source", event)
        await asyncio.wait_for(sync_done.wait(), timeout=5)
        await asyncio.wait_for(async_done.wait(), timeout=5)
        assert len(sync_events) == 1
        assert len(async_events) == 1
@pytest.mark.asyncio
async def test_async_handler_error_handling():
successful_handler_called = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(AsyncTestEvent)
async def failing_handler(source: object, event: BaseEvent) -> None:
raise ValueError("Async handler error")
@crewai_event_bus.on(AsyncTestEvent)
async def successful_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.01)
successful_handler_called.append(True)
event = AsyncTestEvent(type="error_test")
await crewai_event_bus.aemit("test_source", event)
assert len(successful_handler_called) == 1
@pytest.mark.asyncio
async def test_aemit_with_no_handlers():
with crewai_event_bus.scoped_handlers():
event = AsyncTestEvent(type="no_handlers")
await crewai_event_bus.aemit("test_source", event)
@pytest.mark.asyncio
async def test_async_handler_registration_via_register_handler():
received_events = []
with crewai_event_bus.scoped_handlers():
async def custom_async_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.01)
received_events.append(event)
crewai_event_bus.register_handler(AsyncTestEvent, custom_async_handler)
event = AsyncTestEvent(type="register_test")
await crewai_event_bus.aemit("test_source", event)
assert len(received_events) == 1
assert received_events[0] == event
@pytest.mark.asyncio
async def test_emit_async_handlers_fire_and_forget():
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(AsyncTestEvent)
async def slow_async_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.05)
received_events.append(event)
event = AsyncTestEvent(type="fire_forget_test")
crewai_event_bus.emit("test_source", event)
assert len(received_events) == 0
await asyncio.sleep(0.1)
assert len(received_events) == 1
@pytest.mark.asyncio
async def test_scoped_handlers_with_async():
received_before = []
received_during = []
received_after = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(AsyncTestEvent)
async def before_handler(source: object, event: BaseEvent) -> None:
received_before.append(event)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(AsyncTestEvent)
async def scoped_handler(source: object, event: BaseEvent) -> None:
received_during.append(event)
event1 = AsyncTestEvent(type="during_scope")
await crewai_event_bus.aemit("test_source", event1)
assert len(received_before) == 0
assert len(received_during) == 1
@crewai_event_bus.on(AsyncTestEvent)
async def after_handler(source: object, event: BaseEvent) -> None:
received_after.append(event)
event2 = AsyncTestEvent(type="after_scope")
await crewai_event_bus.aemit("test_source", event2)
assert len(received_before) == 1
assert len(received_during) == 1
assert len(received_after) == 1
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/events/test_async_event_bus.py",
"license": "MIT License",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_crewai_event_bus.py | import threading
from unittest.mock import Mock
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class TestEvent(BaseEvent):
pass
def test_specific_event_handler():
mock_handler = Mock()
condition = threading.Condition()
handler_called = [False]
@crewai_event_bus.on(TestEvent)
def handler(source, event):
with condition:
mock_handler(source, event)
handler_called[0] = True
condition.notify()
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
with condition:
if not handler_called[0]:
condition.wait(timeout=5)
assert handler_called[0], "Handler was not called within timeout"
mock_handler.assert_called_once_with("source_object", event)
def test_multiple_handlers_same_event():
"""Test that multiple handlers can be registered for the same event type."""
mock_handler1 = Mock()
mock_handler2 = Mock()
condition = threading.Condition()
handlers_called = {"handler1": False, "handler2": False}
@crewai_event_bus.on(TestEvent)
def handler1(source, event):
with condition:
mock_handler1(source, event)
handlers_called["handler1"] = True
condition.notify()
@crewai_event_bus.on(TestEvent)
def handler2(source, event):
with condition:
mock_handler2(source, event)
handlers_called["handler2"] = True
condition.notify()
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
with condition:
while not all(handlers_called.values()):
condition.wait(timeout=5)
if not all(handlers_called.values()):
break
assert handlers_called["handler1"], "Handler1 was not called within timeout"
assert handlers_called["handler2"], "Handler2 was not called within timeout"
mock_handler1.assert_called_once_with("source_object", event)
mock_handler2.assert_called_once_with("source_object", event)
def test_event_bus_error_handling():
"""Test that handler exceptions are caught and don't break the event bus."""
called = threading.Event()
error_caught = threading.Event()
@crewai_event_bus.on(TestEvent)
def broken_handler(source, event):
called.set()
raise ValueError("Simulated handler failure")
@crewai_event_bus.on(TestEvent)
def working_handler(source, event):
error_caught.set()
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
assert called.wait(timeout=2), "Broken handler was never called"
assert error_caught.wait(timeout=2), "Working handler was never called after error"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/events/test_crewai_event_bus.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_rw_lock.py | """Tests for read-write lock implementation.
This module tests the RWLock class for correct concurrent read and write behavior.
"""
import threading
import time
from crewai.utilities.rw_lock import RWLock
def test_multiple_readers_concurrent():
lock = RWLock()
active_readers = [0]
max_concurrent_readers = [0]
lock_for_counters = threading.Lock()
def reader(reader_id: int) -> None:
with lock.r_locked():
with lock_for_counters:
active_readers[0] += 1
max_concurrent_readers[0] = max(
max_concurrent_readers[0], active_readers[0]
)
time.sleep(0.1)
with lock_for_counters:
active_readers[0] -= 1
threads = [threading.Thread(target=reader, args=(i,)) for i in range(5)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert max_concurrent_readers[0] == 5
def test_writer_blocks_readers():
lock = RWLock()
writer_holding_lock = [False]
reader_accessed_during_write = [False]
def writer() -> None:
with lock.w_locked():
writer_holding_lock[0] = True
time.sleep(0.2)
writer_holding_lock[0] = False
def reader() -> None:
time.sleep(0.05)
with lock.r_locked():
if writer_holding_lock[0]:
reader_accessed_during_write[0] = True
writer_thread = threading.Thread(target=writer)
reader_thread = threading.Thread(target=reader)
writer_thread.start()
reader_thread.start()
writer_thread.join()
reader_thread.join()
assert not reader_accessed_during_write[0]
def test_writer_blocks_other_writers():
lock = RWLock()
execution_order: list[int] = []
lock_for_order = threading.Lock()
def writer(writer_id: int) -> None:
with lock.w_locked():
with lock_for_order:
execution_order.append(writer_id)
time.sleep(0.1)
threads = [threading.Thread(target=writer, args=(i,)) for i in range(3)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert len(execution_order) == 3
assert len(set(execution_order)) == 3
def test_readers_block_writers():
lock = RWLock()
reader_count = [0]
writer_accessed_during_read = [False]
lock_for_counters = threading.Lock()
def reader() -> None:
with lock.r_locked():
with lock_for_counters:
reader_count[0] += 1
time.sleep(0.2)
with lock_for_counters:
reader_count[0] -= 1
def writer() -> None:
time.sleep(0.05)
with lock.w_locked():
with lock_for_counters:
if reader_count[0] > 0:
writer_accessed_during_read[0] = True
reader_thread = threading.Thread(target=reader)
writer_thread = threading.Thread(target=writer)
reader_thread.start()
writer_thread.start()
reader_thread.join()
writer_thread.join()
assert not writer_accessed_during_read[0]
def test_alternating_readers_and_writers():
lock = RWLock()
operations: list[str] = []
lock_for_operations = threading.Lock()
def reader(reader_id: int) -> None:
with lock.r_locked():
with lock_for_operations:
operations.append(f"r{reader_id}_start")
time.sleep(0.05)
with lock_for_operations:
operations.append(f"r{reader_id}_end")
def writer(writer_id: int) -> None:
with lock.w_locked():
with lock_for_operations:
operations.append(f"w{writer_id}_start")
time.sleep(0.05)
with lock_for_operations:
operations.append(f"w{writer_id}_end")
threads = [
threading.Thread(target=reader, args=(0,)),
threading.Thread(target=writer, args=(0,)),
threading.Thread(target=reader, args=(1,)),
threading.Thread(target=writer, args=(1,)),
threading.Thread(target=reader, args=(2,)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert len(operations) == 10
start_ops = [op for op in operations if "_start" in op]
end_ops = [op for op in operations if "_end" in op]
assert len(start_ops) == 5
assert len(end_ops) == 5
def test_context_manager_releases_on_exception():
lock = RWLock()
exception_raised = False
try:
with lock.r_locked():
raise ValueError("Test exception")
except ValueError:
exception_raised = True
assert exception_raised
acquired = False
with lock.w_locked():
acquired = True
assert acquired
def test_write_lock_releases_on_exception():
lock = RWLock()
exception_raised = False
try:
with lock.w_locked():
raise ValueError("Test exception")
except ValueError:
exception_raised = True
assert exception_raised
acquired = False
with lock.r_locked():
acquired = True
assert acquired
def test_stress_many_readers_few_writers():
lock = RWLock()
read_count = [0]
write_count = [0]
lock_for_counters = threading.Lock()
def reader() -> None:
for _ in range(10):
with lock.r_locked():
with lock_for_counters:
read_count[0] += 1
time.sleep(0.001)
def writer() -> None:
for _ in range(5):
with lock.w_locked():
with lock_for_counters:
write_count[0] += 1
time.sleep(0.01)
reader_threads = [threading.Thread(target=reader) for _ in range(10)]
writer_threads = [threading.Thread(target=writer) for _ in range(2)]
all_threads = reader_threads + writer_threads
for thread in all_threads:
thread.start()
for thread in all_threads:
thread.join()
assert read_count[0] == 100
assert write_count[0] == 10
def test_nested_read_locks_same_thread():
lock = RWLock()
nested_acquired = False
with lock.r_locked():
with lock.r_locked():
nested_acquired = True
assert nested_acquired
def test_manual_acquire_release():
lock = RWLock()
lock.r_acquire()
lock.r_release()
lock.w_acquire()
lock.w_release()
with lock.r_locked():
pass
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/events/test_rw_lock.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_shutdown.py | """Tests for event bus shutdown and cleanup behavior.
This module tests graceful shutdown, task completion, and cleanup operations.
"""
import asyncio
import threading
import time
import pytest
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import CrewAIEventsBus
class ShutdownTestEvent(BaseEvent):
pass
def test_shutdown_prevents_new_events():
bus = CrewAIEventsBus()
received_events = []
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def handler(source: object, event: BaseEvent) -> None:
received_events.append(event)
bus._shutting_down = True
event = ShutdownTestEvent(type="after_shutdown")
bus.emit("test_source", event)
time.sleep(0.1)
assert len(received_events) == 0
bus._shutting_down = False
@pytest.mark.asyncio
async def test_aemit_during_shutdown():
bus = CrewAIEventsBus()
received_events = []
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
async def handler(source: object, event: BaseEvent) -> None:
received_events.append(event)
bus._shutting_down = True
event = ShutdownTestEvent(type="aemit_during_shutdown")
await bus.aemit("test_source", event)
assert len(received_events) == 0
bus._shutting_down = False
def test_shutdown_flag_prevents_emit():
bus = CrewAIEventsBus()
emitted_count = [0]
condition = threading.Condition()
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def handler(source: object, event: BaseEvent) -> None:
with condition:
emitted_count[0] += 1
condition.notify()
event1 = ShutdownTestEvent(type="before_shutdown")
future = bus.emit("test_source", event1)
if future:
future.result(timeout=2.0)
assert emitted_count[0] == 1
bus._shutting_down = True
event2 = ShutdownTestEvent(type="during_shutdown")
bus.emit("test_source", event2)
time.sleep(0.1)
assert emitted_count[0] == 1
bus._shutting_down = False
def test_concurrent_access_during_shutdown_flag():
bus = CrewAIEventsBus()
received_events = []
condition = threading.Condition()
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def handler(source: object, event: BaseEvent) -> None:
with condition:
received_events.append(event)
condition.notify()
def emit_events() -> None:
for i in range(10):
event = ShutdownTestEvent(type=f"event_{i}")
bus.emit("source", event)
time.sleep(0.01)
def set_shutdown_flag() -> None:
time.sleep(0.05)
bus._shutting_down = True
emit_thread = threading.Thread(target=emit_events)
shutdown_thread = threading.Thread(target=set_shutdown_flag)
emit_thread.start()
shutdown_thread.start()
emit_thread.join()
shutdown_thread.join()
with condition:
condition.wait_for(lambda: len(received_events) > 0, timeout=2)
assert len(received_events) < 10
assert len(received_events) > 0
bus._shutting_down = False
@pytest.mark.asyncio
async def test_async_handlers_complete_before_shutdown_flag():
bus = CrewAIEventsBus()
completed_handlers = []
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
async def async_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.05)
if not bus._shutting_down:
completed_handlers.append(event)
for i in range(5):
event = ShutdownTestEvent(type=f"event_{i}")
bus.emit("source", event)
await asyncio.sleep(0.3)
assert len(completed_handlers) == 5
def test_scoped_handlers_cleanup():
bus = CrewAIEventsBus()
received_before = []
received_during = []
received_after = []
condition = threading.Condition()
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def before_handler(source: object, event: BaseEvent) -> None:
with condition:
received_before.append(event)
condition.notify()
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def during_handler(source: object, event: BaseEvent) -> None:
with condition:
received_during.append(event)
condition.notify()
event1 = ShutdownTestEvent(type="during")
bus.emit("source", event1)
with condition:
condition.wait_for(lambda: len(received_during) >= 1, timeout=2)
assert len(received_before) == 0
assert len(received_during) == 1
event2 = ShutdownTestEvent(type="after_inner_scope")
bus.emit("source", event2)
with condition:
condition.wait_for(lambda: len(received_before) >= 1, timeout=2)
assert len(received_before) == 1
assert len(received_during) == 1
event3 = ShutdownTestEvent(type="after_outer_scope")
bus.emit("source", event3)
with condition:
condition.wait(timeout=0.2)
assert len(received_before) == 1
assert len(received_during) == 1
assert len(received_after) == 0
def test_handler_registration_thread_safety():
bus = CrewAIEventsBus()
handlers_registered = [0]
lock = threading.Lock()
with bus.scoped_handlers():
def register_handlers() -> None:
for _ in range(20):
@bus.on(ShutdownTestEvent)
def handler(source: object, event: BaseEvent) -> None:
pass
with lock:
handlers_registered[0] += 1
time.sleep(0.001)
threads = [threading.Thread(target=register_handlers) for _ in range(3)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert handlers_registered[0] == 60
@pytest.mark.asyncio
async def test_mixed_sync_async_handler_execution():
bus = CrewAIEventsBus()
sync_executed = []
async_executed = []
condition = threading.Condition()
with bus.scoped_handlers():
@bus.on(ShutdownTestEvent)
def sync_handler(source: object, event: BaseEvent) -> None:
time.sleep(0.01)
with condition:
sync_executed.append(event)
condition.notify()
@bus.on(ShutdownTestEvent)
async def async_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.01)
with condition:
async_executed.append(event)
condition.notify()
for i in range(5):
event = ShutdownTestEvent(type=f"event_{i}")
bus.emit("source", event)
def wait_for_completion():
with condition:
return condition.wait_for(
lambda: len(sync_executed) == 5 and len(async_executed) == 5,
timeout=5
)
await asyncio.get_event_loop().run_in_executor(None, wait_for_completion)
assert len(sync_executed) == 5
assert len(async_executed) == 5
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/events/test_shutdown.py",
"license": "MIT License",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_thread_safety.py | """Tests for thread safety in CrewAI event bus.
This module tests concurrent event emission and handler registration.
"""
import threading
import time
from collections.abc import Callable
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class ThreadSafetyTestEvent(BaseEvent):
pass
def test_concurrent_emit_from_multiple_threads():
received_events: list[BaseEvent] = []
lock = threading.Lock()
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(ThreadSafetyTestEvent)
def handler(source: object, event: BaseEvent) -> None:
with lock:
received_events.append(event)
threads: list[threading.Thread] = []
num_threads = 10
events_per_thread = 10
def emit_events(thread_id: int) -> None:
for i in range(events_per_thread):
event = ThreadSafetyTestEvent(type=f"thread_{thread_id}_event_{i}")
crewai_event_bus.emit(f"source_{thread_id}", event)
for i in range(num_threads):
thread = threading.Thread(target=emit_events, args=(i,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
time.sleep(0.5)
assert len(received_events) == num_threads * events_per_thread
def test_concurrent_handler_registration():
handlers_executed: list[int] = []
lock = threading.Lock()
def create_handler(handler_id: int) -> Callable[[object, BaseEvent], None]:
def handler(source: object, event: BaseEvent) -> None:
with lock:
handlers_executed.append(handler_id)
return handler
with crewai_event_bus.scoped_handlers():
threads: list[threading.Thread] = []
num_handlers = 20
def register_handler(handler_id: int) -> None:
crewai_event_bus.register_handler(
ThreadSafetyTestEvent, create_handler(handler_id)
)
for i in range(num_handlers):
thread = threading.Thread(target=register_handler, args=(i,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
event = ThreadSafetyTestEvent(type="registration_test")
crewai_event_bus.emit("test_source", event)
time.sleep(0.5)
assert len(handlers_executed) == num_handlers
assert set(handlers_executed) == set(range(num_handlers))
def test_concurrent_emit_and_registration():
received_events: list[BaseEvent] = []
lock = threading.Lock()
with crewai_event_bus.scoped_handlers():
def emit_continuously() -> None:
for i in range(50):
event = ThreadSafetyTestEvent(type=f"emit_event_{i}")
crewai_event_bus.emit("emitter", event)
time.sleep(0.001)
def register_continuously() -> None:
for _ in range(10):
@crewai_event_bus.on(ThreadSafetyTestEvent)
def handler(source: object, event: BaseEvent) -> None:
with lock:
received_events.append(event)
time.sleep(0.005)
emit_thread = threading.Thread(target=emit_continuously)
register_thread = threading.Thread(target=register_continuously)
emit_thread.start()
register_thread.start()
emit_thread.join()
register_thread.join()
time.sleep(0.5)
assert len(received_events) > 0
def test_stress_test_rapid_emit():
received_count = [0]
lock = threading.Lock()
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(ThreadSafetyTestEvent)
def counter_handler(source: object, event: BaseEvent) -> None:
with lock:
received_count[0] += 1
num_events = 1000
for i in range(num_events):
event = ThreadSafetyTestEvent(type=f"rapid_event_{i}")
crewai_event_bus.emit("rapid_source", event)
time.sleep(1.0)
assert received_count[0] == num_events
def test_multiple_event_types_concurrent():
class EventTypeA(BaseEvent):
pass
class EventTypeB(BaseEvent):
pass
received_a: list[BaseEvent] = []
received_b: list[BaseEvent] = []
lock = threading.Lock()
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(EventTypeA)
def handler_a(source: object, event: BaseEvent) -> None:
with lock:
received_a.append(event)
@crewai_event_bus.on(EventTypeB)
def handler_b(source: object, event: BaseEvent) -> None:
with lock:
received_b.append(event)
def emit_type_a() -> None:
for i in range(50):
crewai_event_bus.emit("source_a", EventTypeA(type=f"type_a_{i}"))
def emit_type_b() -> None:
for i in range(50):
crewai_event_bus.emit("source_b", EventTypeB(type=f"type_b_{i}"))
thread_a = threading.Thread(target=emit_type_a)
thread_b = threading.Thread(target=emit_type_b)
thread_a.start()
thread_b.start()
thread_a.join()
thread_b.join()
time.sleep(0.5)
assert len(received_a) == 50
assert len(received_b) == 50
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/events/test_thread_safety.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utils.py | """Test utilities for CrewAI tests."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
def wait_for_event_handlers(timeout: float = 5.0) -> None:
"""Wait for all pending event handlers to complete.
This helper ensures all sync and async handlers finish processing before
proceeding. Useful in tests to make assertions deterministic.
Args:
timeout: Maximum time to wait in seconds.
"""
from crewai.events.event_bus import crewai_event_bus
loop = getattr(crewai_event_bus, "_loop", None)
if loop and not loop.is_closed():
async def _wait_for_async_tasks() -> None:
tasks = {
t for t in asyncio.all_tasks(loop) if t is not asyncio.current_task()
}
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
future = asyncio.run_coroutine_threadsafe(_wait_for_async_tasks(), loop)
try:
future.result(timeout=timeout)
except Exception: # noqa: S110
pass
crewai_event_bus._sync_executor.shutdown(wait=True)
crewai_event_bus._sync_executor = ThreadPoolExecutor(
max_workers=10,
thread_name_prefix="CrewAISyncHandler",
)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utils.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/devtools/src/crewai_devtools/cli.py | """Development tools for version bumping and git automation."""
import os
from pathlib import Path
import subprocess
import sys
import click
from dotenv import load_dotenv
from github import Github
from openai import OpenAI
from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Confirm
from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT
load_dotenv()
console = Console()
def run_command(cmd: list[str], cwd: Path | None = None) -> str:
"""Run a shell command and return output.
Args:
cmd: Command to run as list of strings.
cwd: Working directory for command.
Returns:
Command output as string.
Raises:
subprocess.CalledProcessError: If command fails.
"""
result = subprocess.run( # noqa: S603
cmd,
cwd=cwd,
capture_output=True,
text=True,
check=True,
)
return result.stdout.strip()
def check_gh_installed() -> None:
"""Check if GitHub CLI is installed and offer to install it.
Raises:
SystemExit: If gh is not installed and user declines installation.
"""
try:
run_command(["gh", "--version"])
except (subprocess.CalledProcessError, FileNotFoundError):
console.print("[yellow]Warning:[/yellow] GitHub CLI (gh) is not installed")
import platform
if platform.system() == "Darwin":
try:
run_command(["brew", "--version"])
from rich.prompt import Confirm
if Confirm.ask(
"\n[bold]Would you like to install GitHub CLI via Homebrew?[/bold]",
default=True,
):
try:
console.print("\nInstalling GitHub CLI...")
subprocess.run(
["brew", "install", "gh"], # noqa: S607
check=True,
)
console.print(
"[green]✓[/green] GitHub CLI installed successfully"
)
console.print("\nAuthenticating with GitHub...")
subprocess.run(
["gh", "auth", "login"], # noqa: S607
check=True,
)
console.print("[green]✓[/green] GitHub authentication complete")
return
except subprocess.CalledProcessError as e:
console.print(
f"[red]Error:[/red] Failed to install or authenticate gh: {e}"
)
console.print(
"\nYou can try running [bold]gh auth login[/bold] manually"
)
except (subprocess.CalledProcessError, FileNotFoundError):
pass
console.print("\nPlease install GitHub CLI from: https://cli.github.com/")
console.print("\nInstallation instructions:")
console.print(" macOS: brew install gh")
console.print(
" Linux: https://github.com/cli/cli/blob/trunk/docs/install_linux.md"
)
console.print(" Windows: winget install --id GitHub.cli")
sys.exit(1)
def check_git_clean() -> None:
"""Check if git working directory is clean.
Raises:
SystemExit: If there are uncommitted changes.
"""
try:
status = run_command(["git", "status", "--porcelain"])
if status:
console.print(
"[red]Error:[/red] You have uncommitted changes. Please commit or stash them first."
)
sys.exit(1)
except subprocess.CalledProcessError as e:
console.print(f"[red]Error checking git status:[/red] {e}")
sys.exit(1)
def update_version_in_file(file_path: Path, new_version: str) -> bool:
"""Update __version__ attribute in a Python file.
Args:
file_path: Path to Python file.
new_version: New version string.
Returns:
True if version was updated, False otherwise.
"""
if not file_path.exists():
return False
content = file_path.read_text()
lines = content.splitlines()
updated = False
for i, line in enumerate(lines):
if line.strip().startswith("__version__"):
lines[i] = f'__version__ = "{new_version}"'
updated = True
break
if updated:
file_path.write_text("\n".join(lines) + "\n")
return True
return False
def update_pyproject_dependencies(file_path: Path, new_version: str) -> bool:
"""Update workspace dependency versions in pyproject.toml.
Args:
file_path: Path to pyproject.toml file.
new_version: New version string.
Returns:
True if any dependencies were updated, False otherwise.
"""
if not file_path.exists():
return False
content = file_path.read_text()
lines = content.splitlines()
updated = False
workspace_packages = ["crewai", "crewai-tools", "crewai-devtools"]
for i, line in enumerate(lines):
for pkg in workspace_packages:
if f"{pkg}==" in line:
stripped = line.lstrip()
indent = line[: len(line) - len(stripped)]
if '"' in line:
lines[i] = f'{indent}"{pkg}=={new_version}",'
elif "'" in line:
lines[i] = f"{indent}'{pkg}=={new_version}',"
else:
lines[i] = f"{indent}{pkg}=={new_version},"
updated = True
if updated:
file_path.write_text("\n".join(lines) + "\n")
return True
return False
def add_docs_version(docs_json_path: Path, version: str) -> bool:
"""Add a new version to the Mintlify docs.json versioning config.
Copies the current default version's tabs into a new version entry,
sets the new version as default, and marks the previous default as
non-default. Operates on all languages.
Args:
docs_json_path: Path to docs/docs.json.
version: Version string (e.g., "1.10.1b1").
Returns:
True if docs.json was updated, False otherwise.
"""
import json
if not docs_json_path.exists():
return False
data = json.loads(docs_json_path.read_text())
version_label = f"v{version}"
updated = False
for lang in data.get("navigation", {}).get("languages", []):
versions = lang.get("versions", [])
if not versions:
continue
# Skip if this version already exists for this language
if any(v.get("version") == version_label for v in versions):
continue
# Find the current default and copy its tabs
default_version = next(
(v for v in versions if v.get("default")),
versions[0],
)
new_version = {
"version": version_label,
"default": True,
"tabs": default_version.get("tabs", []),
}
# Remove default flag from old default
default_version.pop("default", None)
# Insert new version at the beginning
versions.insert(0, new_version)
updated = True
if not updated:
return False
docs_json_path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n")
return True
_PT_BR_MONTHS = {
1: "jan",
2: "fev",
3: "mar",
4: "abr",
5: "mai",
6: "jun",
7: "jul",
8: "ago",
9: "set",
10: "out",
11: "nov",
12: "dez",
}
_CHANGELOG_LOCALES: dict[str, dict[str, str]] = {
"en": {
"link_text": "View release on GitHub",
"language_name": "English",
},
"pt-BR": {
"link_text": "Ver release no GitHub",
"language_name": "Brazilian Portuguese",
},
"ko": {
"link_text": "GitHub 릴리스 보기",
"language_name": "Korean",
},
}
def translate_release_notes(
release_notes: str,
lang: str,
client: OpenAI,
) -> str:
"""Translate release notes into the target language using OpenAI.
Args:
release_notes: English release notes markdown.
lang: Language code (e.g., "pt-BR", "ko").
client: OpenAI client instance.
Returns:
Translated release notes, or original on failure.
"""
locale_cfg = _CHANGELOG_LOCALES.get(lang)
if not locale_cfg:
return release_notes
language_name = locale_cfg["language_name"]
prompt = TRANSLATE_RELEASE_NOTES_PROMPT.substitute(
language=language_name,
release_notes=release_notes,
)
try:
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "system",
"content": f"You are a professional translator. Translate technical documentation into {language_name}.",
},
{"role": "user", "content": prompt},
],
temperature=0.3,
)
return response.choices[0].message.content or release_notes
except Exception as e:
console.print(
f"[yellow]Warning:[/yellow] Could not translate to {language_name}: {e}"
)
return release_notes
def _format_changelog_date(lang: str) -> str:
"""Format today's date for a changelog entry in the given language."""
from datetime import datetime
now = datetime.now()
if lang == "ko":
return f"{now.year}년 {now.month}월 {now.day}일"
if lang == "pt-BR":
return f"{now.day:02d} {_PT_BR_MONTHS[now.month]} {now.year}"
return now.strftime("%b %d, %Y")
def update_changelog(
    changelog_path: Path,
    version: str,
    release_notes: str,
    lang: str = "en",
) -> bool:
    """Prepend a new release entry to a docs changelog file.

    Args:
        changelog_path: Path to the changelog.mdx file.
        version: Version string (e.g., "1.9.3").
        release_notes: Markdown release notes content.
        lang: Language code for localized date/link text.

    Returns:
        True if changelog was updated, False otherwise.
    """
    if not changelog_path.exists():
        return False

    locale = _CHANGELOG_LOCALES.get(lang, _CHANGELOG_LOCALES["en"])
    # Indent each non-empty line with 2 spaces to match <Update> block format.
    indented_notes = "\n".join(
        f"  {line}" if line.strip() else "" for line in release_notes.splitlines()
    )
    entry = "\n".join(
        [
            f'<Update label="{_format_changelog_date(lang)}">',
            f"  ## v{version}",
            "",
            f"  [{locale['link_text']}]"
            f"(https://github.com/crewAIInc/crewAI/releases/tag/{version})",
            "",
            indented_notes,
            "",
            "</Update>",
        ]
    )

    content = changelog_path.read_text()
    # Insert the entry right after the closing --- of the YAML frontmatter,
    # when one is present; otherwise prepend to the whole file.
    parts = content.split("---", 2)
    if len(parts) >= 3:
        new_content = (
            parts[0]
            + "---"
            + parts[1]
            + "---\n"
            + entry
            + "\n\n"
            + parts[2].lstrip("\n")
        )
    else:
        new_content = entry + "\n\n" + content
    changelog_path.write_text(new_content)
    return True
def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]:
    """Update crewai dependency versions in CLI template pyproject.toml files.

    Handles both pinned (==) and minimum (>=) version specifiers,
    as well as extras like [tools]; every match is rewritten to a
    pinned ``==new_version`` requirement.

    Args:
        templates_dir: Path to the CLI templates directory.
        new_version: New version string.

    Returns:
        List of paths that were updated.
    """
    import re

    pattern = re.compile(r'"crewai(\[tools\])?(==|>=)[^"]*"')

    def _pin(match: "re.Match") -> str:
        # Preserve the optional [tools] extra while pinning the version.
        extras = match.group(1) or ""
        return f'"crewai{extras}=={new_version}"'

    changed: list[Path] = []
    for manifest in templates_dir.rglob("pyproject.toml"):
        original = manifest.read_text()
        rewritten = pattern.sub(_pin, original)
        if rewritten != original:
            manifest.write_text(rewritten)
            changed.append(manifest)
    return changed
def find_version_files(base_path: Path) -> list[Path]:
    """Find all __init__.py files that contain __version__.

    Args:
        base_path: Base directory to search in.

    Returns:
        List of paths to files containing __version__.
    """
    matches: list[Path] = []
    for candidate in base_path.rglob("__init__.py"):
        # Substring check is enough: any occurrence of __version__ qualifies.
        if "__version__" in candidate.read_text():
            matches.append(candidate)
    return matches
def get_packages(lib_dir: Path) -> list[Path]:
    """Get all packages from lib/ directory.

    Args:
        lib_dir: Path to lib/ directory.

    Returns:
        List of package directory paths.

    Raises:
        SystemExit: If lib/ doesn't exist or no packages found.
    """
    error = None
    packages: list[Path] = []
    if not lib_dir.exists():
        error = "lib/ directory not found"
    else:
        # Every immediate subdirectory of lib/ counts as a package.
        packages = [child for child in lib_dir.iterdir() if child.is_dir()]
        if not packages:
            error = "No packages found in lib/"
    if error is not None:
        console.print(f"[red]Error:[/red] {error}")
        sys.exit(1)
    return packages
def get_commits_from_last_tag(tag_name: str, version: str) -> tuple[str, str]:
    """Get commits from the last tag, excluding current version.

    Args:
        tag_name: Current tag name (e.g., "v1.0.0").
        version: Current version (e.g., "1.0.0").

    Returns:
        Tuple of (commit_range, commits) where commits is newline-separated.
    """

    def _full_history() -> tuple[str, str]:
        # Fallback: no usable previous tag, use the entire history.
        return "HEAD", run_command(["git", "log", "--pretty=format:%s"])

    try:
        tags = run_command(["git", "tag", "--sort=-version:refname"]).split("\n")
        # Drop empty entries and any tag naming the version being released.
        candidates = [t for t in tags if t and t not in (tag_name, f"v{version}")]
        if not candidates:
            return _full_history()
        commit_range = f"{candidates[0]}..HEAD"
        return commit_range, run_command(
            ["git", "log", commit_range, "--pretty=format:%s"]
        )
    except subprocess.CalledProcessError:
        return _full_history()
def get_github_contributors(commit_range: str) -> list[str]:
    """Get GitHub usernames from commit range using GitHub API.

    Args:
        commit_range: Git commit range (e.g., "abc123..HEAD").

    Returns:
        List of GitHub usernames sorted alphabetically.
    """
    try:
        # Authenticate via the gh CLI token when available; fall back to
        # an anonymous client otherwise.
        try:
            token = run_command(["gh", "auth", "token"])
        except subprocess.CalledProcessError:
            token = None
        client = Github(login_or_token=token) if token else Github()
        repo = client.get_repo("crewAIInc/crewAI")

        shas = run_command(
            ["git", "log", commit_range, "--pretty=format:%H"]
        ).split("\n")
        found = set()
        for sha in shas:
            if not sha:
                continue
            try:
                commit = repo.get_commit(sha)
                author = commit.author
                if author and author.login:
                    found.add(author.login)
                message = commit.commit.message
                if message:
                    # Also credit Co-authored-by trailers carrying a
                    # GitHub noreply address ("ID+username@users.noreply...").
                    for raw_line in message.split("\n"):
                        if not raw_line.strip().startswith("Co-authored-by:"):
                            continue
                        if "<" in raw_line and ">" in raw_line:
                            email = raw_line.split("<")[1].split(">")[0]
                            if "@users.noreply.github.com" in email:
                                found.add(email.split("+")[-1].split("@")[0])
            except Exception:  # noqa: S112
                # Skip commits the API cannot resolve.
                continue
        return sorted(found)
    except Exception as e:
        console.print(
            f"[yellow]Warning:[/yellow] Could not fetch GitHub contributors: {e}"
        )
        return []
# Root command group; subcommands are attached below via cli.add_command().
@click.group()
def cli() -> None:
    """Development tools for version bumping and git automation."""
@click.command()
@click.argument("version")
@click.option(
    "--dry-run", is_flag=True, help="Show what would be done without making changes"
)
@click.option("--no-push", is_flag=True, help="Don't push changes to remote")
@click.option(
    "--no-commit", is_flag=True, help="Don't commit changes (just update files)"
)
def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None:
    """Bump version across all packages in lib/.

    Updates every package's __version__, inter-package dependency pins,
    and CLI template pyproject files, then (unless disabled) commits the
    change on a new branch, pushes it, and opens a pull request.

    Args:
        version: New version to set (e.g., 1.0.0, 1.0.0a1).
        dry_run: Show what would be done without making changes.
        no_push: Don't push changes to remote.
        no_commit: Don't commit changes (just update files).
    """
    try:
        # Check prerequisites
        check_gh_installed()
        cwd = Path.cwd()
        lib_dir = cwd / "lib"
        if not dry_run:
            console.print("Checking git status...")
            check_git_clean()
            console.print("[green]✓[/green] Working directory is clean")
        else:
            console.print("[dim][DRY RUN][/dim] Would check git status")
        packages = get_packages(lib_dir)
        console.print(f"\nFound {len(packages)} package(s) to update:")
        for pkg in packages:
            console.print(f"  - {pkg.name}")
        console.print(f"\nUpdating version to {version}...")
        updated_files = []
        for pkg in packages:
            # Rewrite __version__ in each package's __init__.py files.
            version_files = find_version_files(pkg)
            for vfile in version_files:
                if dry_run:
                    console.print(
                        f"[dim][DRY RUN][/dim] Would update: {vfile.relative_to(cwd)}"
                    )
                else:
                    if update_version_in_file(vfile, version):
                        console.print(
                            f"[green]✓[/green] Updated: {vfile.relative_to(cwd)}"
                        )
                        updated_files.append(vfile)
                    else:
                        console.print(
                            f"[red]✗[/red] Failed to update: {vfile.relative_to(cwd)}"
                        )
            # Re-pin inter-package dependencies in the package's pyproject.toml.
            pyproject = pkg / "pyproject.toml"
            if pyproject.exists():
                if dry_run:
                    console.print(
                        f"[dim][DRY RUN][/dim] Would update dependencies in: {pyproject.relative_to(cwd)}"
                    )
                else:
                    if update_pyproject_dependencies(pyproject, version):
                        console.print(
                            f"[green]✓[/green] Updated dependencies in: {pyproject.relative_to(cwd)}"
                        )
                        updated_files.append(pyproject)
        if not updated_files and not dry_run:
            console.print(
                "[yellow]Warning:[/yellow] No __version__ attributes found to update"
            )
        # Update CLI template pyproject.toml files
        templates_dir = lib_dir / "crewai" / "src" / "crewai" / "cli" / "templates"
        if templates_dir.exists():
            if dry_run:
                for tpl in templates_dir.rglob("pyproject.toml"):
                    console.print(
                        f"[dim][DRY RUN][/dim] Would update template: {tpl.relative_to(cwd)}"
                    )
            else:
                tpl_updated = update_template_dependencies(templates_dir, version)
                for tpl in tpl_updated:
                    console.print(
                        f"[green]✓[/green] Updated template: {tpl.relative_to(cwd)}"
                    )
                    updated_files.append(tpl)
        if not dry_run:
            console.print("\nSyncing workspace...")
            run_command(["uv", "sync"])
            console.print("[green]✓[/green] Workspace synced")
        else:
            console.print("[dim][DRY RUN][/dim] Would run: uv sync")
        if no_commit:
            console.print("\nSkipping git operations (--no-commit flag set)")
        else:
            branch_name = f"feat/bump-version-{version}"
            if not dry_run:
                console.print(f"\nCreating branch {branch_name}...")
                run_command(["git", "checkout", "-b", branch_name])
                console.print("[green]✓[/green] Branch created")
                console.print("\nCommitting changes...")
                run_command(["git", "add", "."])
                run_command(
                    ["git", "commit", "-m", f"feat: bump versions to {version}"]
                )
                console.print("[green]✓[/green] Changes committed")
                if not no_push:
                    console.print("\nPushing branch...")
                    run_command(["git", "push", "-u", "origin", branch_name])
                    console.print("[green]✓[/green] Branch pushed")
            else:
                console.print(
                    f"[dim][DRY RUN][/dim] Would create branch: {branch_name}"
                )
                console.print(
                    f"[dim][DRY RUN][/dim] Would commit: feat: bump versions to {version}"
                )
                if not no_push:
                    console.print(
                        f"[dim][DRY RUN][/dim] Would push branch: {branch_name}"
                    )
            # BUG FIX: PR creation used to sit outside this else-branch, so a
            # `gh pr create` was attempted even with --no-commit, when no
            # branch had been created or pushed. It now only runs after the
            # commit/push flow above.
            if not dry_run and not no_push:
                console.print("\nCreating pull request...")
                run_command(
                    [
                        "gh",
                        "pr",
                        "create",
                        "--base",
                        "main",
                        "--title",
                        f"feat: bump versions to {version}",
                        "--body",
                        "",
                    ]
                )
                console.print("[green]✓[/green] Pull request created")
            elif dry_run:
                console.print(
                    f"[dim][DRY RUN][/dim] Would create PR: feat: bump versions to {version}"
                )
            else:
                console.print("\nSkipping PR creation (--no-push flag set)")
        console.print(f"\n[green]✓[/green] Version bump to {version} complete!")
    except subprocess.CalledProcessError as e:
        console.print(f"[red]Error running command:[/red] {e}")
        if e.stderr:
            console.print(e.stderr)
        sys.exit(1)
    except Exception as e:
        console.print(f"[red]Error:[/red] {e}")
        sys.exit(1)
@click.command()
@click.option(
    "--dry-run", is_flag=True, help="Show what would be done without making changes"
)
@click.option("--no-edit", is_flag=True, help="Skip editing release notes")
def tag(dry_run: bool, no_edit: bool) -> None:
    """Create and push a version tag on main branch.

    Run this after the version bump PR has been merged.
    Automatically detects version from __version__ in packages.

    Args:
        dry_run: Show what would be done without making changes.
        no_edit: Skip editing release notes.
    """
    try:
        cwd = Path.cwd()
        lib_dir = cwd / "lib"
        packages = get_packages(lib_dir)
        # Phase 1: collect __version__ from every package and require that
        # they all agree before tagging anything.
        with console.status("[cyan]Validating package versions..."):
            versions = {}
            for pkg in packages:
                version_files = find_version_files(pkg)
                for vfile in version_files:
                    content = vfile.read_text()
                    for line in content.splitlines():
                        if line.strip().startswith("__version__"):
                            # Right-hand side of __version__ = "x.y.z"
                            # (either quote style).
                            ver = line.split("=")[1].strip().strip('"').strip("'")
                            versions[vfile.relative_to(cwd)] = ver
                            break
        if not versions:
            console.print(
                "[red]✗[/red] Validated package versions: Could not find __version__ in any package"
            )
            sys.exit(1)
        unique_versions = set(versions.values())
        if len(unique_versions) > 1:
            console.print(
                "[red]✗[/red] Validated package versions: Version mismatch detected"
            )
            for file, ver in versions.items():
                console.print(f"  {file}: {ver}")
            sys.exit(1)
        version = unique_versions.pop()
        console.print(f"[green]✓[/green] Validated packages @ [bold]{version}[/bold]")
        # Tags are not "v"-prefixed: the bare version string is used for the
        # git tag, the GitHub release, and the changelog release URL.
        tag_name = version
        # Phase 2: make sure we are on an up-to-date main branch.
        if not dry_run:
            with console.status("[cyan]Checking out main branch..."):
                try:
                    run_command(["git", "checkout", "main"])
                except subprocess.CalledProcessError as e:
                    console.print(f"[red]✗[/red] Checked out main branch: {e}")
                    sys.exit(1)
            console.print("[green]✓[/green] On main branch")
            with console.status("[cyan]Pulling latest changes..."):
                try:
                    run_command(["git", "pull"])
                except subprocess.CalledProcessError as e:
                    console.print(f"[red]✗[/red] Pulled latest changes: {e}")
                    sys.exit(1)
            console.print("[green]✓[/green] main branch up to date")
        # Phase 3: collect commits since the previous release and generate
        # release notes with OpenAI (defaulting to a plain "Release X" note).
        release_notes = f"Release {version}"
        commits = ""
        with console.status("[cyan]Generating release notes..."):
            try:
                # Prefer the range between the two most recent version-bump
                # commits; otherwise fall back to the last tag.
                prev_bump_commit = run_command(
                    [
                        "git",
                        "log",
                        "--grep=^feat: bump versions to",
                        "--format=%H",
                        "-n",
                        "2",
                    ]
                )
                commits_list = prev_bump_commit.strip().split("\n")
                if len(commits_list) > 1:
                    prev_commit = commits_list[1]
                    commit_range = f"{prev_commit}..HEAD"
                    commits = run_command(
                        ["git", "log", commit_range, "--pretty=format:%s"]
                    )
                    # Drop the bump commits themselves from the notes input.
                    commit_lines = [
                        line
                        for line in commits.split("\n")
                        if not line.startswith("feat: bump versions to")
                    ]
                    commits = "\n".join(commit_lines)
                else:
                    commit_range, commits = get_commits_from_last_tag(tag_name, version)
            except subprocess.CalledProcessError:
                commit_range, commits = get_commits_from_last_tag(tag_name, version)
            github_contributors = get_github_contributors(commit_range)
            # openai_client is also used later for changelog translation.
            openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
            if commits.strip():
                contributors_section = ""
                if github_contributors:
                    contributors_section = f"\n\n## Contributors\n\n{', '.join([f'@{u}' for u in github_contributors])}"
                prompt = RELEASE_NOTES_PROMPT.substitute(
                    version=version,
                    commits=commits,
                    contributors_section=contributors_section,
                )
                response = openai_client.chat.completions.create(
                    model="gpt-4o-mini",
                    messages=[
                        {
                            "role": "system",
                            "content": "You are a helpful assistant that generates clear, concise release notes.",
                        },
                        {"role": "user", "content": prompt},
                    ],
                    temperature=0.7,
                )
                release_notes = (
                    response.choices[0].message.content or f"Release {version}"
                )
            console.print("[green]✓[/green] Generated release notes")
        if commits.strip():
            try:
                console.print()
                md = Markdown(release_notes, justify="left")
                console.print(
                    Panel(
                        md,
                        title="[bold cyan]Generated Release Notes[/bold cyan]",
                        border_style="cyan",
                        padding=(1, 2),
                    )
                )
            except Exception as e:
                # NOTE(review): this except guards the Markdown/Panel rendering
                # above, but the warning text refers to OpenAI — confirm intent.
                console.print(
                    f"[yellow]Warning:[/yellow] Could not generate release notes with OpenAI: {e}"
                )
                console.print("Using default release notes")
        # Phase 4: optionally let the operator edit the notes in an editor.
        if not no_edit:
            if Confirm.ask(
                "\n[bold]Would you like to edit the release notes?[/bold]", default=True
            ):
                edited_notes = click.edit(release_notes)
                if edited_notes is not None:
                    release_notes = edited_notes.strip()
                    console.print("\n[green]✓[/green] Release notes updated")
                else:
                    console.print("\n[green]✓[/green] Using original release notes")
            else:
                console.print(
                    "\n[green]✓[/green] Using generated release notes without editing"
                )
        else:
            console.print(
                "\n[green]✓[/green] Using generated release notes without editing"
            )
        # Substring heuristic over the version string (e.g. "1.0.0a1").
        is_prerelease = any(
            indicator in version.lower()
            for indicator in ["a", "b", "rc", "alpha", "beta", "dev"]
        )
        # Phase 5: update docs (changelogs for every language + version
        # switcher) and push the change through its own auto-merged PR.
        docs_json_path = cwd / "docs" / "docs.json"
        changelog_langs = ["en", "pt-BR", "ko"]
        if not dry_run:
            docs_files_staged = []
            for lang in changelog_langs:
                cl_path = cwd / "docs" / lang / "changelog.mdx"
                if lang == "en":
                    notes_for_lang = release_notes
                else:
                    console.print(f"[dim]Translating release notes to {lang}...[/dim]")
                    notes_for_lang = translate_release_notes(
                        release_notes, lang, openai_client
                    )
                if update_changelog(cl_path, version, notes_for_lang, lang=lang):
                    console.print(
                        f"[green]✓[/green] Updated {cl_path.relative_to(cwd)}"
                    )
                    docs_files_staged.append(str(cl_path))
                else:
                    console.print(
                        f"[yellow]Warning:[/yellow] Changelog not found at {cl_path.relative_to(cwd)}"
                    )
            # Pre-releases are not added to the docs version switcher.
            if not is_prerelease:
                if add_docs_version(docs_json_path, version):
                    console.print(
                        f"[green]✓[/green] Added v{version} to docs version switcher"
                    )
                    docs_files_staged.append(str(docs_json_path))
                else:
                    console.print(
                        f"[yellow]Warning:[/yellow] docs.json not found at {docs_json_path.relative_to(cwd)}"
                    )
            if docs_files_staged:
                docs_branch = f"docs/changelog-v{version}"
                run_command(["git", "checkout", "-b", docs_branch])
                for f in docs_files_staged:
                    run_command(["git", "add", f])
                run_command(
                    [
                        "git",
                        "commit",
                        "-m",
                        f"docs: update changelog and version for v{version}",
                    ]
                )
                console.print("[green]✓[/green] Committed docs updates")
                run_command(["git", "push", "-u", "origin", docs_branch])
                console.print(f"[green]✓[/green] Pushed branch {docs_branch}")
                run_command(
                    [
                        "gh",
                        "pr",
                        "create",
                        "--base",
                        "main",
                        "--title",
                        f"docs: update changelog and version for v{version}",
                        "--body",
                        "",
                    ]
                )
                console.print("[green]✓[/green] Created docs PR")
                run_command(
                    [
                        "gh",
                        "pr",
                        "merge",
                        docs_branch,
                        "--squash",
                        "--auto",
                        "--delete-branch",
                    ]
                )
                console.print("[green]✓[/green] Enabled auto-merge on docs PR")
                import time
                # Poll every 10s until GitHub reports the docs PR as merged,
                # so the tag below includes the docs commit.
                # NOTE(review): no timeout — this loops forever if required
                # checks never pass; consider bounding the wait.
                console.print("[cyan]Waiting for PR checks to pass and merge...[/cyan]")
                while True:
                    time.sleep(10)
                    try:
                        state = run_command(
                            [
                                "gh",
                                "pr",
                                "view",
                                docs_branch,
                                "--json",
                                "state",
                                "--jq",
                                ".state",
                            ]
                        )
                    except subprocess.CalledProcessError:
                        state = ""
                    if state == "MERGED":
                        break
                    console.print("[dim]Still waiting for PR to merge...[/dim]")
                console.print("[green]✓[/green] Docs PR merged")
                run_command(["git", "checkout", "main"])
                run_command(["git", "pull"])
                console.print("[green]✓[/green] main branch updated with docs changes")
        else:
            for lang in changelog_langs:
                cl_path = cwd / "docs" / lang / "changelog.mdx"
                translated = " (translated)" if lang != "en" else ""
                console.print(
                    f"[dim][DRY RUN][/dim] Would update {cl_path.relative_to(cwd)}{translated}"
                )
            if not is_prerelease:
                console.print(
                    f"[dim][DRY RUN][/dim] Would add v{version} to docs version switcher"
                )
            else:
                console.print(
                    "[dim][DRY RUN][/dim] Skipping docs version (pre-release)"
                )
            console.print(
                f"[dim][DRY RUN][/dim] Would create branch docs/changelog-v{version}, PR, and merge"
            )
        # Phase 6: create + push the annotated tag and the GitHub Release.
        if not dry_run:
            with console.status(f"[cyan]Creating tag {tag_name}..."):
                try:
                    run_command(["git", "tag", "-a", tag_name, "-m", release_notes])
                except subprocess.CalledProcessError as e:
                    console.print(f"[red]✗[/red] Created tag {tag_name}: {e}")
                    sys.exit(1)
            console.print(f"[green]✓[/green] Created tag {tag_name}")
            with console.status(f"[cyan]Pushing tag {tag_name}..."):
                try:
                    run_command(["git", "push", "origin", tag_name])
                except subprocess.CalledProcessError as e:
                    console.print(f"[red]✗[/red] Pushed tag {tag_name}: {e}")
                    sys.exit(1)
            console.print(f"[green]✓[/green] Pushed tag {tag_name}")
            with console.status("[cyan]Creating GitHub Release..."):
                try:
                    gh_cmd = [
                        "gh",
                        "release",
                        "create",
                        tag_name,
                        "--title",
                        tag_name,
                        "--notes",
                        release_notes,
                    ]
                    if is_prerelease:
                        gh_cmd.append("--prerelease")
                    run_command(gh_cmd)
                except subprocess.CalledProcessError as e:
                    console.print(f"[red]✗[/red] Created GitHub Release: {e}")
                    sys.exit(1)
            release_type = "prerelease" if is_prerelease else "release"
            console.print(
                f"[green]✓[/green] Created GitHub {release_type} for {tag_name}"
            )
            console.print(
                f"\n[green]✓[/green] Packages @ [bold]{version}[/bold] tagged successfully!"
            )
    except subprocess.CalledProcessError as e:
        console.print(f"[red]Error running command:[/red] {e}")
        if e.stderr:
            console.print(e.stderr)
        sys.exit(1)
    except Exception as e:
        console.print(f"[red]Error:[/red] {e}")
        sys.exit(1)
# Register the subcommands on the root click group.
cli.add_command(bump)
cli.add_command(tag)
def main() -> None:
    """Entry point for the CLI."""
    cli()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/devtools/src/crewai_devtools/cli.py",
"license": "MIT License",
"lines": 923,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/devtools/src/crewai_devtools/prompts.py | """Prompt templates for AI-generated content."""
from string import Template
# string.Template (rather than str.format) so literal braces in commit text
# need no escaping. Placeholders filled by the caller via substitute():
# $version, $commits, $contributors_section.
RELEASE_NOTES_PROMPT = Template(
    """Generate concise release notes for version $version based on these commits:
$commits
The commits follow the Conventional Commits standard (feat:, fix:, chore:, etc.).
Use this exact template format:
## What's Changed
### Features
- [List feat: commits here, using imperative mood like "Add X", "Implement Y"]
### Bug Fixes
- [List fix: commits here, using imperative mood like "Fix X", "Resolve Y"]
### Documentation
- [List docs: commits here, using imperative mood like "Update X", "Add Y"]
### Performance
- [List perf: commits here, using imperative mood like "Improve X", "Optimize Y"]
### Refactoring
- [List refactor: commits here, using imperative mood like "Refactor X", "Simplify Y"]
### Breaking Changes
- [List commits with BREAKING CHANGE in footer or ! after type, using imperative mood]$contributors_section
Instructions:
- Parse conventional commit format (type: description or type(scope): description)
- Only include sections that have relevant changes from the commits
- Skip chore:, ci:, test:, and style: commits unless significant
- Convert commit messages to imperative mood if needed (e.g., "adds" → "Add")
- Be concise but informative
- Focus on user-facing changes
- Use the exact Contributors list provided above, do not modify it
Keep it professional and clear."""
)
TRANSLATE_RELEASE_NOTES_PROMPT = Template(
"""Translate the following release notes into $language.
$release_notes
Instructions:
- Translate all section headers and descriptions naturally
- Keep markdown formatting (##, ###, -, etc.) exactly as-is
- Keep all proper nouns, code identifiers, class names, and technical terms unchanged
(e.g. "CrewAI", "LiteAgent", "ChromaDB", "MCP", "@username")
- Keep the ## Contributors section and GitHub usernames unchanged
- Do not add or remove any content, only translate"""
)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/devtools/src/crewai_devtools/prompts.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
danielgatis/rembg:rembg/sessions/ben_custom.py | import os
from typing import List
import numpy as np
import onnxruntime as ort
from PIL import Image
from PIL.Image import Image as PILImage
from .base import BaseSession
class BenCustomSession(BaseSession):
    """Custom onnxruntime session for a user-supplied Ben model."""

    def __init__(self, model_name: str, sess_opts: ort.SessionOptions, *args, **kwargs):
        """
        Initialize a new BenCustomSession object.

        Parameters:
            model_name (str): The name of the model.
            sess_opts: The session options.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments; ``model_path`` is required.

        Raises:
            ValueError: If no ``model_path`` keyword argument was given.
        """
        if kwargs.get("model_path") is None:
            raise ValueError("model_path is required")
        super().__init__(model_name, sess_opts, *args, **kwargs)

    def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]:
        """
        Predicts the mask image for the input image.

        The image is fed to ``self.normalize`` with a 1024x1024 target size,
        run through the ONNX session, min-max scaled, and converted to a
        grayscale mask resized back to the input image's size.

        Parameters:
            img (PILImage): The input image.

        Returns:
            List[PILImage]: A single-element list holding the mask image.
        """
        outputs = self.inner_session.run(
            None,
            self.normalize(img, (0.5, 0.5, 0.5), (1.0, 1.0, 1.0), (1024, 1024)),
        )
        raw = outputs[0][:, 0, :, :]
        # Min-max scale the raw prediction into [0, 1] before converting
        # to an 8-bit grayscale image.
        lowest = np.min(raw)
        highest = np.max(raw)
        scaled = np.squeeze((raw - lowest) / (highest - lowest))
        mask = Image.fromarray((scaled * 255).astype("uint8"), mode="L")
        return [mask.resize(img.size, Image.Resampling.LANCZOS)]

    @classmethod
    def download_models(cls, *args, **kwargs):
        """
        Resolve the user-supplied model path (nothing is actually downloaded).

        Parameters:
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments; ``model_path`` is required.

        Returns:
            str: The absolute, user-expanded path to the model file.

        Raises:
            ValueError: If no ``model_path`` keyword argument was given.
        """
        path = kwargs.get("model_path")
        if path is None:
            raise ValueError("model_path is required")
        return os.path.abspath(os.path.expanduser(path))

    @classmethod
    def name(cls, *args, **kwargs):
        """
        Return the registry name of this session.

        Returns:
            str: Always "ben_custom".
        """
        return "ben_custom"
| {
"repo_id": "danielgatis/rembg",
"file_path": "rembg/sessions/ben_custom.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
danielgatis/rembg:rembg/sessions/dis_custom.py | import os
from typing import List
import numpy as np
import onnxruntime as ort
from PIL import Image
from PIL.Image import Image as PILImage
from .base import BaseSession
class DisCustomSession(BaseSession):
    """Custom onnxruntime session for a user-supplied Dis (IS-Net) model."""

    def __init__(self, model_name: str, sess_opts: ort.SessionOptions, *args, **kwargs):
        """
        Initialize a new DisCustomSession object.

        Parameters:
            model_name (str): The name of the model.
            sess_opts: The session options.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments; ``model_path`` is required.

        Raises:
            ValueError: If no ``model_path`` keyword argument was given.
        """
        if kwargs.get("model_path") is None:
            raise ValueError("model_path is required")
        super().__init__(model_name, sess_opts, *args, **kwargs)

    def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]:
        """
        Predicts the mask image for the input image.

        The image is fed to ``self.normalize`` with a 1024x1024 target size,
        run through the ONNX session, min-max scaled, and converted to a
        grayscale mask resized back to the input image's size.

        Parameters:
            img (PILImage): The input image.

        Returns:
            List[PILImage]: A single-element list holding the mask image.
        """
        outputs = self.inner_session.run(
            None,
            self.normalize(img, (0.5, 0.5, 0.5), (1.0, 1.0, 1.0), (1024, 1024)),
        )
        raw = outputs[0][:, 0, :, :]
        # Min-max scale the raw prediction into [0, 1] before converting
        # to an 8-bit grayscale image.
        lowest = np.min(raw)
        highest = np.max(raw)
        scaled = np.squeeze((raw - lowest) / (highest - lowest))
        mask = Image.fromarray((scaled * 255).astype("uint8"), mode="L")
        return [mask.resize(img.size, Image.Resampling.LANCZOS)]

    @classmethod
    def download_models(cls, *args, **kwargs):
        """
        Resolve the user-supplied model path (nothing is actually downloaded).

        Parameters:
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments; ``model_path`` is required.

        Returns:
            str: The absolute, user-expanded path to the model file.

        Raises:
            ValueError: If no ``model_path`` keyword argument was given.
        """
        path = kwargs.get("model_path")
        if path is None:
            raise ValueError("model_path is required")
        return os.path.abspath(os.path.expanduser(path))

    @classmethod
    def name(cls, *args, **kwargs):
        """
        Return the registry name of this session.

        Returns:
            str: Always "dis_custom".
        """
        return "dis_custom"
| {
"repo_id": "danielgatis/rembg",
"file_path": "rembg/sessions/dis_custom.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
datalab-to/marker:examples/marker_modal_deployment.py | """
Modal deployment for Datalab Marker PDF conversion service.
"""
import modal
import os
from typing import Optional
# Define the Modal app
app = modal.App("datalab-marker-modal-demo")
# GPU type requested for both the model-download and inference containers.
GPU_TYPE = "L40S"
# Mount point of the persistent model-cache volume inside the container.
MODEL_PATH_PREFIX = "/root/.cache/datalab/models"
# Define the container image with all dependencies
image = (
    modal.Image.debian_slim(python_version="3.10")
    .apt_install(["git", "wget"])
    .env({"TORCH_DEVICE": "cuda"})  # make marker/torch target the GPU
    .pip_install([
        "marker-pdf[full]",
        "fastapi==0.104.1",
        "uvicorn==0.24.0",
        "python-multipart==0.0.6",
        "torch>=2.2.2,<3.0.0",
        "torchvision>=0.17.0",
        "torchaudio>=2.2.0",
    ])
)
# Create a persistent volume for model caching
models_volume = modal.Volume.from_name("marker-models-modal-demo", create_if_missing=True)
def setup_models_with_cache_check(logger, commit_volume=False):
    """
    Shared function to create models and handle cache checking/logging.

    Logs whether the persistent volume already holds model files, loads the
    marker model dictionary, and optionally commits the volume so freshly
    downloaded weights persist.
    """
    import gc
    import os

    from marker.models import create_model_dict

    cache_present = os.path.exists(MODEL_PATH_PREFIX)
    cache_entries = os.listdir(MODEL_PATH_PREFIX) if cache_present else []
    logger.info(f"Models cache directory exists: {cache_present}")
    logger.info(f"Models cache directory contents: {cache_entries}")
    if cache_present and cache_entries:
        logger.info("Found existing models in volume cache, loading from cache...")
    else:
        logger.warning("No models found in volume cache. Models will be downloaded now (this may take several minutes).")

    # create_model_dict downloads any missing weights into the cache dir.
    models = create_model_dict()
    logger.info(f"Successfully loaded {len(models)} models")

    # Report what ended up in the cache after loading.
    if os.path.exists(MODEL_PATH_PREFIX):
        logger.info(f"Models in cache: {os.listdir(MODEL_PATH_PREFIX)}")

    if commit_volume:
        gc.collect()
        logger.info("Attempting to commit volume...")
        models_volume.commit()
        logger.info("Volume committed successfully")
    return models
@app.function(
    image=image,
    volumes={MODEL_PATH_PREFIX: models_volume},
    gpu=GPU_TYPE,
    timeout=600,
)
def download_models():
    """
    Helper function to download models used in marker into a Modal volume.

    Runs setup_models_with_cache_check with commit_volume=True so the
    downloaded weights are persisted; errors are logged and re-raised.
    """
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    logger.info("Downloading models to persistent volume...")
    logger.info(f"Volume mounted at: {MODEL_PATH_PREFIX}")
    try:
        models = setup_models_with_cache_check(logger, commit_volume=True)
        return f"Models downloaded successfully: {list(models.keys())}"
    except Exception as exc:
        logger.error(f"Failed to download models: {exc}")
        raise
@app.cls(
image=image,
gpu=GPU_TYPE,
memory=16384,
timeout=600, # 10 minute timeout for large documents
volumes={MODEL_PATH_PREFIX: models_volume},
scaledown_window=300,
)
class MarkerModalDemoService:
@modal.enter()
def load_models(self):
"""Load models once per container using @modal.enter() for efficiency."""
import logging
import traceback
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.info("Loading Marker models using @modal.enter()...")
try:
self.models = setup_models_with_cache_check(logger, commit_volume=True)
except Exception as e:
logger.error(f"Error loading models: {e}")
traceback.print_exc()
self.models = None
@modal.asgi_app()
def marker_api(self):
import traceback
import io
import base64
import logging
from contextlib import asynccontextmanager
from typing import Optional
from pathlib import Path
from fastapi import FastAPI, Form, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from marker.converters.pdf import PdfConverter
from marker.config.parser import ConfigParser
from marker.settings import settings
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
# Models are already loaded in @modal.enter()
logger.info("Datalab Marker / Modal demo app starting up...")
yield
logger.info("Datalab Marker / Modal demo app shutting down...")
# Create FastAPI app
web_app = FastAPI(
title="Datalab Marker PDF Conversion Service - Modal Demo",
description="Convert PDFs and documents to markdown, JSON, or HTML using Marker, deployed on Modal",
version="1.0.0",
lifespan=lifespan
)
@web_app.get("/health")
async def health_check():
models_loaded = hasattr(self, 'models') and self.models is not None
model_count = len(self.models) if models_loaded else 0
# Check volume contents for debugging
cache_exists = os.path.exists(MODEL_PATH_PREFIX)
cache_contents = os.listdir(MODEL_PATH_PREFIX) if cache_exists else []
return {
"status": "healthy" if models_loaded else "loading",
"models_loaded": models_loaded,
"model_count": model_count,
"cache_dir": MODEL_PATH_PREFIX,
"cache_exists": cache_exists,
"cache_contents": cache_contents[:10]
}
@web_app.post("/convert")
async def convert_document(
file: UploadFile = File(..., description="Document to convert"),
page_range: Optional[str] = Form(None),
force_ocr: bool = Form(False),
paginate_output: bool = Form(False),
output_format: str = Form("markdown"),
use_llm: bool = Form(False),
):
"""Convert uploaded document to specified format."""
if not hasattr(self, 'models') or self.models is None:
logger.error("Models not available for conversion")
raise HTTPException(status_code=503, detail="Models not loaded yet. Please wait for model initialization.")
# Validate file type
allowed_extensions = {'.pdf', '.png', '.jpg', '.jpeg', '.tiff', '.bmp'}
file_ext = Path(file.filename).suffix.lower()
if file_ext not in allowed_extensions:
raise HTTPException(
status_code=400,
detail=f"Unsupported file type: {file_ext}. Supported: {allowed_extensions}"
)
# Validate output format
if output_format not in ["markdown", "json", "html", "chunks"]:
raise HTTPException(
status_code=400,
detail="Output format must be one of: markdown, json, html, chunks"
)
try:
# Read file content
file_content = await file.read()
# Save to temporary file
temp_path = f"/tmp/{file.filename}"
with open(temp_path, "wb") as temp_file:
temp_file.write(file_content)
# Configure conversion parameters
config = {
"filepath": temp_path,
"page_range": page_range,
"force_ocr": force_ocr,
"paginate_output": paginate_output,
"output_format": output_format,
"use_llm": use_llm,
}
# Create converter
config_parser = ConfigParser(config)
config_dict = config_parser.generate_config_dict()
config_dict["pdftext_workers"] = 1
converter = PdfConverter(
config=config_dict,
artifact_dict=self.models,
processor_list=config_parser.get_processors(),
renderer=config_parser.get_renderer(),
llm_service=config_parser.get_llm_service() if use_llm else None,
)
# Convert document - converter already applies the appropriate renderer
logger.info(f"Converting {file.filename} to {output_format}...")
rendered_output = converter(temp_path)
# Extract content based on output format
json_content = None
html_content = None
markdown_content = None
encoded_images = {}
if output_format == "json":
# For JSON, return the structured data directly
json_content = rendered_output.model_dump()
else:
from marker.output import text_from_rendered
text, _, images = text_from_rendered(rendered_output)
# Assign to appropriate content field
if output_format == "html":
html_content = text
else:
markdown_content = text
# Encode images as base64
for img_name, img_obj in images.items():
byte_stream = io.BytesIO()
img_obj.save(byte_stream, format=settings.OUTPUT_IMAGE_FORMAT)
encoded_images[img_name] = base64.b64encode(byte_stream.getvalue()).decode('utf-8')
metadata = rendered_output.metadata
logger.info(f"Conversion completed for {file.filename}")
# Clean up temp file
os.unlink(temp_path)
return JSONResponse({
"success": True,
"filename": file.filename,
"output_format": output_format,
"json": json_content,
"html": html_content,
"markdown": markdown_content,
"images": encoded_images,
"metadata": metadata,
"page_count": len(metadata.get("page_stats", [])),
})
except Exception as e:
# Clean up temp file if it exists
if os.path.exists(temp_path):
os.unlink(temp_path)
logger.error(f"Conversion error for {file.filename}: {str(e)}")
traceback.print_exc()
raise HTTPException(
status_code=500,
detail=f"Conversion failed: {str(e)}"
)
return web_app
#
# This does not get deployed. It's a useful entrypoint from your local CLI
# that you can use to test your deployment. It'll store the
# API response in a new file on your machine.
#
@app.local_entrypoint()
async def invoke_conversion(
    pdf_file: Optional[str] = None,
    output_format: str = "markdown",
    env: str = 'main'
):
    """
    Local entrypoint to test your deployed Marker endpoint in Modal.
    Usage:
        modal run marker_modal_deployment.py::invoke_conversion --pdf-file /path/to/file.pdf --output-format markdown
    """
    import requests
    import json
    from pathlib import Path

    # Request timeouts in seconds. Previously no timeout was set, so a wedged
    # service would hang this CLI forever. Conversion gets a generous budget
    # because a cold container also loads models on the first request.
    health_timeout = 30
    convert_timeout = 900

    if not pdf_file:
        print("No PDF file specified. Use --pdf-file /path/to/your.pdf")
        return

    pdf_path = Path(pdf_file)
    if not pdf_path.exists():
        print(f"File not found: {pdf_file}")
        return

    #
    # Get the web URL for our deployed service
    #
    try:
        service = modal.Cls.from_name(
            "datalab-marker-modal-demo",
            "MarkerModalDemoService",
            environment_name=env
        )
        web_url = service().marker_api.get_web_url()
        print(f"Found deployed service at: {web_url}")
    except Exception as e:
        print(f"Error getting web URL: {e}")
        print("Make sure you've deployed the service first with: modal deploy marker_modal_deployment.py")
        return

    print(f"Testing conversion of: {pdf_path.name}")
    print(f"Output format: {output_format}")

    #
    # Test health endpoint first (best-effort: failures are reported but do
    # not abort the conversion attempt)
    #
    try:
        health_response = requests.get(f"{web_url}/health", timeout=health_timeout)
        health_data = health_response.json()
        print(f"Service health: {health_data['status']}")
        print(f"Models loaded: {health_data['models_loaded']} ({health_data['model_count']} models)")
        if not health_data['models_loaded']:
            print("Warning: Models not loaded yet. First request may be slow.")
    except Exception as e:
        print(f"Health check failed: {e}")

    #
    # Make conversion request
    #
    try:
        with open(pdf_path, 'rb') as f:
            files = {'file': (pdf_path.name, f, 'application/pdf')}
            data = {'output_format': output_format}
            print(f"Sending request to {web_url}/convert...")
            response = requests.post(
                f"{web_url}/convert", files=files, data=data, timeout=convert_timeout
            )

        if response.status_code == 200:
            result = response.json()
            print(f"✅ Conversion successful!")
            print(f"Filename: {result['filename']}")
            print(f"Format: {result['output_format']}")
            print(f"Pages: {result['page_count']}")

            # Persist the full API response next to the current directory.
            output_file = f"{pdf_path.stem}_response.json"
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(result, f, indent=2, ensure_ascii=False)
            print(f"Full API response saved to: {output_file}")

            if result['images']:
                print(f"Images extracted: {len(result['images'])}")
        else:
            print(f"❌ Conversion failed: {response.status_code}")
            print(f"Error: {response.text}")
    except Exception as e:
        print(f"Request failed: {e}")
| {
"repo_id": "datalab-to/marker",
"file_path": "examples/marker_modal_deployment.py",
"license": "GNU General Public License v3.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:marker/utils/image.py | from PIL import Image
import numpy as np
import cv2
from typing import List, Optional
def is_blank_image(image: Image.Image, polygon: Optional[List[List[int]]] = None) -> bool:
    """Return True when *image* appears to contain no ink/text.

    A degenerate *polygon* (first two and last two corners identical after
    rounding, i.e. a zero-area region) also counts as blank.
    """
    # BUG FIX: the None check previously ran *after* np.asarray, where
    # np.asarray(None) yields a 0-d object array and image.shape[0] raised
    # IndexError instead of returning True.
    if image is None:
        return True

    arr = np.asarray(image)
    if arr.size == 0 or arr.ndim < 2 or arr.shape[0] == 0 or arr.shape[1] == 0:
        # Handle empty image case
        return True

    if polygon is not None:
        rounded_polys = [[int(corner[0]), int(corner[1])] for corner in polygon]
        if rounded_polys[0] == rounded_polys[1] and rounded_polys[2] == rounded_polys[3]:
            return True

    # Accept 2-D grayscale input as-is; only convert 3-channel RGB input
    # (previously a grayscale array crashed cv2.cvtColor).
    gray = arr if arr.ndim == 2 else cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    # Adaptive threshold (inverse for text as white)
    binarized = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 15
    )

    _, labels, _, _ = cv2.connectedComponentsWithStats(
        binarized, connectivity=8
    )
    # Keep every non-background component. Vectorized: replaces the previous
    # per-label Python loop, which was O(pixels * num_labels).
    cleaned = np.where(labels > 0, 255, 0).astype(np.uint8)

    kernel = np.ones((1, 5), np.uint8)
    dilated = cv2.dilate(cleaned, kernel, iterations=3)
    # Blank means dilation produced no foreground at all.
    return bool(dilated.sum() == 0)
"repo_id": "datalab-to/marker",
"file_path": "marker/utils/image.py",
"license": "GNU General Public License v3.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
datalab-to/marker:tests/renderers/test_html_renderer.py | import pytest
from marker.renderers.html import HTMLRenderer
@pytest.mark.config(
    {
        "page_range": [0],
        "disable_ocr": True,
        "add_block_ids": True,
        "paginate_output": True,
    }
)
def test_html_renderer_block_ids(pdf_document, config):
    """With add_block_ids enabled, rendered HTML should carry block IDs."""
    rendered = HTMLRenderer(config)(pdf_document)
    # Verify some block IDs are present
    assert "/page/0/Text/1" in rendered.html
| {
"repo_id": "datalab-to/marker",
"file_path": "tests/renderers/test_html_renderer.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
datalab-to/marker:marker/extractors/document.py | import json
from pydantic import BaseModel
from typing import Annotated, Optional, List
from marker.extractors import BaseExtractor
from marker.extractors.page import PageExtractionSchema
from marker.logger import get_logger
logger = get_logger()
class DocumentExtractionSchema(BaseModel):
    """Structured result of a document-level extraction LLM call."""

    # Free-text reasoning the LLM produced before emitting the JSON payload.
    analysis: str
    # The extracted data serialized as a JSON string.
    document_json: str
class DocumentExtractor(BaseExtractor):
    """
    An extractor that combines data from across all pages.
    """

    # JSON schema the final output must conform to; must be set by the caller.
    page_schema: Annotated[
        str,
        "The JSON schema to be extracted from the page.",
    ] = ""

    page_extraction_prompt = """You are an expert document analyst who reads documents and pulls data out in JSON format. You will receive your detailed notes from all the pages of a document, and a JSON schema that we want to extract from the document. Your task is to extract all the information properly into the JSON schema.

Some notes:
- The schema may contain a single object to extract from the entire document, or an array of objects.
- The schema may contain nested objects, arrays, and other complex structures.

Some guidelines:
- Some entities will span multiple pages, so make sure to consult your notes thoroughly.
- In the case of potential conflicting values, pull out the values you have the most confidence in, from your notes.
- If you cannot find a value for a field, leave it blank in the JSON.

**Instructions:**
1. Analyze your provided notes.
2. Analyze the JSON schema.
3. Write a detailed analysis of the notes, and the associated values in the schema. Make sure to reference which page each piece of information comes from.
4. Write the output in the JSON schema format, ensuring all required fields are filled out. Output only the json data, without any additional text or formatting.

**Example:**
Input:

Detailed Notes
Page 0
On this page, I see a table with car makes and sales. The makes are Honda and Toyota, with sales of 100 and 200 respectively. The color is not present in the table, so I will leave it blank in the JSON. That information may be present on another page. Some JSON snippets I may find useful later are:
```json
{
    "make": "Honda",
    "sales": 100,
}
```
```json
{
    "make": "Toyota",
    "sales": 200,
}
```
Honda is the first row in the table, and Toyota is the second row. Make is the first column, and sales is the second.

Page 1
I see a table that contains 2 rows, and has a color header. The first row has the color red, and the second row has the color blue. Here are some useful snippets:

Schema
```json
{'$defs': {'Cars': {'properties': {'make': {'title': 'Make', 'type': 'string'}, 'sales': {'title': 'Sales', 'type': 'integer'}, 'color': {'title': 'Color', 'type': 'string'}}, 'required': ['make', 'sales', 'color'], 'title': 'Cars', 'type': 'object'}}, 'properties': {'cars': {'items': {'$ref': '#/$defs/Cars'}, 'title': 'Cars', 'type': 'array'}}, 'required': ['cars'], 'title': 'CarsList', 'type': 'object'}
```

Output:

Analysis: From the notes, it looks like the information I need is in a table that spans 2 pages. The first page has the makes and sales, while the second page has the colors. I will combine this information into the JSON schema.

JSON
{
    "cars": [
        {
            "make": "Honda",
            "sales": 100,
            "color": "red"
        },
        {
            "make": "Toyota",
            "sales": 200,
            "color": "blue"
        }
    ]
}

**Input:**

Detailed Notes
{{document_notes}}

Schema
```json
{{schema}}
```
"""

    def assemble_document_notes(self, page_notes: List[PageExtractionSchema]) -> str:
        """Concatenate per-page notes into one "Page N" transcript string."""
        notes = ""
        for i, page_schema in enumerate(page_notes):
            # BUG FIX: previously tested `if not page_notes` (the whole list,
            # always truthy inside the loop) instead of the current entry, so
            # empty/None page entries were never skipped.
            if not page_schema:
                continue
            notes += f"Page {i + 1}\n{page_schema.detailed_notes}\n\n"
        return notes.strip()

    def __call__(
        self,
        page_notes: List[PageExtractionSchema],
        **kwargs,
    ) -> Optional[DocumentExtractionSchema]:
        """Merge all page notes into the final document-level extraction.

        Returns None when the LLM response is missing a required key.
        Raises ValueError when no page schema has been configured.
        """
        if not self.page_schema:
            raise ValueError(
                "Page schema must be defined for structured extraction to work."
            )

        prompt = self.page_extraction_prompt.replace(
            "{{document_notes}}", self.assemble_document_notes(page_notes)
        ).replace("{{schema}}", json.dumps(self.page_schema))
        response = self.llm_service(prompt, None, None, DocumentExtractionSchema)
        logger.debug(f"Document extraction response: {response}")

        if not response or any(
            key not in response for key in ("analysis", "document_json")
        ):
            return None

        # Strip optional markdown code fences around the JSON payload.
        # BUG FIX: the previous lstrip("```json") strips a *character set*
        # ('`', 'j', 's', 'o', 'n'), not a prefix, and could eat leading
        # characters of the payload; removeprefix/removesuffix are exact.
        json_data = response["document_json"].strip()
        json_data = json_data.removeprefix("```json").removeprefix("```")
        json_data = json_data.removesuffix("```").strip()
        return DocumentExtractionSchema(
            analysis=response["analysis"], document_json=json_data
        )
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/extractors/document.py",
"license": "GNU General Public License v3.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
datalab-to/marker:marker/processors/blank_page.py | from typing import Annotated
from PIL import Image
import numpy as np
import cv2
from marker.processors import BaseProcessor
from marker.schema import BlockTypes
from marker.schema.blocks import Block
from marker.schema.document import Document
from marker.logger import get_logger
logger = get_logger()
class BlankPageProcessor(BaseProcessor):
    """
    A processor to filter out blank pages detected as a single layout block
    """

    # Minimum page/block intersection for a block to count as "full page".
    full_page_block_intersection_threshold: Annotated[
        float, "Threshold to detect blank pages at"
    ] = 0.8
    # Feature flag: the processor is a no-op unless explicitly enabled.
    filter_blank_pages: Annotated[bool, "Remove blank pages detected as images."] = (
        False
    )

    def is_blank(self, image: Image.Image):
        """Heuristically decide whether a rendered block image contains no ink."""
        arr = np.asarray(image)
        if arr.size == 0 or arr.ndim < 2 or arr.shape[0] == 0 or arr.shape[1] == 0:
            # Handle empty image case
            return True

        # Accept 2-D grayscale input as-is; only convert 3-channel RGB input
        # (previously a grayscale array crashed cv2.cvtColor).
        gray = arr if arr.ndim == 2 else cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Adaptive threshold (inverse for text as white)
        binarized = cv2.adaptiveThreshold(
            gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 15
        )

        _, labels, _, _ = cv2.connectedComponentsWithStats(
            binarized, connectivity=8
        )
        # Keep every non-background component. Vectorized: replaces the
        # previous per-label Python loop, which was O(pixels * num_labels).
        cleaned = np.where(labels > 0, 255, 0).astype(np.uint8)

        kernel = np.ones((1, 5), np.uint8)
        dilated = cv2.dilate(cleaned, kernel, iterations=3)
        return bool(dilated.sum() == 0)

    def __call__(self, document: Document):
        """Remove single-block pages that are blank pictures/figures."""
        if not self.filter_blank_pages:
            return

        for page in document.pages:
            structure_blocks = page.structure_blocks(document)
            # Only single-block pages are candidates.
            if not structure_blocks or len(structure_blocks) > 1:
                continue

            full_page_block: Block = structure_blocks[0]
            # Short-circuit cheap checks first so the block image is only
            # rendered and analyzed when the geometry/type already matches
            # (previously all three conditions were evaluated eagerly).
            is_blank_full_page = (
                full_page_block.block_type in [BlockTypes.Picture, BlockTypes.Figure]
                and page.polygon.intersection_area(full_page_block.polygon)
                > self.full_page_block_intersection_threshold
                and self.is_blank(full_page_block.get_image(document))
            )
            if is_blank_full_page:
                logger.debug(f"Removing blank block {full_page_block.id}")
                page.remove_structure_items([full_page_block.id])
                full_page_block.removed = True
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/processors/blank_page.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
datalab-to/marker:marker/processors/llm/llm_page_correction.py | import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Annotated
from marker.logger import get_logger
from marker.processors.llm import BaseLLMComplexBlockProcessor
from marker.schema import BlockTypes
from marker.schema.blocks import BlockId
from marker.schema.document import Document
from marker.schema.groups import PageGroup
from pydantic import BaseModel
from tqdm import tqdm
logger = get_logger()
# Inline formatting tags the LLM is allowed to emit inside any block's HTML.
FORMAT_TAGS = ["b", "i", "u", "del", "math", "sub", "sup", "a", "code", "p", "img"]
# Extra structural tags permitted per block type (empty list -> formatting tags only).
BLOCK_MAP = {
    "Text": [],
    "TextInlineMath": [],
    "Table": ["table", "tbody", "tr", "td", "th"],
    "ListGroup": ["ul", "li"],
    "SectionHeader": [],
    "Form": ["form", "input", "select", "textarea", "table", "tbody", "tr", "td", "th"],
    "Figure": [],
    "Picture": [],
    "Code": ["pre"],
    "TableOfContents": ["table", "tbody", "tr", "td", "th"],
}
# Full whitelist substituted into the prompt: formatting tags plus every structural tag.
ALL_TAGS = FORMAT_TAGS + [tag for tags in BLOCK_MAP.values() for tag in tags]
class LLMPageCorrectionProcessor(BaseLLMComplexBlockProcessor):
    """Applies user-guided, LLM-driven page corrections (reorder and/or rewrite).

    For each page the LLM receives the page image plus a JSON dump of the
    top-level blocks, and may respond with a new reading order, rewritten
    block HTML/types, or both ("reorder_first").
    """

    block_correction_prompt: Annotated[
        str, "The user prompt to guide the block correction process."
    ] = None
    default_user_prompt = """Your goal is to reformat the blocks to be as correct as possible, without changing the underlying meaning of the text within the blocks. Mostly focus on reformatting the content. Ignore minor formatting issues like extra <i> tags."""
    page_prompt = """You're a text correction expert specializing in accurately reproducing text from PDF pages. You will be given a JSON list of blocks on a PDF page, along with the image for that page. The blocks will be formatted like the example below. The blocks will be presented in reading order.

```json
[
    {
        "bbox": [x1, y1, x2, y2],
        "id": "/page/0/Text/1",
        "block_type": "Text",
        "html": "<p>Some text here</p>",
    }, ...
]
```

You will also be given a prompt from the user that tells you how to correct the blocks. Your task is to analyze the blocks and the image, then follow the prompt to correct the blocks.

Here are the types of changes you can make in response to the prompt:

- Reorder the blocks to reflect the correct reading order.
- Change the block type to the correct type - the potential types are "SectionHeader", "Form", "Text", "Table", "Figure", "Picture", "ListGroup", "PageFooter", "PageHeader", "Footnote", or "Equation". In this case, update the html as well to match the new block type.
- Make edits to block content by changing the HTML.

Guidelines:
- Only use the following tags: {{format_tags}}. Do not use any other tags.
- The math tag can have the attribute `display="block"` to indicate display math, the a tag can have the attribute `href="..."` to indicate a link, and td and th tags can have the attribute `colspan="..."` and `rowspan="..."` to indicate table cells that span multiple columns or rows. There can be a "block-type" attribute on p tags. Do not use any other attributes.
- Keep LaTeX formulas inside <math> tags - these are important for downstream processing.
- Bboxes are normalized 0-1000
- The order of the JSON list is the reading order for the blocks
- Follow the user prompt faithfully, and only make additional changes if there is a significant issue with correctness.
- Stay faithful to the original image, and do not insert any content that is not present in the image or the blocks, unless specifically requested by the user prompt.

**Instructions:**
1. Carefully examine the provided JSON representation of the page, along with the image.
2. Analyze the user prompt.
3. Identify any issues you'll need to fix, and write a short analysis.
4. If everything is fine, output "no_corrections" Otherwise, output the type of correction needed: ["reorder", "rewrite", "reorder_first"]. Rewrite includes rewriting html and changing the block type. If you need to do both, then perform only the reordering, and output "reorder_first", so we can do the rewriting later.
5. If corrections are needed, output any blocks that need updates:
    a. If reading order needs to be changed, output the IDs of the blocks in the correct order, and keep block_type and html blank, like this:
    ```json
    [
        {
            "id": "/page/0/Text/1",
            "block_type": "",
            "html": ""
        },
        ...
    ]
    b. If blocks need to be rewritten, output the block ids and new HTML for the blocks, like this:
    ```json
    [
        {
            "id": "/page/0/Text/1",
            "block_type": "Text",
            "html": "<p>New HTML content here</p>"
        },
        ...
    ]
    ```

**Example:**
Input:
Blocks
```json
[
    {
        "bbox": [x1, y1, x2, y2],
        "id": "/page/0/Text/1",
        "block_type": "Text",
        "html": "1.14 Vector Operations",
    },
    {
        "bbox": [x1, y1, x2, y2],
        "id": "/page/0/Text/2",
        "block_type": "Text",
        "html": "<p>You can perform many operations on a vector, including...</p>",
    },
]
```
User Prompt
Ensure that all blocks have the correct labels, and that reading order is correct.
Output:
Analysis: The blocks are in the correct reading order, but the first block should actually be a SectionHeader.
```json
[
    {
        "id": "/page/0/Text/1",
        "block_type": "SectionHeader",
        "html": "<h1>1.14 Vector Operations</h1>"
    }
]
```

**Input:**
Blocks
```json
{{page_json}}
```
User Prompt
{{user_prompt}}
"""

    def get_selected_blocks(
        self,
        document: Document,
        page: PageGroup,
    ) -> List[dict]:
        """Return the page's top-level blocks as normalized JSON dicts."""
        selected_blocks = page.structure_blocks(document)
        json_blocks = [
            self.normalize_block_json(block, document, page)
            for block in selected_blocks
        ]
        return json_blocks

    def process_rewriting(self, document: Document, page1: PageGroup):
        """Run one LLM correction pass for a single page.

        Depending on the model's correction_type, the page's blocks are
        reordered, rewritten, or left untouched; "reorder_first" reorders and
        then recurses for a follow-up rewrite pass.
        """
        page_blocks = self.get_selected_blocks(document, page1)
        image = page1.get_image(document, highres=False)
        prompt = (
            self.page_prompt.replace("{{page_json}}", json.dumps(page_blocks))
            .replace("{{format_tags}}", json.dumps(ALL_TAGS))
            .replace("{{user_prompt}}", self.block_correction_prompt)
        )
        response = self.llm_service(prompt, image, page1, PageSchema)
        logger.debug(f"Got reponse from LLM: {response}")
        if not response or "correction_type" not in response:
            logger.warning("LLM did not return a valid response")
            return

        correction_type = response["correction_type"]
        if correction_type == "no_corrections":
            return
        elif correction_type in ["reorder", "reorder_first"]:
            self.load_blocks(response)
            self.handle_reorder(response["blocks"], page1)

            # If we needed to reorder first, we will handle the rewriting next
            if correction_type == "reorder_first":
                self.process_rewriting(document, page1)
        elif correction_type == "rewrite":
            self.load_blocks(response)
            self.handle_rewrites(response["blocks"], document)
        else:
            logger.warning(f"Unknown correction type: {correction_type}")
            return

    def load_blocks(self, response):
        """The LLM sometimes returns `blocks` as a JSON string; decode in place."""
        if isinstance(response["blocks"], str):
            response["blocks"] = json.loads(response["blocks"])

    def handle_reorder(self, blocks: list, page1: PageGroup):
        """Reorder page1's structure to match the LLM's block ordering.

        The response must reference exactly the page's own blocks (no more,
        no fewer) or the reorder is skipped.
        """
        unique_page_ids = set()
        document_page_ids = [str(page1.page_id)]
        document_pages = [page1]
        for block_data in blocks:
            try:
                # IDs look like "/page/<page_id>/<block_type>/<block_id>", so a
                # split yields 5 fields (the first is empty).
                # BUG FIX: the previous 3-way unpack raised ValueError for every
                # id, leaving unique_page_ids empty, so reordering never ran.
                _, _, page_id, _, _ = block_data["id"].split("/")
                unique_page_ids.add(page_id)
            except Exception as e:
                logger.debug(f"Error parsing block ID {block_data['id']}: {e}")
                continue

        if set(document_page_ids) != unique_page_ids:
            logger.debug(
                "Some page IDs in the response do not match the document's pages"
            )
            return

        # BUG FIX: iterate the deterministic list of page ids rather than
        # zipping a set (set iteration order is arbitrary).
        for target_page_id, document_page in zip(document_page_ids, document_pages):
            block_ids_for_page = []
            for block_data in blocks:
                try:
                    # BUG FIX: same 5-field unpack as above (was a 3-way unpack
                    # that always raised, making this loop collect nothing).
                    _, _, page_id, block_type, block_id = block_data["id"].split("/")
                    if page_id != target_page_id:
                        continue
                    block_id = BlockId(
                        page_id=page_id,
                        block_id=block_id,
                        block_type=getattr(BlockTypes, block_type),
                    )
                    block_ids_for_page.append(block_id)
                except Exception as e:
                    logger.debug(f"Error parsing block ID {block_data['id']}: {e}")
                    continue

            # Both sides should have the same values, just be reordered
            if not all(
                [
                    block_id in document_page.structure
                    for block_id in block_ids_for_page
                ]
            ):
                logger.debug(
                    f"Some blocks for page {target_page_id} not found in document"
                )
                continue

            if not all(
                [
                    block_id in block_ids_for_page
                    for block_id in document_page.structure
                ]
            ):
                logger.debug(
                    f"Some blocks in document page {target_page_id} not found in response"
                )
                continue

            # Swap the order of blocks in the document page
            document_page.structure = block_ids_for_page

    def handle_rewrites(self, blocks: list, document: Document):
        """Apply new HTML from the LLM to the referenced document blocks."""
        for block_data in blocks:
            try:
                # After lstrip("/"), ids split into ["page", page_id, type, id].
                block_id = block_data["id"].strip().lstrip("/")
                _, page_id, block_type, block_id = block_id.split("/")
                block_id = BlockId(
                    page_id=page_id,
                    block_id=block_id,
                    block_type=getattr(BlockTypes, block_type),
                )
                block = document.get_block(block_id)
                if not block:
                    logger.debug(f"Block {block_id} not found in document")
                    continue

                if hasattr(block, "html"):
                    block.html = block_data["html"]
            except Exception as e:
                logger.debug(f"Error parsing block ID {block_data['id']}: {e}")
                continue

    def rewrite_blocks(self, document: Document):
        """Run the correction pass concurrently over every page of the document."""
        if not self.block_correction_prompt:
            return

        # Don't show progress if there are no blocks to process
        total_blocks = len(document.pages)
        if total_blocks == 0:
            return

        pbar = tqdm(
            total=max(1, total_blocks - 1),
            desc=f"{self.__class__.__name__} running",
            disable=self.disable_tqdm,
        )
        with ThreadPoolExecutor(max_workers=self.max_concurrency) as executor:
            for future in as_completed(
                [
                    executor.submit(self.process_rewriting, document, page)
                    for page in document.pages
                ]
            ):
                future.result()  # Raise exceptions if any occurred
                pbar.update(1)

        pbar.close()
class BlockSchema(BaseModel):
    """One corrected block in the LLM response."""

    # Block identifier, e.g. "/page/0/Text/1".
    id: str
    # Replacement HTML (empty string in pure-reorder responses).
    html: str
    # Replacement block type (empty string in pure-reorder responses).
    block_type: str
class PageSchema(BaseModel):
    """Top-level LLM response for a page correction pass."""

    # Free-text reasoning the model produced before deciding on corrections.
    analysis: str
    # One of: "no_corrections", "reorder", "rewrite", "reorder_first".
    correction_type: str
    # Blocks to reorder or rewrite (may arrive JSON-encoded as a string).
    blocks: List[BlockSchema]
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/processors/llm/llm_page_correction.py",
"license": "GNU General Public License v3.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:marker/processors/llm/llm_sectionheader.py | import json
from typing import List, Tuple
from tqdm import tqdm
from marker.logger import get_logger
from marker.processors.llm import BaseLLMComplexBlockProcessor
from marker.schema import BlockTypes
from marker.schema.blocks import Block
from marker.schema.document import Document
from marker.schema.groups import PageGroup
from pydantic import BaseModel
logger = get_logger()
class LLMSectionHeaderProcessor(BaseLLMComplexBlockProcessor):
    """LLM-backed processor that fixes section-header levels (h1-h6) document-wide.

    All SectionHeader blocks are sent to the LLM in a single call, together
    with their page numbers and normalized geometry; the model returns new
    HTML only for the headers whose level needs to change.
    """

    # Prompt template; {{section_header_json}} is replaced with the serialized headers.
    page_prompt = """You're a text correction expert specializing in accurately analyzing complex PDF documents. You will be given a list of all of the section headers from a document, along with their page number and approximate dimensions. The headers will be formatted like below, and will be presented in order.

```json
[
    {
        "bbox": [x1, y1, x2, y2],
        "width": x2 - x1,
        "height": y2 - y1,
        "page": 0,
        "id": "/page/0/SectionHeader/1",
        "html": "<h1>Introduction</h1>",
    }, ...
]
```

Bboxes have been normalized to 0-1000.

Your goal is to make sure that the section headers have the correct levels (h1, h2, h3, h4, h5, or h6). If a section header does not have the right level, edit the html to fix it.

Guidelines:
- Edit the blocks to ensure that the section headers have the correct levels.
- Only edit the h1, h2, h3, h4, h5, and h6 tags. Do not change any other tags or content in the headers.
- Only output the headers that changed (if nothing changed, output nothing).
- Every header you output needs to have one and only one level tag (h1, h2, h3, h4, h5, or h6).

**Instructions:**
1. Carefully examine the provided section headers and JSON.
2. Identify any changes you'll need to make, and write a short analysis.
3. Output "no_corrections", or "corrections_needed", depending on whether you need to make changes.
4. If corrections are needed, output any blocks that need updates. Only output the block ids and html, like this:
```json
[
    {
        "id": "/page/0/SectionHeader/1",
        "html": "<h2>Introduction</h2>"
    },
    ...
]
```

**Example:**
Input:
Section Headers
```json
[
    {
        "bbox": [x1, y1, x2, y2],
        "id": "/page/0/SectionHeader/1",
        "page": 0,
        "html": "1 Vector Operations",
    },
    {
        "bbox": [x1, y1, x2, y2],
        "id": "/page/0/SectionHeader/2",
        "page": 0,
        "html": "1.1 Vector Addition",
    },
]
```
Output:
Analysis: The first section header is missing the h1 tag, and the second section header is missing the h2 tag.
```json
[
    {
        "id": "/page/0/SectionHeader/1",
        "html": "<h1>1 Vector Operations</h1>"
    },
    {
        "id": "/page/0/SectionHeader/2",
        "html": "<h2>1.1 Vector Addition</h2>"
    }
]
```

**Input:**
Section Headers
```json
{{section_header_json}}
```
"""

    def get_selected_blocks(
        self,
        document: Document,
        page: PageGroup,
    ) -> List[dict]:
        """Return the page's top-level blocks as normalized JSON dicts."""
        selected_blocks = page.structure_blocks(document)
        json_blocks = [
            self.normalize_block_json(block, document, page, i)
            for i, block in enumerate(selected_blocks)
        ]
        return json_blocks

    def process_rewriting(
        self, document: Document, section_headers: List[Tuple[Block, dict]]
    ):
        """Send all section headers to the LLM in one call and apply corrections."""
        section_header_json = [sh[1] for sh in section_headers]
        for item in section_header_json:
            # IDs look like "/page/<page_id>/<block_type>/<block_id>".
            _, _, page_id, block_type, block_id = item["id"].split("/")
            item["page"] = page_id
            item["width"] = item["bbox"][2] - item["bbox"][0]
            item["height"] = item["bbox"][3] - item["bbox"][1]
            del item["block_type"]  # Not needed, since they're all section headers

        prompt = self.page_prompt.replace(
            "{{section_header_json}}", json.dumps(section_header_json)
        )
        # No image is passed: the model decides from text and geometry alone.
        response = self.llm_service(
            prompt, None, document.pages[0], SectionHeaderSchema
        )
        logger.debug(f"Got section header reponse from LLM: {response}")

        if not response or "correction_type" not in response:
            logger.warning("LLM did not return a valid response")
            return

        correction_type = response["correction_type"]
        if correction_type == "no_corrections":
            return

        # handle_rewrites is inherited from the base processor — presumably it
        # maps block ids back to document blocks and swaps in the new HTML.
        self.load_blocks(response)
        self.handle_rewrites(response["blocks"], document)

    def load_blocks(self, response):
        """The LLM sometimes returns `blocks` as a JSON string; decode in place."""
        if isinstance(response["blocks"], str):
            response["blocks"] = json.loads(response["blocks"])

    def rewrite_blocks(self, document: Document):
        """Collect every SectionHeader block and run a single correction pass."""
        # Don't show progress if there are no blocks to process
        # NOTE(review): get_selected_blocks passes a positional index to
        # normalize_block_json but this call site does not — confirm that
        # parameter is optional.
        section_headers = [
            (block, self.normalize_block_json(block, document, page))
            for page in document.pages
            for block in page.structure_blocks(document)
            if block.block_type == BlockTypes.SectionHeader
        ]
        if len(section_headers) == 0:
            return

        pbar = tqdm(
            total=1,
            desc=f"Running {self.__class__.__name__}",
            disable=self.disable_tqdm,
        )
        self.process_rewriting(document, section_headers)
        pbar.update(1)
        pbar.close()
class BlockSchema(BaseModel):
    """One corrected section header in the LLM response."""

    # Block identifier, e.g. "/page/0/SectionHeader/1".
    id: str
    # Replacement HTML with the corrected heading level tag.
    html: str
class SectionHeaderSchema(BaseModel):
    """Top-level LLM response for the section-header correction pass."""

    # Free-text reasoning produced before deciding on corrections.
    analysis: str
    # Either "no_corrections" or "corrections_needed".
    correction_type: str
    # Headers whose HTML should be replaced (may arrive JSON-encoded as a string).
    blocks: List[BlockSchema]
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/processors/llm/llm_sectionheader.py",
"license": "GNU General Public License v3.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
datalab-to/marker:marker/renderers/chunk.py | import html
from typing import List, Dict
from bs4 import BeautifulSoup
from pydantic import BaseModel
from marker.renderers.json import JSONRenderer, JSONBlockOutput
from marker.schema.document import Document
class FlatBlockOutput(BaseModel):
    """A single top-level block flattened out of the page hierarchy."""

    # Block identifier, e.g. "/page/0/Text/1".
    id: str
    # Block type name, e.g. "Text" or "Table".
    block_type: str
    # Fully assembled HTML for the block and its children.
    html: str
    # Zero-based page index the block belongs to.
    page: int
    # Corner coordinates of the block region.
    polygon: List[List[float]]
    # Axis-aligned bounding box [x1, y1, x2, y2].
    bbox: List[float]
    # Section heading context (level -> heading id), when available.
    section_hierarchy: Dict[int, str] | None = None
    # Base64-encoded images keyed by image block id, when present.
    images: dict | None = None
class ChunkOutput(BaseModel):
    """Full chunked rendering of a document."""

    # All top-level blocks across every page, in document order.
    blocks: List[FlatBlockOutput]
    # Per-page geometry: page_id -> {"bbox": ..., "polygon": ...}.
    page_info: Dict[int, dict]
    # Document-level metadata from the renderer.
    metadata: dict
def collect_images(block: "JSONBlockOutput") -> dict[str, str]:
    """Recursively gather all images from *block* and its descendants.

    Returns a new dict (parent entries first, then children).
    BUG FIX: the previous implementation called ``images.update(...)`` on
    ``block.images`` itself, mutating the input block's image mapping as a
    side effect; a copy is accumulated instead.
    """
    images = dict(block.images or {})
    for child_block in getattr(block, "children", None) or []:
        images.update(collect_images(child_block))
    return images
def assemble_html_with_images(block: JSONBlockOutput, image_blocks: set[str]) -> str:
    """Recursively inline child HTML into *block*'s HTML.

    Leaf blocks of an image type get an ``<img>`` tag referencing their id;
    for group blocks, every ``<content-ref>`` placeholder is replaced with the
    assembled HTML of the matching child.
    """
    children = getattr(block, "children", None)
    if not children:
        if block.block_type in image_blocks:
            return f"<p>{block.html}<img src='{block.id}'></p>"
        return block.html

    # Assemble each child once, keyed by id for placeholder substitution.
    rendered_children = {
        child.id: assemble_html_with_images(child, image_blocks)
        for child in children
    }
    soup = BeautifulSoup(block.html, "html.parser")
    for ref in soup.find_all("content-ref"):
        src_id = ref.attrs["src"]
        if src_id in rendered_children:
            ref.replace_with(rendered_children[src_id])
    return html.unescape(str(soup))
def json_to_chunks(
    block: JSONBlockOutput, image_blocks: set[str], page_id: int=0) -> FlatBlockOutput | List[FlatBlockOutput]:
    """Flatten a JSON block tree into chunk records.

    A Page block expands to the list of its children's chunks (tagged with
    the page's own id); any other block becomes a single FlatBlockOutput.
    """
    if block.block_type != "Page":
        return FlatBlockOutput(
            id=block.id,
            block_type=block.block_type,
            html=assemble_html_with_images(block, image_blocks),
            page=page_id,
            polygon=block.polygon,
            bbox=block.bbox,
            section_hierarchy=block.section_hierarchy,
            images=collect_images(block),
        )

    # Page ids look like ".../<page_number>"; propagate it to the children.
    own_page_id = int(block.id.split("/")[-1])
    return [
        json_to_chunks(child, image_blocks, page_id=own_page_id)
        for child in block.children
    ]
class ChunkRenderer(JSONRenderer):
    """Renderer that produces a flat list of chunk blocks instead of a block tree."""
    def __call__(self, document: Document) -> ChunkOutput:
        """Render *document* into ChunkOutput (flat blocks + per-page geometry)."""
        document_output = document.render(self.block_config)
        json_output = []
        for page_output in document_output.children:
            json_output.append(self.extract_json(document, page_output))
        # This will get the top-level blocks from every page
        chunk_output = []
        for item in json_output:
            chunks = json_to_chunks(item, set([str(block) for block in self.image_blocks]))
            chunk_output.extend(chunks)
        # page_id -> bounding geometry, used by consumers to locate chunks.
        page_info = {
            page.page_id: {"bbox": page.polygon.bbox, "polygon": page.polygon.polygon}
            for page in document.pages
        }
        return ChunkOutput(
            blocks=chunk_output,
            page_info=page_info,
            metadata=self.generate_document_metadata(document, document_output),
        )
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/renderers/chunk.py",
"license": "GNU General Public License v3.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:marker/utils/batch.py | from marker.utils.gpu import GPUManager
def get_batch_sizes_worker_counts(gpu_manager: GPUManager, peak_worker_vram: int):
    """Pick per-model batch-size overrides and a worker count from available VRAM.

    When VRAM only allows a single worker, the default batch sizes are kept
    (empty override dict); otherwise a fixed set of tuned batch sizes is
    returned alongside the worker count.
    """
    total_vram = gpu_manager.get_gpu_vram()
    worker_count = max(1, total_vram // peak_worker_vram)
    if worker_count <= 1:
        return {}, worker_count
    batch_size_overrides = {
        "layout_batch_size": 12,
        "detection_batch_size": 8,
        "table_rec_batch_size": 12,
        "ocr_error_batch_size": 12,
        "recognition_batch_size": 64,
        "equation_batch_size": 16,
        "detector_postprocessing_cpu_workers": 2,
    }
    return batch_size_overrides, worker_count
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/utils/batch.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
datalab-to/marker:marker/utils/gpu.py | import os
import subprocess
import torch
from marker.logger import get_logger
from marker.settings import settings
logger = get_logger()
class GPUManager:
    """Manage a single CUDA device: VRAM queries and the NVIDIA MPS server.

    Used as a context manager; on CUDA systems it starts an MPS control
    daemon on entry and shuts it down on exit, with pipe/log directories
    namespaced by device index.
    """
    # Fallback VRAM (GB) reported when not on CUDA or nvidia-smi fails.
    default_gpu_vram: int = 8

    def __init__(self, device_idx: int):
        self.device_idx = device_idx
        # NOTE(review): stored but never read/restored in this class — confirm
        # whether compute-mode restoration was intended.
        self.original_compute_mode = None
        self.mps_server_process = None

    def __enter__(self):
        if self.using_cuda():
            self.start_mps_server()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.using_cuda():
            self.cleanup()

    @staticmethod
    def using_cuda():
        # True when the configured torch device string names a CUDA device.
        return "cuda" in settings.TORCH_DEVICE_MODEL

    def check_cuda_available(self) -> bool:
        """Return True when torch sees CUDA and nvidia-smi is runnable."""
        if not torch.cuda.is_available():
            return False
        try:
            subprocess.run(["nvidia-smi", "--version"], capture_output=True, check=True)
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            return False

    def get_gpu_vram(self):
        """Return total VRAM of this device in whole GB (floored), or the default."""
        if not self.using_cuda():
            return self.default_gpu_vram
        try:
            result = subprocess.run(
                [
                    "nvidia-smi",
                    "--query-gpu=memory.total",
                    "--format=csv,noheader,nounits",
                    "-i",
                    str(self.device_idx),
                ],
                capture_output=True,
                text=True,
                check=True,
            )
            vram_mb = int(result.stdout.strip())
            vram_gb = int(vram_mb / 1024)
            return vram_gb
        except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
            # Any query failure falls back to a conservative default.
            return self.default_gpu_vram

    def start_mps_server(self) -> bool:
        """Start an NVIDIA MPS control daemon scoped to this device index.

        Returns True on success; False when CUDA/nvidia-smi is unavailable or
        the daemon could not be spawned.
        """
        if not self.check_cuda_available():
            return False
        try:
            # Set MPS environment with chunk-specific directories
            env = os.environ.copy()
            pipe_dir = f"/tmp/nvidia-mps-{self.device_idx}"
            log_dir = f"/tmp/nvidia-log-{self.device_idx}"
            env["CUDA_MPS_PIPE_DIRECTORY"] = pipe_dir
            env["CUDA_MPS_LOG_DIRECTORY"] = log_dir
            # Create directories
            os.makedirs(pipe_dir, exist_ok=True)
            os.makedirs(log_dir, exist_ok=True)
            # Start MPS control daemon
            self.mps_server_process = subprocess.Popen(
                ["nvidia-cuda-mps-control", "-d"],
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            logger.info(f"Started NVIDIA MPS server for chunk {self.device_idx}")
            return True
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            logger.warning(
                f"Failed to start MPS server for chunk {self.device_idx}: {e}"
            )
            return False

    def stop_mps_server(self) -> None:
        """Ask the MPS control daemon to quit, then terminate our child process.

        Best-effort: failures are logged, never raised.
        """
        try:
            # Stop MPS server
            env = os.environ.copy()
            env["CUDA_MPS_PIPE_DIRECTORY"] = f"/tmp/nvidia-mps-{self.device_idx}"
            env["CUDA_MPS_LOG_DIRECTORY"] = f"/tmp/nvidia-log-{self.device_idx}"
            subprocess.run(
                ["nvidia-cuda-mps-control"],
                input="quit\n",
                text=True,
                env=env,
                timeout=10,
            )
            if self.mps_server_process:
                self.mps_server_process.terminate()
                try:
                    self.mps_server_process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    # Escalate to SIGKILL if it ignores terminate().
                    self.mps_server_process.kill()
                self.mps_server_process = None
            logger.info(f"Stopped NVIDIA MPS server for chunk {self.device_idx}")
        except Exception as e:
            logger.warning(
                f"Failed to stop MPS server for chunk {self.device_idx}: {e}"
            )

    def cleanup(self) -> None:
        """Release GPU-related resources (currently just the MPS server)."""
        self.stop_mps_server()
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/utils/gpu.py",
"license": "GNU General Public License v3.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:tests/renderers/test_chunk_renderer.py | import pytest
from marker.renderers.chunk import ChunkRenderer
@pytest.mark.config({"page_range": [0]})
def test_chunk_renderer(pdf_document):
renderer = ChunkRenderer()
chunk_output = renderer(pdf_document)
blocks = chunk_output.blocks
page_info = chunk_output.page_info
assert len(blocks) == 14
assert blocks[0].block_type == "SectionHeader"
assert page_info[0]["bbox"] is not None
assert page_info[0]["polygon"] is not None
figure_groups = [block for block in blocks if block.block_type == "FigureGroup"]
figures = [block for block in blocks if block.block_type == "Figure"]
captions = [block for block in blocks if block.block_type == "Caption"]
assert len(figure_groups) == 1
assert len(figures) == 0
assert len(captions) == 0
figure_group = figure_groups[0]
assert figure_group.images is not None
assert len(figure_group.images) == 1
assert "<img src='/page/0/Figure/9'>" in figure_group.html | {
"repo_id": "datalab-to/marker",
"file_path": "tests/renderers/test_chunk_renderer.py",
"license": "GNU General Public License v3.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
datalab-to/marker:marker/processors/block_relabel.py | from copy import deepcopy
from typing import Annotated
from marker.processors import BaseProcessor
from marker.schema import BlockTypes
from marker.schema.blocks import BlockId
from marker.schema.document import Document
from marker.schema.registry import get_block_class
from marker.logger import get_logger
logger = get_logger()
class BlockRelabelProcessor(BaseProcessor):
    """
    A processor to heuristically relabel blocks based on a confidence threshold.
    Each rule in the relabel string maps an original block label to a new one
    if the confidence exceeds a given threshold.
    """
    block_relabel_str: Annotated[
        str,
        "Comma-separated relabeling rules in the format '<original_label>:<new_label>:<confidence_threshold>'.",
        "Each rule defines how blocks of a certain type should be relabeled when the confidence exceeds the threshold.",
        "Example: 'Table:Picture:0.85,Form:Picture:0.9'"
    ] = ""

    def __init__(self, config=None):
        # Parse block_relabel_str into {BlockTypes: (threshold, new BlockTypes)};
        # malformed rules are logged and skipped rather than raising.
        super().__init__(config)
        self.block_relabel_map = {}
        if not self.block_relabel_str:
            return
        for i, block_config_str in enumerate(self.block_relabel_str.split(',')):
            block_config_str = block_config_str.strip()
            if not block_config_str:
                continue  # Skip empty segments
            try:
                parts = block_config_str.split(':')
                if len(parts) != 3:
                    raise ValueError(f"Expected 3 parts, got {len(parts)}")
                block_label, block_relabel, confidence_str = parts
                confidence_thresh = float(confidence_str)
                block_type = BlockTypes[block_label]
                relabel_block_type = BlockTypes[block_relabel]
                self.block_relabel_map[block_type] = (
                    confidence_thresh,
                    relabel_block_type
                )
            except Exception as e:
                logger.warning(f"Failed to parse relabel rule '{block_config_str}' at index {i}: {e}. Expected format is <original_label>:<new_label>:<confidence_threshold>")

    def __call__(self, document: Document):
        """Relabel structure blocks whose own-label confidence is at or below the rule threshold."""
        if len(self.block_relabel_map) == 0:
            return
        for page in document.pages:
            for block in page.structure_blocks(document):
                if block.block_type not in self.block_relabel_map:
                    continue
                block_id = BlockId(page_id=page.page_id, block_id=block.block_id, block_type=block.block_type)
                confidence_thresh, relabel_block_type = self.block_relabel_map[block.block_type]
                # NOTE(review): top_k.get() returns None when the block's own
                # label is missing, which would make the comparison below raise
                # TypeError — confirm top_k always contains block.block_type.
                # NOTE(review): relabeling happens when confidence is <= the
                # threshold (high-confidence blocks are skipped), which is the
                # opposite of what the class docstring says — confirm intent.
                confidence = block.top_k.get(block.block_type)
                if confidence > confidence_thresh:
                    logger.debug(f"Skipping relabel for {block_id}; Confidence: {confidence} > Confidence Threshold {confidence_thresh} for re-labelling")
                    continue
                new_block_cls = get_block_class(relabel_block_type)
                # Preserve geometry/structure/metadata; mark provenance as heuristic.
                new_block = new_block_cls(
                    polygon=deepcopy(block.polygon),
                    page_id=block.page_id,
                    structure=deepcopy(block.structure),
                    text_extraction_method=block.text_extraction_method,
                    source="heuristics",
                    top_k=block.top_k,
                    metadata=block.metadata
                )
                page.replace_block(block, new_block)
logger.debug(f"Relabelled {block_id} to {relabel_block_type}") | {
"repo_id": "datalab-to/marker",
"file_path": "marker/processors/block_relabel.py",
"license": "GNU General Public License v3.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:marker/converters/extraction.py | import re
from typing import Annotated
from marker.builders.document import DocumentBuilder
from marker.builders.line import LineBuilder
from marker.builders.ocr import OcrBuilder
from marker.builders.structure import StructureBuilder
from marker.converters.pdf import PdfConverter
from marker.extractors.document import DocumentExtractor
from marker.extractors.page import PageExtractor
from marker.providers.registry import provider_from_filepath
from marker.renderers.extraction import ExtractionRenderer, ExtractionOutput
from marker.renderers.markdown import MarkdownRenderer
from marker.logger import get_logger
logger = get_logger()
class ExtractionConverter(PdfConverter):
    """Converter that runs structured (schema-based) LLM extraction over a document.

    Converts the input to paginated markdown, then runs a page-level
    note-taking pass and a document-level assembly pass over the markdown.
    """
    # Splits paginated markdown back into pages; appears to match the page
    # separator ("{<page num>}" followed by 48 dashes) emitted when
    # paginate_output is enabled — confirm against MarkdownRenderer.
    pattern: str = r"{\d+\}-{48}\n\n"
    existing_markdown: Annotated[
        str, "Markdown that was already converted for extraction."
    ] = None  # NOTE(review): default is None despite the str annotation

    def build_document(self, filepath: str):
        """Build and post-process a Document; returns (document, provider)."""
        provider_cls = provider_from_filepath(filepath)
        layout_builder = self.resolve_dependencies(self.layout_builder_class)
        line_builder = self.resolve_dependencies(LineBuilder)
        ocr_builder = self.resolve_dependencies(OcrBuilder)
        provider = provider_cls(filepath, self.config)
        document = DocumentBuilder(self.config)(
            provider, layout_builder, line_builder, ocr_builder
        )
        structure_builder_cls = self.resolve_dependencies(StructureBuilder)
        structure_builder_cls(document)
        for processor in self.processor_list:
            processor(document)
        return document, provider

    def __call__(self, filepath: str) -> ExtractionOutput:
        """Convert *filepath* (or reuse existing_markdown) and extract per the schema."""
        self.config["paginate_output"] = True  # Ensure we can split the output properly
        self.config["output_format"] = (
            "markdown"  # Output must be markdown for extraction
        )
        markdown = self.existing_markdown
        if not markdown:
            document, provider = self.build_document(filepath)
            self.page_count = len(document.pages)
            renderer = self.resolve_dependencies(MarkdownRenderer)
            output = renderer(document)
            markdown = output.markdown
        output_pages = re.split(self.pattern, markdown)[1:]  # Split output into pages
        # This needs an LLM service for extraction, this sets it in the extractor
        if self.artifact_dict.get("llm_service") is None:
            self.artifact_dict["llm_service"] = self.resolve_dependencies(
                self.default_llm_service
            )
        page_extractor = self.resolve_dependencies(PageExtractor)
        document_extractor = self.resolve_dependencies(DocumentExtractor)
        renderer = self.resolve_dependencies(ExtractionRenderer)
        # Inference in parallel
        notes = page_extractor(output_pages)
        document_output = document_extractor(notes)
        merged = renderer(document_output, markdown)
        return merged
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/converters/extraction.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
datalab-to/marker:marker/extractors/page.py | import json
from concurrent.futures import ThreadPoolExecutor
from pydantic import BaseModel
from typing import Annotated, Optional, List
from tqdm import tqdm
from marker.extractors import BaseExtractor
from marker.logger import get_logger
logger = get_logger()
class PageExtractionSchema(BaseModel):
    """LLM response schema for one page-chunk extraction pass."""
    # Short summary relating the requested schema fields to the page content.
    description: str
    # Free-form notes (with JSON snippets) used later for document-level assembly.
    detailed_notes: str
class PageExtractor(BaseExtractor):
    """
    An extractor that pulls data from a single page.

    Pages are grouped into chunks of ``extraction_page_chunk_size`` and each
    chunk is sent to the LLM with the target JSON schema; the model returns
    per-chunk notes that a later document-level pass assembles into JSON.
    """
    extraction_page_chunk_size: Annotated[
        int, "The number of pages to chunk together for extraction."
    ] = 3
    page_schema: Annotated[
        str,
        "The JSON schema to be extracted from the page.",
    ] = ""
    page_extraction_prompt = """You are an expert document analyst who reads documents and pulls data out in JSON format. You will receive the markdown representation of a document page, and a JSON schema that we want to extract from the document. Your task is to write detailed notes on this page, so that when you look at all your notes from across the document, you can fill in the schema.
Some notes:
- The schema may contain a single object to extract from the entire document, or an array of objects.
- The schema may contain nested objects, arrays, and other complex structures.
Some guidelines:
- Write very thorough notes, and include specific JSON snippets that can be extracted from the page.
- You may need information from prior or subsequent pages to fully fill in the schema, so make sure to write detailed notes that will let you join entities across pages later on.
- Estimate your confidence in the values you extract, so you can reconstruct the JSON later when you only have your notes.
- Some tables and other data structures may continue on a subsequent page, so make sure to store the positions that data comes from where appropriate.
**Instructions:**
1. Analyze the provided markdown representation of the page.
2. Analyze the JSON schema.
3. Write a short description of the fields in the schema, and the associated values in the markdown.
4. Write detailed notes on the page, including any values that can be extracted from the markdown. Include snippets of JSON that can be extracted from the page where possible.
**Example:**
Input:
Markdown
```markdown
| Make | Sales |
|--------|-------|
| Honda | 100 |
| Toyota | 200 |
```
Schema
```json
{'$defs': {'Cars': {'properties': {'make': {'title': 'Make', 'type': 'string'}, 'sales': {'title': 'Sales', 'type': 'integer'}, 'color': {'title': 'Color', 'type': 'string'}}, 'required': ['make', 'sales', 'color'], 'title': 'Cars', 'type': 'object'}}, 'properties': {'cars': {'items': {'$ref': '#/$defs/Cars'}, 'title': 'Cars', 'type': 'array'}}, 'required': ['cars'], 'title': 'CarsList', 'type': 'object'}
```
Output:
Description: The schema has a list of cars, each with a make, sales, and color. The image and markdown contain a table with 2 cars: Honda with 100 sales and Toyota with 200 sales. The color is not present in the table.
Detailed Notes: On this page, I see a table with car makes and sales. The makes are Honda and Toyota, with sales of 100 and 200 respectively. The color is not present in the table, so I will leave it blank in the JSON. That information may be present on another page. Some JSON snippets I may find useful later are:
```json
{
    "make": "Honda",
    "sales": 100,
}
```
```json
{
    "make": "Toyota",
    "sales": 200,
}
```
Honda is the first row in the table, and Toyota is the second row. Make is the first column, and sales is the second.
**Input:**
Markdown
```markdown
{{page_md}}
```
Schema
```json
{{schema}}
```
"""

    def chunk_page_markdown(self, page_markdown: List[str]) -> List[str]:
        """
        Chunk the page markdown into smaller pieces for processing.
        """
        chunks = []
        for i in range(0, len(page_markdown), self.extraction_page_chunk_size):
            chunk = page_markdown[i : i + self.extraction_page_chunk_size]
            chunks.append("\n\n".join(chunk))
        return chunks

    def inference_single_chunk(
        self, page_markdown: str
    ) -> Optional[PageExtractionSchema]:
        """Run one LLM call for a page chunk; returns None on a malformed response."""
        prompt = self.page_extraction_prompt.replace(
            "{{page_md}}", page_markdown
        ).replace("{{schema}}", json.dumps(self.page_schema))
        response = self.llm_service(prompt, None, None, PageExtractionSchema)
        logger.debug(f"Page extraction response: {response}")
        # Guard against responses missing the required keys.
        if not response or any(
            [
                key not in response
                for key in [
                    "description",
                    "detailed_notes",
                ]
            ]
        ):
            return None
        return PageExtractionSchema(
            description=response["description"],
            detailed_notes=response["detailed_notes"],
        )

    def __call__(
        self,
        page_markdown: List[str],
        **kwargs,
    ) -> List[PageExtractionSchema]:
        """Extract notes for every page chunk, fanning LLM calls out over a thread pool.

        Raises ValueError when page_schema is unset; results keep chunk order.
        """
        if not self.page_schema:
            raise ValueError(
                "Page schema must be defined for structured extraction to work."
            )
        chunks = self.chunk_page_markdown(page_markdown)
        results = []
        pbar = tqdm(
            desc="Running page extraction",
            disable=self.disable_tqdm,
            total=len(chunks),
        )
        with ThreadPoolExecutor(max_workers=self.max_concurrency) as executor:
            for future in [
                executor.submit(self.inference_single_chunk, chunk) for chunk in chunks
            ]:
                results.append(future.result())  # Raise exceptions if any occurred
                pbar.update(1)
        pbar.close()
        return results
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/extractors/page.py",
"license": "GNU General Public License v3.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
datalab-to/marker:marker/renderers/extraction.py | from pydantic import BaseModel
from marker.extractors.document import DocumentExtractionSchema
from marker.renderers import BaseRenderer
class ExtractionOutput(BaseModel):
    """Final structured-extraction result."""
    # LLM's reasoning/analysis text for the document-level pass.
    analysis: str
    # The extracted data as a JSON string.
    document_json: str
    # The markdown the extraction was run over (kept for reuse/caching).
    original_markdown: str
class ExtractionRenderer(BaseRenderer):
    """Package document-level extraction results with their source markdown."""

    def __call__(
        self, output: DocumentExtractionSchema, markdown: str
    ) -> ExtractionOutput:
        # Intentionally a thin pass-through for now; richer post-processing of
        # the extracted JSON is expected to land here later.
        fields = {
            "analysis": output.analysis,
            "document_json": output.document_json,
            "original_markdown": markdown,
        }
        return ExtractionOutput(**fields)
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/renderers/extraction.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
datalab-to/marker:marker/scripts/common.py | import ast
import base64
import io
import re
import sys
from typing import Optional
from PIL import Image
import click
import pypdfium2
import streamlit as st
from pydantic import BaseModel
from streamlit.runtime.uploaded_file_manager import UploadedFile
from marker.config.parser import ConfigParser
from marker.config.printer import CustomClickPrinter
from marker.models import create_model_dict
from marker.settings import settings
@st.cache_data()
def parse_args():
    """Parse marker's common CLI options from sys.argv.

    Builds a synthetic click command carrying ConfigParser's shared options
    and parses the process arguments with it. Returns the params dict, or
    {"error": <message>} when parsing fails.
    """
    # Use to grab common cli options
    @ConfigParser.common_options
    def options_func():
        pass
    def extract_click_params(decorated_function):
        # Click stores option declarations on __click_params__.
        if hasattr(decorated_function, "__click_params__"):
            return decorated_function.__click_params__
        return []
    cmd = CustomClickPrinter("Marker app.")
    extracted_params = extract_click_params(options_func)
    cmd.params.extend(extracted_params)
    ctx = click.Context(cmd)
    try:
        cmd_args = sys.argv[1:]
        cmd.parse_args(ctx, cmd_args)
        return ctx.params
    except click.exceptions.ClickException as e:
        return {"error": str(e)}
@st.cache_resource()
def load_models():
    """Create and cache the model artifact dict once per Streamlit session."""
    return create_model_dict()
def open_pdf(pdf_file):
    """Open an uploaded file's bytes as a pypdfium2 document."""
    stream = io.BytesIO(pdf_file.getvalue())
    return pypdfium2.PdfDocument(stream)
def img_to_html(img, img_alt):
    """Render a PIL image as an inline base64 data-URI ``<img>`` tag."""
    buffer = io.BytesIO()
    img.save(buffer, format=settings.OUTPUT_IMAGE_FORMAT)
    payload = base64.b64encode(buffer.getvalue()).decode()
    mime = settings.OUTPUT_IMAGE_FORMAT.lower()
    return (
        f'<img src="data:image/{mime};base64,{payload}" '
        f'alt="{img_alt}" style="max-width: 100%;">'
    )
@st.cache_data()
def get_page_image(pdf_file, page_num, dpi=96):
    """Render one PDF page (or open a non-PDF upload) as an RGB PIL image."""
    if "pdf" in pdf_file.type:
        doc = open_pdf(pdf_file)
        page = doc[page_num]
        png_image = (
            page.render(
                scale=dpi / 72,  # pdfium's base resolution is 72 dpi
            )
            .to_pil()
            .convert("RGB")
        )
    else:
        # Non-PDF uploads are treated as plain images.
        png_image = Image.open(pdf_file).convert("RGB")
    return png_image
@st.cache_data()
def page_count(pdf_file: UploadedFile):
    """Return the maximum selectable page index for the uploaded file.

    NOTE(review): for PDFs this returns len(doc) - 1 (the last 0-based page
    index, matching the page-number widget's max) rather than the count the
    name suggests — confirm callers expect this.
    """
    if "pdf" in pdf_file.type:
        doc = open_pdf(pdf_file)
        return len(doc) - 1
    else:
        return 1
def pillow_image_to_base64_string(img: Image) -> str:
    """Encode a PIL image as a base64 string of its JPEG bytes."""
    with io.BytesIO() as buffer:
        img.save(buffer, format="JPEG")
        payload = buffer.getvalue()
    return base64.b64encode(payload).decode("utf-8")
def extract_root_pydantic_class(schema_code: str) -> Optional[str]:
    """Find the name of the root pydantic model defined in *schema_code*.

    The root is the single BaseModel subclass whose fields reference other
    models while not being referenced by any of them. Returns the lone
    model's name when exactly one model is defined, and None when no
    unambiguous root exists or the code cannot be parsed.

    The original implementation resolved references in the same pass that
    discovered class names, so a root class defined *before* the models it
    references never saw them and the lookup returned None; this version
    collects all model names first.
    """
    try:
        tree = ast.parse(schema_code)
        # Pass 1: collect every class that directly inherits from BaseModel,
        # so references resolve regardless of definition order.
        pydantic_classes = [
            node
            for node in ast.walk(tree)
            if isinstance(node, ast.ClassDef)
            and any(
                isinstance(base, ast.Name) and base.id == "BaseModel"
                for base in node.bases
            )
        ]
        class_names = {node.name for node in pydantic_classes}
        if len(class_names) == 1:
            return pydantic_classes[0].name
        # Pass 2: record which other models each model's annotated fields reference.
        class_info = {name: {"references": set(), "fields": []} for name in class_names}
        for node in pydantic_classes:
            for item in node.body:
                if isinstance(item, ast.AnnAssign) and isinstance(
                    item.target, ast.Name
                ):
                    class_info[node.name]["fields"].append(item.target.id)
                    annotation_str = ast.unparse(item.annotation)
                    # Match List[Other], Optional[Other], Dict[Any, Other], bare Other, etc.
                    for other_class in class_names:
                        pattern = rf"(?:List|Dict|Set|Tuple|Optional|Union)?\[.*{other_class}.*\]|{other_class}"
                        if re.search(pattern, annotation_str):
                            class_info[node.name]["references"].add(other_class)
        referenced_classes = set()
        for info in class_info.values():
            referenced_classes.update(info["references"])
        # Root candidates reference other models but are never referenced themselves.
        root_candidates = {
            name
            for name, info in class_info.items()
            if info["references"] and name not in referenced_classes
        }
        if len(root_candidates) == 1:
            return root_candidates.pop()
        return None
    except Exception as e:
        print(f"Error parsing schema: {e}")
        return None
def get_root_class(schema_code: str) -> Optional[BaseModel]:
    """Exec user-provided pydantic schema code and return its root model class.

    SECURITY: exec() runs arbitrary code from *schema_code* — callers must
    treat the input as trusted (operator-entered).
    Returns None when no unambiguous root class can be identified.
    """
    root_class_name = extract_root_pydantic_class(schema_code)
    if not root_class_name:
        return None
    # Prepend common imports so bare schema snippets still exec cleanly.
    if "from pydantic" not in schema_code:
        schema_code = "from pydantic import BaseModel\n" + schema_code
    if "from typing" not in schema_code:
        schema_code = (
            "from typing import List, Dict, Optional, Set, Tuple, Union, Any\n\n"
            + schema_code
        )
    # Execute the code in a new namespace
    namespace = {}
    exec(schema_code, namespace)
    # Return the root class object
    return namespace.get(root_class_name)
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/scripts/common.py",
"license": "GNU General Public License v3.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:marker/scripts/extraction_app.py | import json
import os
from streamlit_ace import st_ace
from pydantic import BaseModel
from marker.converters.extraction import ExtractionConverter
from marker.scripts.common import (
parse_args,
load_models,
get_page_image,
page_count,
get_root_class,
)
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
os.environ["IN_STREAMLIT"] = "true"
from streamlit.runtime.uploaded_file_manager import UploadedFile
import tempfile
from typing import Any, Dict
import streamlit as st
from marker.config.parser import ConfigParser
def extract_data(
    fname: str, config: dict, schema: str, markdown: str | None = None
) -> (str, Dict[str, Any], dict):
    """Run the ExtractionConverter on *fname* with the given JSON schema.

    Passing previously converted *markdown* lets the converter skip
    re-parsing the document.
    NOTE(review): the declared return annotation is a tuple, but the
    converter call appears to return a single output object — confirm.
    """
    config["pdftext_workers"] = 1
    config["page_schema"] = schema
    config["existing_markdown"] = markdown
    config_parser = ConfigParser(config)
    config_dict = config_parser.generate_config_dict()
    converter_cls = ExtractionConverter
    # model_dict is a module-level global populated by load_models().
    converter = converter_cls(
        config=config_dict,
        artifact_dict=model_dict,
        processor_list=config_parser.get_processors(),
        renderer=config_parser.get_renderer(),
        llm_service=config_parser.get_llm_service(),
    )
    return converter(fname)
st.set_page_config(layout="wide")
col1, col2 = st.columns([0.5, 0.5])
model_dict = load_models()
cli_options = parse_args()
st.markdown("""
# Marker Extraction Demo
This app will let you use marker to do structured extraction.
Warning: This can execute untrusted code entered into the schema panel.
""")
in_file: UploadedFile = st.sidebar.file_uploader(
"PDF, document, or image file:",
type=["pdf", "png", "jpg", "jpeg", "gif", "pptx", "docx", "xlsx", "html", "epub"],
)
# Initialize session state variables
if "rendered_pydantic_schema" not in st.session_state:
    st.session_state.rendered_pydantic_schema = ""
if "markdown" not in st.session_state:
    st.session_state.markdown = ""
if "current_file_id" not in st.session_state:
    st.session_state.current_file_id = None
# Detect file changes and clear markdown when new file is uploaded
if in_file is not None:
    # Create a unique identifier for the current file
    current_file_id = f"{in_file.name}_{in_file.size}_{hash(in_file.getvalue())}"
    # Check if this is a new file
    if st.session_state.current_file_id != current_file_id:
        st.session_state.current_file_id = current_file_id
        st.session_state.markdown = ""  # Clear markdown for new file
else:
    # No file uploaded, clear the current file ID
    if st.session_state.current_file_id is not None:
        st.session_state.current_file_id = None
        st.session_state.markdown = ""  # Clear markdown when no file
        st.session_state.rendered_pydantic_schema = ""
# Nothing below can run without an uploaded file.
if in_file is None:
    st.stop()
filetype = in_file.type
with col1:
page_count = page_count(in_file)
page_number = st.number_input(
f"Page number out of {page_count}:", min_value=0, value=0, max_value=page_count
)
pil_image = get_page_image(in_file, page_number)
st.image(pil_image, use_container_width=True)
with col2:
tab1, tab2 = st.tabs(["JSON Schema", "Pydantic Schema"])
# Initialize schema variable
schema = None
with tab1:
st.write("Enter an existing JSON schema here:")
default_json_value = (
st.session_state.rendered_pydantic_schema
if st.session_state.rendered_pydantic_schema
else ""
)
json_schema_input = st.text_area(
"JSON Schema",
value=default_json_value,
height=300,
placeholder='{"type": "object", "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}}',
key="json_schema_input",
label_visibility="collapsed",
)
# Set schema if JSON input is provided
if json_schema_input and json_schema_input.strip():
try:
# Validate JSON
json.loads(json_schema_input)
schema = json_schema_input.strip()
st.success("✅ Valid JSON schema detected")
except json.JSONDecodeError as e:
st.error(f"❌ Invalid JSON: {e}")
schema = None
with tab2:
st.write("Enter pydantic schema here:")
pydantic_schema_input = st_ace(
value="""from pydantic import BaseModel
class Schema(BaseModel):
# Add your fields here
# Example:
name: str
age: int
# email: str
pass""",
language="python",
height=300,
key="pydantic_editor",
)
render_schema = st.button("🔄 Render Pydantic schema to JSON")
if render_schema and pydantic_schema_input:
try:
pydantic_root: BaseModel = get_root_class(pydantic_schema_input)
json_schema = pydantic_root.model_json_schema()
schema = json.dumps(json_schema, indent=2)
st.success("✅ Schema rendered successfully!")
st.json(json_schema)
st.session_state.rendered_pydantic_schema = schema
except Exception as e:
st.error(f"❌ Could not parse your schema: {e}")
schema = None
elif (
pydantic_schema_input
and pydantic_schema_input.strip()
and not render_schema
):
# If there's Pydantic code but not rendered yet, show a message
if (
"class Schema(BaseModel):" in pydantic_schema_input
and "pass" not in pydantic_schema_input
):
st.info(
"💡 Click 'Render Pydantic schema to JSON' to convert your Pydantic model to JSON schema"
)
# Move the run logic outside of col2
run_marker = st.sidebar.button("Run Extraction")
use_llm = st.sidebar.checkbox(
"Use LLM", help="Use LLM for higher quality text", value=False
)
force_ocr = st.sidebar.checkbox("Force OCR", help="Force OCR on all pages", value=False)
strip_existing_ocr = st.sidebar.checkbox(
"Strip existing OCR",
help="Strip existing OCR text from the PDF and re-OCR.",
value=False,
)
# Check if schema is provided before running
if run_marker:
if not schema:
st.error(
"❌ Please provide a schema in either the JSON Schema or Pydantic Schema tab before running extraction."
)
st.stop()
# Run Marker
with tempfile.TemporaryDirectory() as tmp_dir:
temp_pdf = os.path.join(tmp_dir, "temp.pdf")
with open(temp_pdf, "wb") as f:
f.write(in_file.getvalue())
cli_options.update(
{
"force_ocr": force_ocr,
"use_llm": use_llm,
"strip_existing_ocr": strip_existing_ocr,
}
)
try:
rendered = extract_data(
temp_pdf, cli_options, schema, st.session_state.markdown
)
with col2:
st.write("## Output JSON")
st.json(rendered.model_dump(exclude=["original_markdown"]))
st.session_state.markdown = rendered.original_markdown
except Exception as e:
st.error(f"❌ Extraction failed: {e}")
else:
# Show instruction when not running
if not schema:
st.info("📝 Please provide a schema and click 'Run Extraction' to begin.")
| {
"repo_id": "datalab-to/marker",
"file_path": "marker/scripts/extraction_app.py",
"license": "GNU General Public License v3.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
datalab-to/marker:tests/builders/test_ocr_builder.py | from PIL import Image
from marker.builders.ocr import OcrBuilder
def test_blank_char_builder(recognition_model):
    """OcrBuilder should yield no spans for an empty character list."""
    builder = OcrBuilder(recognition_model)
    image = Image.new("RGB", (100, 100))
    spans = builder.spans_from_html_chars([], None, image)  # Test with empty char list
    assert len(spans) == 0
| {
"repo_id": "datalab-to/marker",
"file_path": "tests/builders/test_ocr_builder.py",
"license": "GNU General Public License v3.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
datalab-to/marker:tests/converters/test_extraction_converter.py | import json
import pytest
from marker.converters.extraction import ExtractionConverter
from marker.extractors.page import PageExtractionSchema
from marker.extractors.document import DocumentExtractionSchema
from marker.services import BaseService
class MockLLMService(BaseService):
    """LLM stub returning canned responses keyed by the requested schema."""
    def __call__(self, prompt, image=None, page=None, response_schema=None, **kwargs):
        if response_schema == PageExtractionSchema:
            return {
                "description": "Mock extraction description",
                "detailed_notes": "Mock detailed notes for page extraction",
            }
        elif response_schema == DocumentExtractionSchema:
            return {
                "analysis": "Mock document analysis",
                "document_json": json.dumps({"test_key": "test_value"}),
            }
        # Unknown schemas get an empty response.
        return {}
@pytest.fixture
def mock_llm_service():
    # Returns the class itself (not an instance); tests instantiate as needed.
    return MockLLMService
@pytest.fixture
def extraction_converter(config, model_dict, mock_llm_service):
    """ExtractionConverter wired to the mock LLM with a one-key test schema."""
    test_schema = {
        "title": "TestSchema",
        "type": "object",
        "properties": {"test_key": {"title": "Test Key", "type": "string"}},
        "required": ["test_key"],
    }
    config["page_schema"] = json.dumps(test_schema)
    config["output_format"] = "markdown"
    model_dict["llm_service"] = mock_llm_service
    converter = ExtractionConverter(
        artifact_dict=model_dict, processor_list=None, config=config
    )
    converter.llm_service = mock_llm_service
    converter.default_llm_service = MockLLMService
    return converter
@pytest.mark.config({"page_range": [0]})
def test_extraction_converter(config, model_dict, mock_llm_service, temp_doc):
    # An unparseable page_schema string should not break conversion; the
    # mock LLM still produces the canned document JSON.
    config["page_schema"] = "invalid json"
    model_dict["llm_service"] = mock_llm_service
    converter = ExtractionConverter(
        artifact_dict=model_dict, processor_list=None, config=config
    )
    converter.artifact_dict["llm_service"] = mock_llm_service()
    results = converter(temp_doc.name)
    assert results.document_json == '{"test_key": "test_value"}'
@pytest.mark.config({"page_range": [0, 1]})
def test_extraction_converter_multiple_pages(extraction_converter, temp_doc):
    # Multi-page runs should still merge into one document-level result.
    result = extraction_converter(temp_doc.name)
    assert result is not None
    assert result.document_json is not None
    assert json.loads(result.document_json) == {"test_key": "test_value"}
    assert result.analysis == "Mock document analysis"
| {
"repo_id": "datalab-to/marker",
"file_path": "tests/converters/test_extraction_converter.py",
"license": "GNU General Public License v3.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
davila7/claude-code-templates:cli-tool/components/hooks/automation/change-logger.py | #!/usr/bin/env python3
"""
Change Logger Hook
Logs every file mutation (Edit, Write, Bash) to a CSV file for demo prep and session review.
Output: .claude/critical_log_changes.csv
"""
import csv
import json
import os
import sys
from datetime import datetime
# Read-only commands that should not be logged
READONLY_COMMANDS = {
    "cat", "head", "tail", "less", "more",
    "ls", "dir", "tree", "pwd", "which", "where", "whereis",
    "echo", "printf",
    "grep", "rg", "find", "fd", "ag",
    "git status", "git log", "git diff", "git show", "git branch",
    "git remote", "git stash list", "git tag",
    "node -e", "python -c", "ruby -e",
    "type", "file", "wc", "du", "df",
}

CSV_PATH = ".claude/critical_log_changes.csv"
CSV_HEADERS = ["timestamp", "tool", "file_path", "action", "details"]


def is_readonly_command(command):
    """Return True if a bash command is read-only and should not be logged.

    A prefix entry only matches on a word boundary, so e.g. "lsblk" is not
    mistaken for the read-only "ls" and "git statuses" is not mistaken for
    "git status" (a bare startswith() check had those false positives).
    """
    cmd_stripped = command.strip()
    for ro_cmd in READONLY_COMMANDS:
        # Exact match, or the prefix followed by whitespace (arguments follow).
        if cmd_stripped == ro_cmd:
            return True
        if cmd_stripped.startswith(ro_cmd) and cmd_stripped[len(ro_cmd)].isspace():
            return True
    return False
def log_change(tool_name, file_path, action, details=""):
    """Append one row describing a file mutation to the CSV change log."""
    os.makedirs(os.path.dirname(CSV_PATH), exist_ok=True)
    is_new_file = not os.path.exists(CSV_PATH)
    row = [
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        tool_name,
        file_path,
        action,
        details[:200],  # cap free-text detail to keep the CSV compact
    ]
    with open(CSV_PATH, "a", newline="", encoding="utf-8") as log_file:
        writer = csv.writer(log_file, quoting=csv.QUOTE_MINIMAL)
        if is_new_file:
            writer.writerow(CSV_HEADERS)
        writer.writerow(row)
def main():
    """Read a hook event from stdin and record any file mutation it describes."""
    try:
        payload = json.load(sys.stdin)
    except (json.JSONDecodeError, EOFError):
        sys.exit(0)

    tool = payload.get("tool_name", "")
    tool_input = payload.get("tool_input", {})

    if tool == "Write":
        log_change(tool, tool_input.get("file_path", "unknown"), "created")
    elif tool in ("Edit", "MultiEdit"):
        log_change(tool, tool_input.get("file_path", "unknown"), "modified")
    elif tool == "Bash":
        cmd = tool_input.get("command", "")
        if cmd and not is_readonly_command(cmd):
            log_change(tool, "-", "executed", cmd[:200])

    # Never block tool execution
    sys.exit(0)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/hooks/automation/change-logger.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/settings/statusline/deadline-countdown.py | #!/usr/bin/env python3
"""
Deadline Countdown Statusline
Shows git branch, changed files count, and countdown to deadline.
Color-coded urgency: green >2h, yellow 1-2h, red <1h, blinking <30min.
Configure with DEADLINE_TIME (HH:MM, default 15:30) env var.
"""
import json
import os
import subprocess
import sys
from datetime import datetime
def get_git_info():
    """Build the colored git segment: branch name plus dirty-file count."""
    try:
        # Raises CalledProcessError when not inside a git repository.
        subprocess.check_output(
            ["git", "rev-parse", "--git-dir"], stderr=subprocess.DEVNULL
        )
        branch_out = subprocess.check_output(
            ["git", "branch", "--show-current"], stderr=subprocess.DEVNULL
        )
        branch = branch_out.decode().strip()
        if not branch:
            return ""
        status_out = subprocess.check_output(
            ["git", "status", "--porcelain"], stderr=subprocess.DEVNULL
        )
        dirty = len(status_out.decode().splitlines())
        if dirty:
            return f"\033[31m\u00b7 {branch} ({dirty})\033[0m"  # Red = dirty
        return f"\033[32m\u00b7 {branch}\033[0m"  # Green = clean
    except Exception:
        return ""
def get_countdown():
    """Render time remaining until DEADLINE_TIME (HH:MM) with urgency colors."""
    raw = os.environ.get("DEADLINE_TIME", "15:30")
    try:
        hour, minute = map(int, raw.split(":"))
    except (ValueError, AttributeError):
        hour, minute = 15, 30  # fall back to the documented default

    now = datetime.now()
    target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
    remaining = int((target - now).total_seconds())
    reset = "\033[0m"

    # Past the deadline: blinking red overtime counter.
    if remaining <= 0:
        return f"\033[31;5m OVERTIME +{abs(remaining) // 60}m{reset}"

    mins_left = remaining // 60
    hrs, mins = divmod(mins_left, 60)
    time_str = f"{hrs}h {mins}m" if hrs > 0 else f"{mins}m"

    if mins_left > 120:
        color = "\033[32m"  # Green >2h
    elif mins_left > 60:
        color = "\033[33m"  # Yellow 1-2h
    elif mins_left > 30:
        color = "\033[31m"  # Red <1h
    else:
        color = "\033[31;5m"  # Blinking red <30min
    return f"{color} {time_str}{reset}"
def main():
    """Assemble and print the statusline from stdin's session metadata."""
    try:
        payload = json.load(sys.stdin)
        model_name = payload.get("model", {}).get("display_name", "Claude")
        segments = [f"\033[94m[{model_name}]\033[0m"]
        branch_segment = get_git_info()
        if branch_segment:
            segments.append(branch_segment)
        segments.append(get_countdown())
        print(" \033[90m|\033[0m ".join(segments))
    except Exception as e:
        # Degrade gracefully: a broken statusline must never crash the host.
        print(f"\033[94m[Claude]\033[0m \033[31m[Error: {str(e)[:30]}]\033[0m")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/settings/statusline/deadline-countdown.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/creative-design/imagegen/scripts/image_gen.py | #!/usr/bin/env python3
"""Generate or edit images with the OpenAI Image API.
Defaults to gpt-image-1.5 and a structured prompt augmentation workflow.
"""
from __future__ import annotations
import argparse
import asyncio
import base64
import json
import os
from pathlib import Path
import re
import sys
import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
from io import BytesIO
DEFAULT_MODEL = "gpt-image-1.5"
DEFAULT_SIZE = "1024x1024"
DEFAULT_QUALITY = "auto"
DEFAULT_OUTPUT_FORMAT = "png"
# Default number of concurrent jobs for `generate-batch`.
DEFAULT_CONCURRENCY = 5
# Suffix inserted before the extension for downscaled web copies.
DEFAULT_DOWNSCALE_SUFFIX = "-web"
# Request values accepted for GPT image models; None = flag omitted.
ALLOWED_SIZES = {"1024x1024", "1536x1024", "1024x1536", "auto"}
ALLOWED_QUALITIES = {"low", "medium", "high", "auto"}
ALLOWED_BACKGROUNDS = {"transparent", "opaque", "auto", None}
# Inputs above 50MB draw a warning; batches are capped at 500 jobs.
MAX_IMAGE_BYTES = 50 * 1024 * 1024
MAX_BATCH_JOBS = 500
def _die(message: str, code: int = 1) -> None:
print(f"Error: {message}", file=sys.stderr)
raise SystemExit(code)
def _warn(message: str) -> None:
print(f"Warning: {message}", file=sys.stderr)
def _ensure_api_key(dry_run: bool) -> None:
if os.getenv("OPENAI_API_KEY"):
print("OPENAI_API_KEY is set.", file=sys.stderr)
return
if dry_run:
_warn("OPENAI_API_KEY is not set; dry-run only.")
return
_die("OPENAI_API_KEY is not set. Export it before running.")
def _read_prompt(prompt: Optional[str], prompt_file: Optional[str]) -> str:
if prompt and prompt_file:
_die("Use --prompt or --prompt-file, not both.")
if prompt_file:
path = Path(prompt_file)
if not path.exists():
_die(f"Prompt file not found: {path}")
return path.read_text(encoding="utf-8").strip()
if prompt:
return prompt.strip()
_die("Missing prompt. Use --prompt or --prompt-file.")
return "" # unreachable
def _check_image_paths(paths: Iterable[str]) -> List[Path]:
    """Validate that every input image exists; warn when one exceeds 50MB."""
    checked: List[Path] = []
    for raw in paths:
        candidate = Path(raw)
        if not candidate.exists():
            _die(f"Image file not found: {candidate}")
        if candidate.stat().st_size > MAX_IMAGE_BYTES:
            _warn(f"Image exceeds 50MB limit: {candidate}")
        checked.append(candidate)
    return checked
def _normalize_output_format(fmt: Optional[str]) -> str:
if not fmt:
return DEFAULT_OUTPUT_FORMAT
fmt = fmt.lower()
if fmt not in {"png", "jpeg", "jpg", "webp"}:
_die("output-format must be png, jpeg, jpg, or webp.")
return "jpeg" if fmt == "jpg" else fmt
def _validate_size(size: str) -> None:
    """Abort unless *size* is one of the sizes supported by GPT image models."""
    if size in ALLOWED_SIZES:
        return
    _die(
        "size must be one of 1024x1024, 1536x1024, 1024x1536, or auto for GPT image models."
    )
def _validate_quality(quality: str) -> None:
    """Abort unless *quality* is an accepted quality preset."""
    if quality in ALLOWED_QUALITIES:
        return
    _die("quality must be one of low, medium, high, or auto.")
def _validate_background(background: Optional[str]) -> None:
    """Abort unless *background* is an accepted value (None means unspecified)."""
    if background in ALLOWED_BACKGROUNDS:
        return
    _die("background must be one of transparent, opaque, or auto.")
def _validate_transparency(background: Optional[str], output_format: str) -> None:
if background == "transparent" and output_format not in {"png", "webp"}:
_die("transparent background requires output-format png or webp.")
def _validate_generate_payload(payload: Dict[str, Any]) -> None:
    """Run all request-level validations for an images.generate payload."""
    count = int(payload.get("n", 1))
    if not 1 <= count <= 10:
        _die("n must be between 1 and 10")
    _validate_size(str(payload.get("size", DEFAULT_SIZE)))
    _validate_quality(str(payload.get("quality", DEFAULT_QUALITY)))
    _validate_background(payload.get("background"))
    compression = payload.get("output_compression")
    if compression is not None and not (0 <= int(compression) <= 100):
        _die("output_compression must be between 0 and 100")
def _build_output_paths(
out: str,
output_format: str,
count: int,
out_dir: Optional[str],
) -> List[Path]:
ext = "." + output_format
if out_dir:
out_base = Path(out_dir)
out_base.mkdir(parents=True, exist_ok=True)
return [out_base / f"image_{i}{ext}" for i in range(1, count + 1)]
out_path = Path(out)
if out_path.exists() and out_path.is_dir():
out_path.mkdir(parents=True, exist_ok=True)
return [out_path / f"image_{i}{ext}" for i in range(1, count + 1)]
if out_path.suffix == "":
out_path = out_path.with_suffix(ext)
elif output_format and out_path.suffix.lstrip(".").lower() != output_format:
_warn(
f"Output extension {out_path.suffix} does not match output-format {output_format}."
)
if count == 1:
return [out_path]
return [
out_path.with_name(f"{out_path.stem}-{i}{out_path.suffix}")
for i in range(1, count + 1)
]
def _augment_prompt(args: argparse.Namespace, prompt: str) -> str:
    """Apply structured augmentation using hint fields pulled from CLI args."""
    return _augment_prompt_fields(args.augment, prompt, _fields_from_args(args))
def _augment_prompt_fields(augment: bool, prompt: str, fields: Dict[str, Optional[str]]) -> str:
if not augment:
return prompt
sections: List[str] = []
if fields.get("use_case"):
sections.append(f"Use case: {fields['use_case']}")
sections.append(f"Primary request: {prompt}")
if fields.get("scene"):
sections.append(f"Scene/background: {fields['scene']}")
if fields.get("subject"):
sections.append(f"Subject: {fields['subject']}")
if fields.get("style"):
sections.append(f"Style/medium: {fields['style']}")
if fields.get("composition"):
sections.append(f"Composition/framing: {fields['composition']}")
if fields.get("lighting"):
sections.append(f"Lighting/mood: {fields['lighting']}")
if fields.get("palette"):
sections.append(f"Color palette: {fields['palette']}")
if fields.get("materials"):
sections.append(f"Materials/textures: {fields['materials']}")
if fields.get("text"):
sections.append(f"Text (verbatim): \"{fields['text']}\"")
if fields.get("constraints"):
sections.append(f"Constraints: {fields['constraints']}")
if fields.get("negative"):
sections.append(f"Avoid: {fields['negative']}")
return "\n".join(sections)
def _fields_from_args(args: argparse.Namespace) -> Dict[str, Optional[str]]:
return {
"use_case": getattr(args, "use_case", None),
"scene": getattr(args, "scene", None),
"subject": getattr(args, "subject", None),
"style": getattr(args, "style", None),
"composition": getattr(args, "composition", None),
"lighting": getattr(args, "lighting", None),
"palette": getattr(args, "palette", None),
"materials": getattr(args, "materials", None),
"text": getattr(args, "text", None),
"constraints": getattr(args, "constraints", None),
"negative": getattr(args, "negative", None),
}
def _print_request(payload: dict) -> None:
print(json.dumps(payload, indent=2, sort_keys=True))
def _decode_and_write(images: List[str], outputs: List[Path], force: bool) -> None:
for idx, image_b64 in enumerate(images):
if idx >= len(outputs):
break
out_path = outputs[idx]
if out_path.exists() and not force:
_die(f"Output already exists: {out_path} (use --force to overwrite)")
out_path.parent.mkdir(parents=True, exist_ok=True)
out_path.write_bytes(base64.b64decode(image_b64))
print(f"Wrote {out_path}")
def _derive_downscale_path(path: Path, suffix: str) -> Path:
if suffix and not suffix.startswith("-") and not suffix.startswith("_"):
suffix = "-" + suffix
return path.with_name(f"{path.stem}{suffix}{path.suffix}")
def _downscale_image_bytes(image_bytes: bytes, *, max_dim: int, output_format: str) -> bytes:
    """Downscale an encoded image so its longest side is at most *max_dim*.

    Pillow is imported lazily so the rest of the tool works without it.
    Never upscales. JPEG output is flattened onto white because JPEG has
    no alpha channel. Returns the re-encoded image bytes.
    """
    try:
        from PIL import Image
    except Exception:
        _die(
            "Downscaling requires Pillow. Install with `uv pip install pillow` (then re-run)."
        )
    if max_dim < 1:
        _die("--downscale-max-dim must be >= 1")
    with Image.open(BytesIO(image_bytes)) as img:
        img.load()  # force full decode before we leave the context manager
        w, h = img.size
        # scale is capped at 1.0 so we only ever shrink, never enlarge.
        scale = min(1.0, float(max_dim) / float(max(w, h)))
        target = (max(1, int(round(w * scale))), max(1, int(round(h * scale))))
        resized = img if target == (w, h) else img.resize(target, Image.Resampling.LANCZOS)
        fmt = output_format.lower()
        if fmt == "jpg":
            fmt = "jpeg"
        if fmt == "jpeg":
            # JPEG cannot store alpha: composite transparent images over white.
            if resized.mode in ("RGBA", "LA") or ("transparency" in getattr(resized, "info", {})):
                bg = Image.new("RGB", resized.size, (255, 255, 255))
                bg.paste(resized.convert("RGBA"), mask=resized.convert("RGBA").split()[-1])
                resized = bg
            else:
                resized = resized.convert("RGB")
        out = BytesIO()
        resized.save(out, format=fmt.upper())
        return out.getvalue()
def _decode_write_and_downscale(
images: List[str],
outputs: List[Path],
*,
force: bool,
downscale_max_dim: Optional[int],
downscale_suffix: str,
output_format: str,
) -> None:
for idx, image_b64 in enumerate(images):
if idx >= len(outputs):
break
out_path = outputs[idx]
if out_path.exists() and not force:
_die(f"Output already exists: {out_path} (use --force to overwrite)")
out_path.parent.mkdir(parents=True, exist_ok=True)
raw = base64.b64decode(image_b64)
out_path.write_bytes(raw)
print(f"Wrote {out_path}")
if downscale_max_dim is None:
continue
derived = _derive_downscale_path(out_path, downscale_suffix)
if derived.exists() and not force:
_die(f"Output already exists: {derived} (use --force to overwrite)")
derived.parent.mkdir(parents=True, exist_ok=True)
resized = _downscale_image_bytes(raw, max_dim=downscale_max_dim, output_format=output_format)
derived.write_bytes(resized)
print(f"Wrote {derived}")
def _create_client():
    """Return a synchronous OpenAI client, aborting if the SDK is missing.

    The original bound the ImportError to an unused `exc` local; dropped.
    """
    try:
        from openai import OpenAI
    except ImportError:
        _die("openai SDK not installed. Install with `uv pip install openai`.")
    return OpenAI()
def _create_async_client():
    """Return an AsyncOpenAI client.

    Distinguishes two failure modes: the openai SDK missing entirely vs.
    installed but too old to provide AsyncOpenAI.
    """
    try:
        from openai import AsyncOpenAI
    except ImportError:
        try:
            import openai as _openai  # noqa: F401
        except ImportError:
            _die("openai SDK not installed. Install with `uv pip install openai`.")
        _die(
            "AsyncOpenAI not available in this openai SDK version. Upgrade with `uv pip install -U openai`."
        )
    return AsyncOpenAI()
def _slugify(value: str) -> str:
value = value.strip().lower()
value = re.sub(r"[^a-z0-9]+", "-", value)
value = re.sub(r"-{2,}", "-", value).strip("-")
return value[:60] if value else "job"
def _normalize_job(job: Any, idx: int) -> Dict[str, Any]:
if isinstance(job, str):
prompt = job.strip()
if not prompt:
_die(f"Empty prompt at job {idx}")
return {"prompt": prompt}
if isinstance(job, dict):
if "prompt" not in job or not str(job["prompt"]).strip():
_die(f"Missing prompt for job {idx}")
return job
_die(f"Invalid job at index {idx}: expected string or object.")
return {} # unreachable
def _read_jobs_jsonl(path: str) -> List[Dict[str, Any]]:
    """Parse a JSONL batch file into job dicts.

    Blank lines and '#' comments are skipped. Lines starting with '{' are
    parsed as JSON objects; anything else becomes a bare prompt string.
    The batch is capped at MAX_BATCH_JOBS.
    """
    source = Path(path)
    if not source.exists():
        _die(f"Input file not found: {source}")
    jobs: List[Dict[str, Any]] = []
    for line_no, raw_line in enumerate(source.read_text(encoding="utf-8").splitlines(), start=1):
        stripped = raw_line.strip()
        if not stripped or stripped.startswith("#"):
            continue
        try:
            parsed: Any = json.loads(stripped) if stripped.startswith("{") else stripped
            jobs.append(_normalize_job(parsed, idx=line_no))
        except json.JSONDecodeError as exc:
            _die(f"Invalid JSON on line {line_no}: {exc}")
    if not jobs:
        _die("No jobs found in input file.")
    if len(jobs) > MAX_BATCH_JOBS:
        _die(f"Too many jobs ({len(jobs)}). Max is {MAX_BATCH_JOBS}.")
    return jobs
def _merge_non_null(dst: Dict[str, Any], src: Dict[str, Any]) -> Dict[str, Any]:
merged = dict(dst)
for k, v in src.items():
if v is not None:
merged[k] = v
return merged
def _job_output_paths(
*,
out_dir: Path,
output_format: str,
idx: int,
prompt: str,
n: int,
explicit_out: Optional[str],
) -> List[Path]:
out_dir.mkdir(parents=True, exist_ok=True)
ext = "." + output_format
if explicit_out:
base = Path(explicit_out)
if base.suffix == "":
base = base.with_suffix(ext)
elif base.suffix.lstrip(".").lower() != output_format:
_warn(
f"Job {idx}: output extension {base.suffix} does not match output-format {output_format}."
)
base = out_dir / base.name
else:
slug = _slugify(prompt[:80])
base = out_dir / f"{idx:03d}-{slug}{ext}"
if n == 1:
return [base]
return [
base.with_name(f"{base.stem}-{i}{base.suffix}")
for i in range(1, n + 1)
]
def _extract_retry_after_seconds(exc: Exception) -> Optional[float]:
# Best-effort: openai SDK errors vary by version. Prefer a conservative fallback.
for attr in ("retry_after", "retry_after_seconds"):
val = getattr(exc, attr, None)
if isinstance(val, (int, float)) and val >= 0:
return float(val)
msg = str(exc)
m = re.search(r"retry[- ]after[:= ]+([0-9]+(?:\\.[0-9]+)?)", msg, re.IGNORECASE)
if m:
try:
return float(m.group(1))
except Exception:
return None
return None
def _is_rate_limit_error(exc: Exception) -> bool:
name = exc.__class__.__name__.lower()
if "ratelimit" in name or "rate_limit" in name:
return True
msg = str(exc).lower()
return "429" in msg or "rate limit" in msg or "too many requests" in msg
def _is_transient_error(exc: Exception) -> bool:
    """Return True for errors worth retrying (rate limits, timeouts, resets)."""
    if _is_rate_limit_error(exc):
        return True
    class_name = type(exc).__name__.lower()
    if any(tag in class_name for tag in ("timeout", "timedout", "tempor")):
        return True
    text = str(exc).lower()
    return any(tag in text for tag in ("timeout", "timed out", "connection reset"))
async def _generate_one_with_retries(
    client: Any,
    payload: Dict[str, Any],
    *,
    attempts: int,
    job_label: str,
) -> Any:
    """Call client.images.generate, retrying transient failures.

    Retries up to *attempts* times, sleeping for the server's Retry-After
    hint when one is present, otherwise capped exponential backoff
    (2^attempt seconds, max 60). Non-transient errors and the final
    failed attempt propagate immediately.
    """
    last_exc: Optional[Exception] = None
    for attempt in range(1, attempts + 1):
        try:
            return await client.images.generate(**payload)
        except Exception as exc:
            last_exc = exc
            # Permanent failures (auth, validation, ...) are not retried.
            if not _is_transient_error(exc):
                raise
            if attempt == attempts:
                raise
            sleep_s = _extract_retry_after_seconds(exc)
            if sleep_s is None:
                sleep_s = min(60.0, 2.0**attempt)
            print(
                f"{job_label} attempt {attempt}/{attempts} failed ({exc.__class__.__name__}); retrying in {sleep_s:.1f}s",
                file=sys.stderr,
            )
            await asyncio.sleep(sleep_s)
    # Defensive: the loop always returns or raises before reaching here.
    raise last_exc or RuntimeError("unknown error")
async def _run_generate_batch(args: argparse.Namespace) -> int:
    """Run all JSONL jobs concurrently; return 0 on success, 1 if any failed.

    Per-job settings override CLI-level defaults (both flat job keys and a
    nested "fields" object are honored). Concurrency is bounded by a
    semaphore; transient API errors are retried per job.
    """
    jobs = _read_jobs_jsonl(args.input)
    out_dir = Path(args.out_dir)
    base_fields = _fields_from_args(args)
    base_payload = {
        "model": args.model,
        "n": args.n,
        "size": args.size,
        "quality": args.quality,
        "background": args.background,
        "output_format": args.output_format,
        "output_compression": args.output_compression,
        "moderation": args.moderation,
    }
    if args.dry_run:
        # Dry run: print each would-be request with resolved outputs, no API calls.
        for i, job in enumerate(jobs, start=1):
            prompt = str(job["prompt"]).strip()
            fields = _merge_non_null(base_fields, job.get("fields", {}))
            # Allow flat job keys as well (use_case, scene, etc.)
            fields = _merge_non_null(fields, {k: job.get(k) for k in base_fields.keys()})
            augmented = _augment_prompt_fields(args.augment, prompt, fields)
            job_payload = dict(base_payload)
            job_payload["prompt"] = augmented
            job_payload = _merge_non_null(job_payload, {k: job.get(k) for k in base_payload.keys()})
            job_payload = {k: v for k, v in job_payload.items() if v is not None}
            _validate_generate_payload(job_payload)
            effective_output_format = _normalize_output_format(job_payload.get("output_format"))
            _validate_transparency(job_payload.get("background"), effective_output_format)
            if "output_format" in job_payload:
                job_payload["output_format"] = effective_output_format
            n = int(job_payload.get("n", 1))
            outputs = _job_output_paths(
                out_dir=out_dir,
                output_format=effective_output_format,
                idx=i,
                prompt=prompt,
                n=n,
                explicit_out=job.get("out"),
            )
            downscaled = None
            if args.downscale_max_dim is not None:
                downscaled = [
                    str(_derive_downscale_path(p, args.downscale_suffix)) for p in outputs
                ]
            _print_request(
                {
                    "endpoint": "/v1/images/generations",
                    "job": i,
                    "outputs": [str(p) for p in outputs],
                    "outputs_downscaled": downscaled,
                    **job_payload,
                }
            )
        return 0
    client = _create_async_client()
    sem = asyncio.Semaphore(args.concurrency)
    any_failed = False

    async def run_job(i: int, job: Dict[str, Any]) -> Tuple[int, Optional[str]]:
        # One job end-to-end: merge settings, call the API under the
        # semaphore with retries, then write (and optionally downscale).
        nonlocal any_failed
        prompt = str(job["prompt"]).strip()
        job_label = f"[job {i}/{len(jobs)}]"
        fields = _merge_non_null(base_fields, job.get("fields", {}))
        fields = _merge_non_null(fields, {k: job.get(k) for k in base_fields.keys()})
        augmented = _augment_prompt_fields(args.augment, prompt, fields)
        payload = dict(base_payload)
        payload["prompt"] = augmented
        payload = _merge_non_null(payload, {k: job.get(k) for k in base_payload.keys()})
        payload = {k: v for k, v in payload.items() if v is not None}
        n = int(payload.get("n", 1))
        _validate_generate_payload(payload)
        effective_output_format = _normalize_output_format(payload.get("output_format"))
        _validate_transparency(payload.get("background"), effective_output_format)
        if "output_format" in payload:
            payload["output_format"] = effective_output_format
        outputs = _job_output_paths(
            out_dir=out_dir,
            output_format=effective_output_format,
            idx=i,
            prompt=prompt,
            n=n,
            explicit_out=job.get("out"),
        )
        try:
            async with sem:
                print(f"{job_label} starting", file=sys.stderr)
                started = time.time()
                result = await _generate_one_with_retries(
                    client,
                    payload,
                    attempts=args.max_attempts,
                    job_label=job_label,
                )
                elapsed = time.time() - started
                print(f"{job_label} completed in {elapsed:.1f}s", file=sys.stderr)
            images = [item.b64_json for item in result.data]
            _decode_write_and_downscale(
                images,
                outputs,
                force=args.force,
                downscale_max_dim=args.downscale_max_dim,
                downscale_suffix=args.downscale_suffix,
                output_format=effective_output_format,
            )
            return i, None
        except Exception as exc:
            any_failed = True
            print(f"{job_label} failed: {exc}", file=sys.stderr)
            if args.fail_fast:
                raise
            return i, str(exc)

    tasks = [asyncio.create_task(run_job(i, job)) for i, job in enumerate(jobs, start=1)]
    try:
        await asyncio.gather(*tasks)
    except Exception:
        # fail-fast path: cancel the still-running jobs before propagating.
        for t in tasks:
            if not t.done():
                t.cancel()
        raise
    return 1 if any_failed else 0
def _generate_batch(args: argparse.Namespace) -> None:
    """Synchronous wrapper: run the async batch and exit non-zero on failure."""
    status = asyncio.run(_run_generate_batch(args))
    if status:
        raise SystemExit(status)
def _generate(args: argparse.Namespace) -> None:
    """Handle the `generate` subcommand: one images.generate call.

    Builds the payload from CLI args (None values dropped), validates
    format/transparency, resolves output paths, then either prints the
    request (--dry-run) or calls the API and writes the decoded images.
    """
    prompt = _read_prompt(args.prompt, args.prompt_file)
    prompt = _augment_prompt(args, prompt)
    payload = {
        "model": args.model,
        "prompt": prompt,
        "n": args.n,
        "size": args.size,
        "quality": args.quality,
        "background": args.background,
        "output_format": args.output_format,
        "output_compression": args.output_compression,
        "moderation": args.moderation,
    }
    # Omit unset options so the API applies its own defaults.
    payload = {k: v for k, v in payload.items() if v is not None}
    output_format = _normalize_output_format(args.output_format)
    _validate_transparency(args.background, output_format)
    if "output_format" in payload:
        payload["output_format"] = output_format
    output_paths = _build_output_paths(args.out, output_format, args.n, args.out_dir)
    if args.dry_run:
        _print_request({"endpoint": "/v1/images/generations", **payload})
        return
    print(
        "Calling Image API (generation). This can take up to a couple of minutes.",
        file=sys.stderr,
    )
    started = time.time()
    client = _create_client()
    result = client.images.generate(**payload)
    elapsed = time.time() - started
    print(f"Generation completed in {elapsed:.1f}s.", file=sys.stderr)
    images = [item.b64_json for item in result.data]
    _decode_write_and_downscale(
        images,
        output_paths,
        force=args.force,
        downscale_max_dim=args.downscale_max_dim,
        downscale_suffix=args.downscale_suffix,
        output_format=output_format,
    )
def _edit(args: argparse.Namespace) -> None:
    """Handle the `edit` subcommand: one images.edit call.

    Validates the input image(s) and optional PNG mask, builds the payload
    (None values dropped), then either prints the request (--dry-run) or
    calls the API with the files opened and writes the decoded results.
    """
    prompt = _read_prompt(args.prompt, args.prompt_file)
    prompt = _augment_prompt(args, prompt)
    image_paths = _check_image_paths(args.image)
    mask_path = Path(args.mask) if args.mask else None
    if mask_path:
        if not mask_path.exists():
            _die(f"Mask file not found: {mask_path}")
        if mask_path.suffix.lower() != ".png":
            _warn(f"Mask should be a PNG with an alpha channel: {mask_path}")
        if mask_path.stat().st_size > MAX_IMAGE_BYTES:
            _warn(f"Mask exceeds 50MB limit: {mask_path}")
    payload = {
        "model": args.model,
        "prompt": prompt,
        "n": args.n,
        "size": args.size,
        "quality": args.quality,
        "background": args.background,
        "output_format": args.output_format,
        "output_compression": args.output_compression,
        "input_fidelity": args.input_fidelity,
        "moderation": args.moderation,
    }
    # Omit unset options so the API applies its own defaults.
    payload = {k: v for k, v in payload.items() if v is not None}
    output_format = _normalize_output_format(args.output_format)
    _validate_transparency(args.background, output_format)
    if "output_format" in payload:
        payload["output_format"] = output_format
    output_paths = _build_output_paths(args.out, output_format, args.n, args.out_dir)
    if args.dry_run:
        # Show file paths (not open handles) in the dry-run preview.
        payload_preview = dict(payload)
        payload_preview["image"] = [str(p) for p in image_paths]
        if mask_path:
            payload_preview["mask"] = str(mask_path)
        _print_request({"endpoint": "/v1/images/edits", **payload_preview})
        return
    print(
        f"Calling Image API (edit) with {len(image_paths)} image(s).",
        file=sys.stderr,
    )
    started = time.time()
    client = _create_client()
    with _open_files(image_paths) as image_files, _open_mask(mask_path) as mask_file:
        request = dict(payload)
        # The API takes a bare handle for one image, a list for several.
        request["image"] = image_files if len(image_files) > 1 else image_files[0]
        if mask_file is not None:
            request["mask"] = mask_file
        result = client.images.edit(**request)
    elapsed = time.time() - started
    print(f"Edit completed in {elapsed:.1f}s.", file=sys.stderr)
    images = [item.b64_json for item in result.data]
    _decode_write_and_downscale(
        images,
        output_paths,
        force=args.force,
        downscale_max_dim=args.downscale_max_dim,
        downscale_suffix=args.downscale_suffix,
        output_format=output_format,
    )
def _open_files(paths: List[Path]):
    """Context manager yielding opened binary handles for all *paths*."""
    return _FileBundle(paths)
def _open_mask(mask_path: Optional[Path]):
    """Context manager for the optional mask file; yields None when absent."""
    return _NullContext() if mask_path is None else _SingleFile(mask_path)
class _NullContext:
    """No-op context manager used when no mask file was supplied."""

    def __enter__(self):
        # Yields None so callers can uniformly test `mask_file is not None`.
        return None

    def __exit__(self, exc_type, exc, tb):
        # False: never suppress exceptions raised in the with-body.
        return False
class _SingleFile:
def __init__(self, path: Path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = self._path.open("rb")
return self._handle
def __exit__(self, exc_type, exc, tb):
if self._handle:
try:
self._handle.close()
except Exception:
pass
return False
class _FileBundle:
def __init__(self, paths: List[Path]):
self._paths = paths
self._handles: List[object] = []
def __enter__(self):
self._handles = [p.open("rb") for p in self._paths]
return self._handles
def __exit__(self, exc_type, exc, tb):
for handle in self._handles:
try:
handle.close()
except Exception:
pass
return False
def _add_shared_args(parser: argparse.ArgumentParser) -> None:
    """Register the CLI flags common to generate, generate-batch, and edit."""
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--prompt")
    parser.add_argument("--prompt-file")
    parser.add_argument("--n", type=int, default=1)
    parser.add_argument("--size", default=DEFAULT_SIZE)
    parser.add_argument("--quality", default=DEFAULT_QUALITY)
    parser.add_argument("--background")
    parser.add_argument("--output-format")
    parser.add_argument("--output-compression", type=int)
    parser.add_argument("--moderation")
    parser.add_argument("--out", default="output.png")
    parser.add_argument("--out-dir")
    parser.add_argument("--force", action="store_true")
    parser.add_argument("--dry-run", action="store_true")
    # --augment / --no-augment toggle structured prompt expansion (on by default).
    parser.add_argument("--augment", dest="augment", action="store_true")
    parser.add_argument("--no-augment", dest="augment", action="store_false")
    parser.set_defaults(augment=True)
    # Prompt augmentation hints
    parser.add_argument("--use-case")
    parser.add_argument("--scene")
    parser.add_argument("--subject")
    parser.add_argument("--style")
    parser.add_argument("--composition")
    parser.add_argument("--lighting")
    parser.add_argument("--palette")
    parser.add_argument("--materials")
    parser.add_argument("--text")
    parser.add_argument("--constraints")
    parser.add_argument("--negative")
    # Post-processing (optional): generate an additional downscaled copy for fast web loading.
    parser.add_argument("--downscale-max-dim", type=int)
    parser.add_argument("--downscale-suffix", default=DEFAULT_DOWNSCALE_SUFFIX)
def main() -> int:
    """CLI entry point: parse args, validate cross-flag constraints, dispatch."""
    parser = argparse.ArgumentParser(description="Generate or edit images via the Image API")
    subparsers = parser.add_subparsers(dest="command", required=True)
    gen_parser = subparsers.add_parser("generate", help="Create a new image")
    _add_shared_args(gen_parser)
    gen_parser.set_defaults(func=_generate)
    batch_parser = subparsers.add_parser(
        "generate-batch",
        help="Generate multiple prompts concurrently (JSONL input)",
    )
    _add_shared_args(batch_parser)
    batch_parser.add_argument("--input", required=True, help="Path to JSONL file (one job per line)")
    batch_parser.add_argument("--concurrency", type=int, default=DEFAULT_CONCURRENCY)
    batch_parser.add_argument("--max-attempts", type=int, default=3)
    batch_parser.add_argument("--fail-fast", action="store_true")
    batch_parser.set_defaults(func=_generate_batch)
    edit_parser = subparsers.add_parser("edit", help="Edit an existing image")
    _add_shared_args(edit_parser)
    edit_parser.add_argument("--image", action="append", required=True)
    edit_parser.add_argument("--mask")
    edit_parser.add_argument("--input-fidelity")
    edit_parser.set_defaults(func=_edit)
    args = parser.parse_args()
    # Range checks; getattr() guards flags only some subcommands define.
    if args.n < 1 or args.n > 10:
        _die("--n must be between 1 and 10")
    if getattr(args, "concurrency", 1) < 1 or getattr(args, "concurrency", 1) > 25:
        _die("--concurrency must be between 1 and 25")
    if getattr(args, "max_attempts", 3) < 1 or getattr(args, "max_attempts", 3) > 10:
        _die("--max-attempts must be between 1 and 10")
    if args.output_compression is not None and not (0 <= args.output_compression <= 100):
        _die("--output-compression must be between 0 and 100")
    if args.command == "generate-batch" and not args.out_dir:
        _die("generate-batch requires --out-dir")
    if getattr(args, "downscale_max_dim", None) is not None and args.downscale_max_dim < 1:
        _die("--downscale-max-dim must be >= 1")
    _validate_size(args.size)
    _validate_quality(args.quality)
    _validate_background(args.background)
    _ensure_api_key(args.dry_run)
    args.func(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/creative-design/imagegen/scripts/image_gen.py",
"license": "MIT License",
"lines": 726,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/jupyter-notebook/scripts/new_notebook.py | from __future__ import annotations
import argparse
import json
import re
from pathlib import Path
from typing import Any
def slugify(text: str) -> str:
    """Convert *text* to a lowercase hyphen-separated slug ("notebook" if empty)."""
    hyphenated = re.sub(r"[^a-z0-9]+", "-", text.strip().lower())
    slug = re.sub(r"-+", "-", hyphenated).strip("-")
    return slug if slug else "notebook"
def find_repo_root(start: Path) -> Path:
    """Walk up from *start* to the first directory containing .git; default to start."""
    candidates = (start, *start.parents)
    return next((d for d in candidates if (d / ".git").exists()), start)
def load_template(skill_dir: Path, kind: str) -> dict[str, Any]:
    """Read and parse the bundled notebook template for *kind*.

    Exits with an error when the asset is missing or is not a JSON object.
    """
    if kind == "experiment":
        asset_name = "experiment-template.ipynb"
    else:
        asset_name = "tutorial-template.ipynb"
    template_path = skill_dir / "assets" / asset_name
    if not template_path.exists():
        raise SystemExit(f"Missing template: {template_path}")
    with template_path.open("r", encoding="utf-8") as handle:
        payload = json.load(handle)
    if not isinstance(payload, dict):
        raise SystemExit(f"Unexpected template shape: {template_path}")
    return payload
def update_title(notebook: dict[str, Any], kind: str, title: str) -> None:
    """Rewrite the first markdown cell's heading and normalize kernel metadata.

    Raises SystemExit when the template does not start with a markdown cell
    or its metadata is not a mapping.
    """
    label = "Experiment" if kind == "experiment" else "Tutorial"
    heading = f"# {label}: {title}\n"

    cells = notebook.get("cells")
    if not isinstance(cells, list) or not cells:
        raise SystemExit("Template notebook has no cells")
    title_cell = cells[0]
    if not isinstance(title_cell, dict) or title_cell.get("cell_type") != "markdown":
        raise SystemExit("Template notebook must start with a markdown title cell")

    # Notebook cell sources may be a single string or a list of lines.
    raw_source = title_cell.get("source", [])
    if isinstance(raw_source, str):
        lines = [raw_source]
    elif isinstance(raw_source, list):
        lines = [str(entry) for entry in raw_source]
    else:
        lines = []
    if lines:
        lines[0] = heading
    else:
        lines = [heading]
    title_cell["source"] = lines

    metadata = notebook.setdefault("metadata", {})
    if not isinstance(metadata, dict):
        raise SystemExit("Notebook metadata must be a mapping")
    language_info = metadata.setdefault("language_info", {})
    if isinstance(language_info, dict):
        # Only fill in missing keys; existing kernel metadata wins.
        language_info.setdefault("name", "python")
        language_info.setdefault("version", "3.12")
def default_output(repo_root: Path, title: str) -> Path:
    """Build the default notebook path under output/jupyter-notebook/."""
    return repo_root / "output" / "jupyter-notebook" / f"{slugify(title)}.ipynb"
def parse_args() -> argparse.Namespace:
    """Define and parse command-line options for the scaffolder."""
    cli = argparse.ArgumentParser(
        description="Scaffold a Jupyter notebook for experiments or tutorials."
    )
    cli.add_argument(
        "--kind", choices=["experiment", "tutorial"], default="experiment",
        help="Notebook style to scaffold (default: experiment).",
    )
    cli.add_argument(
        "--title", required=True,
        help="Human-readable notebook title used in the first markdown cell.",
    )
    cli.add_argument(
        "--out", type=Path, default=None,
        help="Output path for the notebook. Defaults to output/jupyter-notebook/<slug>.ipynb.",
    )
    cli.add_argument(
        "--force", action="store_true",
        help="Overwrite the output file if it already exists.",
    )
    return cli.parse_args()
def main() -> None:
    """Entry point: load the template, set its title, and write the notebook."""
    args = parse_args()
    skill_dir = Path(__file__).resolve().parents[1]
    repo_root = find_repo_root(skill_dir)

    notebook = load_template(skill_dir, args.kind)
    update_title(notebook, args.kind, args.title)

    out_path = (args.out or default_output(repo_root, args.title)).resolve()
    if out_path.exists() and not args.force:
        raise SystemExit(f"Refusing to overwrite existing file without --force: {out_path}")
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with out_path.open("w", encoding="utf-8") as handle:
        json.dump(notebook, handle, indent=2)
        handle.write("\n")  # keep a trailing newline like jupyter does
    print(f"Wrote {out_path} using kind={args.kind}.")
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/jupyter-notebook/scripts/new_notebook.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/document-processing/doc/scripts/render_docx.py | import argparse
import os
import re
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from os import makedirs, replace
from os.path import abspath, basename, exists, expanduser, join, splitext
from shutil import which
import sys
from typing import Sequence, cast
from zipfile import ZipFile
from pdf2image import convert_from_path, pdfinfo_from_path
TWIPS_PER_INCH: int = 1440
def ensure_system_tools() -> None:
    """Verify LibreOffice (soffice) and Poppler (pdftoppm) are on PATH.

    Raises RuntimeError naming every missing tool.
    """
    missing = [tool for tool in ("soffice", "pdftoppm") if which(tool) is None]
    if missing:
        tools = ", ".join(missing)
        raise RuntimeError(
            f"Missing required system tool(s): {tools}. Install LibreOffice and Poppler, then retry."
        )
def calc_dpi_via_ooxml_docx(input_path: str, max_w_px: int, max_h_px: int) -> int:
    """Derive a render DPI from the DOCX page size in word/document.xml.

    DOCX section properties store the page size (w:pgSz) in twips
    (1/1440 inch). The result is the largest isotropic DPI that keeps the
    rendered page within max_w_px x max_h_px.
    """
    with ZipFile(input_path, "r") as archive:
        document_xml = archive.read("word/document.xml")
    root = ET.fromstring(document_xml)
    w_ns = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
    # Section properties may sit at w:body/w:sectPr or under a paragraph's pPr;
    # take the first one in document order.
    sect_pr = root.find(".//w:sectPr", {"w": w_ns})
    if sect_pr is None:
        raise RuntimeError("Section properties not found in document.xml")
    pg_sz = sect_pr.find("w:pgSz", {"w": w_ns})
    if pg_sz is None:
        raise RuntimeError("Page size not found in section properties")
    # Attributes may be namespaced or bare depending on the producer.
    width_attr = pg_sz.get(f"{{{w_ns}}}w") or pg_sz.get("w")
    height_attr = pg_sz.get(f"{{{w_ns}}}h") or pg_sz.get("h")
    if not width_attr or not height_attr:
        raise RuntimeError("Page size attributes missing in pgSz")
    width_in = int(width_attr) / TWIPS_PER_INCH
    height_in = int(height_attr) / TWIPS_PER_INCH
    if width_in <= 0 or height_in <= 0:
        raise RuntimeError("Invalid page size values in document.xml")
    return round(min(max_w_px / width_in, max_h_px / height_in))
def calc_dpi_via_pdf(input_path: str, max_w_px: int, max_h_px: int) -> int:
    """Derive a render DPI by converting the input to PDF and reading its page size."""
    with tempfile.TemporaryDirectory(prefix="soffice_profile_") as user_profile:
        with tempfile.TemporaryDirectory(prefix="soffice_convert_") as convert_tmp_dir:
            stem = splitext(basename(input_path))[0]
            pdf_path = convert_to_pdf(input_path, user_profile, convert_tmp_dir, stem)
            if not (pdf_path and exists(pdf_path)):
                raise RuntimeError("Failed to convert input to PDF for DPI computation.")
            info = pdfinfo_from_path(pdf_path)
            size_val = info.get("Page size")
            if not size_val:
                # Some poppler builds label the key differently; accept any
                # "...size..." entry expressed in points.
                for key, val in info.items():
                    if isinstance(val, str) and "size" in key.lower() and "pts" in val:
                        size_val = val
                        break
            if not isinstance(size_val, str):
                raise RuntimeError("Failed to read PDF page size for DPI computation.")
            match = re.search(r"(\d+)\s*x\s*(\d+)\s*pts", size_val)
            if not match:
                raise RuntimeError("Unrecognized PDF page size format.")
            # PDF page sizes are in points (72 per inch).
            width_in = int(match.group(1)) / 72.0
            height_in = int(match.group(2)) / 72.0
            if width_in <= 0 or height_in <= 0:
                raise RuntimeError("Invalid PDF page size values.")
            return round(min(max_w_px / width_in, max_h_px / height_in))
def run_cmd_no_check(cmd: list[str]) -> None:
    """Run *cmd* silently, ignoring its exit status."""
    quiet = subprocess.DEVNULL
    subprocess.run(
        cmd,
        check=False,
        stdout=quiet,
        stderr=quiet,
        env=os.environ.copy(),
    )
def convert_to_pdf(
    doc_path: str,
    user_profile: str,
    convert_tmp_dir: str,
    stem: str,
) -> str:
    """Convert *doc_path* to PDF with LibreOffice.

    Tries a direct DOC(X) -> PDF conversion first, then falls back to a
    DOCX -> ODT -> PDF chain. Returns the PDF path, or "" on failure.
    """

    def soffice_convert(target_fmt: str, src: str) -> None:
        # Headless LibreOffice run with an isolated user profile so
        # concurrent conversions don't fight over the profile lock.
        run_cmd_no_check([
            "soffice",
            "-env:UserInstallation=file://" + user_profile,
            "--invisible",
            "--headless",
            "--norestore",
            "--convert-to",
            target_fmt,
            "--outdir",
            convert_tmp_dir,
            src,
        ])

    pdf_path = join(convert_tmp_dir, f"{stem}.pdf")
    soffice_convert("pdf", doc_path)
    if exists(pdf_path):
        return pdf_path

    # Fallback: go through an ODT intermediate.
    odt_path = join(convert_tmp_dir, f"{stem}.odt")
    soffice_convert("odt", doc_path)
    if exists(odt_path):
        soffice_convert("pdf", odt_path)
        if exists(pdf_path):
            return pdf_path
    return ""
def rasterize(
    doc_path: str,
    out_dir: str,
    dpi: int,
) -> Sequence[str]:
    """Render a DOCX-like file to PNGs named page-<N>.png inside *out_dir*.

    Returns the image paths ordered by page number (pages start at 1).
    """
    makedirs(out_dir, exist_ok=True)
    doc_path = abspath(doc_path)
    stem = splitext(basename(doc_path))[0]
    # A private LibreOffice profile avoids profile-lock clashes when several
    # conversions run concurrently; the intermediate PDF lives in its own
    # temp dir while the PNGs are written directly into out_dir.
    with tempfile.TemporaryDirectory(prefix="soffice_profile_") as user_profile:
        with tempfile.TemporaryDirectory(prefix="soffice_convert_") as convert_tmp_dir:
            pdf_path = convert_to_pdf(
                doc_path,
                user_profile,
                convert_tmp_dir,
                stem,
            )
            if not pdf_path or not exists(pdf_path):
                raise RuntimeError(
                    "Failed to produce PDF for rasterization (direct and ODT fallback)."
                )
            raw_paths = cast(
                list[str],
                convert_from_path(
                    pdf_path,
                    dpi=dpi,
                    fmt="png",
                    thread_count=8,
                    output_folder=out_dir,
                    paths_only=True,
                    output_file="page",
                ),
            )
    # pdf2image emits f'page{thread_id:04d}-{page_num:02d}.png'; rename to
    # 'page-<num>.png' and return the paths in page order.
    numbered: list[tuple[int, str]] = []
    for produced in raw_paths:
        page_num = int(splitext(basename(produced))[0].split("-")[-1])
        renamed = join(out_dir, f"page-{page_num}.png")
        replace(produced, renamed)
        numbered.append((page_num, renamed))
    numbered.sort(key=lambda pair: pair[0])
    return [path for _, path in numbered]
def main() -> None:
    """CLI entry point: parse options, pick a DPI, and rasterize the input."""
    cli = argparse.ArgumentParser(description="Render DOCX-like file to PNG images.")
    cli.add_argument(
        "input_path",
        type=str,
        help="Path to the input DOCX file (or compatible).",
    )
    cli.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help=(
            "Output directory for the rendered images. "
            "Defaults to a folder next to the input named after the input file (without extension)."
        ),
    )
    cli.add_argument(
        "--width",
        type=int,
        default=1600,
        help=(
            "Approximate maximum width in pixels after isotropic scaling (default 1600). "
            "The actual value may exceed slightly."
        ),
    )
    cli.add_argument(
        "--height",
        type=int,
        default=2000,
        help=(
            "Approximate maximum height in pixels after isotropic scaling (default 2000). "
            "The actual value may exceed slightly."
        ),
    )
    cli.add_argument(
        "--dpi",
        type=int,
        default=None,
        help=("Override computed DPI. If provided, skips DOCX/PDF-based DPI calculation."),
    )
    args = cli.parse_args()
    try:
        ensure_system_tools()
        input_path = abspath(expanduser(args.input_path))
        if args.output_dir:
            out_dir = abspath(expanduser(args.output_dir))
        else:
            out_dir = splitext(input_path)[0]
        if args.dpi is not None:
            dpi = int(args.dpi)
        else:
            try:
                # Fast path: read the page size straight from the OOXML container.
                if not input_path.lower().endswith((".docx", ".docm", ".dotx", ".dotm")):
                    raise RuntimeError("Skip OOXML DPI; not a DOCX container")
                dpi = calc_dpi_via_ooxml_docx(input_path, args.width, args.height)
            except Exception:
                # Fallback: measure the page via a PDF conversion.
                dpi = calc_dpi_via_pdf(input_path, args.width, args.height)
        rasterize(input_path, out_dir, dpi)
        print("Pages rendered to " + out_dir)
    except RuntimeError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        raise SystemExit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/document-processing/doc/scripts/render_docx.py",
"license": "MIT License",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/create_basic_spreadsheet.py | """Create a basic spreadsheet with two sheets and a simple formula.
Usage:
python3 create_basic_spreadsheet.py --output /tmp/basic_spreadsheet.xlsx
"""
from __future__ import annotations
import argparse
from pathlib import Path
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
def main() -> None:
    """Build a two-sheet workbook with sample data and a SUM formula."""
    cli = argparse.ArgumentParser(description="Create a basic spreadsheet with example data.")
    cli.add_argument(
        "--output",
        type=Path,
        default=Path("basic_spreadsheet.xlsx"),
        help="Output .xlsx path (default: basic_spreadsheet.xlsx)",
    )
    args = cli.parse_args()

    wb = Workbook()
    overview = wb.active
    overview.title = "Overview"
    employees = wb.create_sheet("Employees")

    overview["A1"] = "Description"
    overview["A2"] = "Awesome Company Report"

    # Header row plus three sample employee records.
    for record in (
        ["Title", "Name", "Address", "Score"],
        ["Engineer", "Vicky", "90 50th Street", 98],
        ["Manager", "Alex", "500 Market Street", 92],
        ["Designer", "Jordan", "200 Pine Street", 88],
    ):
        employees.append(record)
    employees["A6"] = "Total Score"
    employees["D6"] = "=SUM(D2:D4)"

    for col in range(1, 5):
        employees.column_dimensions[get_column_letter(col)].width = 20

    args.output.parent.mkdir(parents=True, exist_ok=True)
    wb.save(args.output)
    print(f"Saved workbook to {args.output}")
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/create_basic_spreadsheet.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/create_spreadsheet_with_styling.py | """Generate a styled games scoreboard workbook using openpyxl.
Usage:
python3 create_spreadsheet_with_styling.py --output /tmp/GamesSimpleStyling.xlsx
"""
from __future__ import annotations
import argparse
from pathlib import Path
from openpyxl import Workbook
from openpyxl.formatting.rule import FormulaRule
from openpyxl.styles import Alignment, Font, PatternFill
from openpyxl.utils import get_column_letter
HEADER_FILL_HEX = "B7E1CD"
HIGHLIGHT_FILL_HEX = "FFF2CC"
def apply_header_style(cell, fill_hex: str) -> None:
    """Give *cell* a solid fill, bold text, and centered alignment."""
    cell.alignment = Alignment(horizontal="center", vertical="center")
    cell.font = Font(bold=True)
    cell.fill = PatternFill("solid", fgColor=fill_hex)
def apply_highlight_style(cell, fill_hex: str) -> None:
    """Style a highlight cell with a solid fill, bold text, and centering.

    The treatment is currently identical to header cells, so delegate to
    apply_header_style; kept as a separate entry point so highlight styling
    can diverge later without touching call sites.
    """
    apply_header_style(cell, fill_hex)
def populate_game_sheet(ws) -> None:
    """Fill *ws* with the scoreboard layout, formulas, and styling."""
    ws.title = "GameX"
    ws.row_dimensions[2].height = 24
    for col, width in (("B", 18), ("C", 14), ("D", 14), ("E", 14), ("F", 40)):
        ws.column_dimensions[col].width = width

    # Header row (row 2); empty entries leave the flanking cells unstyled.
    headers = ["", "Name", "Game 1 Score", "Game 2 Score", "Total Score", "Notes", ""]
    for col_idx, header in enumerate(headers, start=1):
        cell = ws.cell(row=2, column=col_idx, value=header)
        if header:
            apply_header_style(cell, HEADER_FILL_HEX)

    # One row per player; column E totals the two game scores.
    players = [
        ("Vicky", 12, 30, "Dominated the minigames."),
        ("Yash", 20, 10, "Emily main with strong defense."),
        ("Bobby", 1000, 1030, "Numbers look suspiciously high."),
    ]
    for row_idx, (player, game1, game2, note) in enumerate(players, start=3):
        ws.cell(row=row_idx, column=2, value=player)
        ws.cell(row=row_idx, column=3, value=game1)
        ws.cell(row=row_idx, column=4, value=game2)
        ws.cell(row=row_idx, column=5, value=f"=SUM(C{row_idx}:D{row_idx})")
        ws.cell(row=row_idx, column=6, value=note)

    # Winner banner (row 7): INDEX/MATCH picks the name with the top total.
    ws.cell(row=7, column=2, value="Winner")
    ws.cell(row=7, column=3, value="=INDEX(B3:B5, MATCH(MAX(E3:E5), E3:E5, 0))")
    ws.cell(row=7, column=5, value="Congrats!")
    ws.merge_cells("C7:D7")
    for col_idx in range(2, 6):
        apply_highlight_style(ws.cell(row=7, column=col_idx), HIGHLIGHT_FILL_HEX)

    # Tint the header band whenever column A's row-2 cell holds text.
    rule = FormulaRule(formula=["LEN(A2)>0"], fill=PatternFill("solid", fgColor=HEADER_FILL_HEX))
    ws.conditional_formatting.add("A2:G2", rule)
def main() -> None:
    """Create the styled scoreboard workbook and save it to --output."""
    cli = argparse.ArgumentParser(description="Create a styled games scoreboard workbook.")
    cli.add_argument(
        "--output",
        type=Path,
        default=Path("GamesSimpleStyling.xlsx"),
        help="Output .xlsx path (default: GamesSimpleStyling.xlsx)",
    )
    args = cli.parse_args()

    wb = Workbook()
    sheet = wb.active
    populate_game_sheet(sheet)
    # Give any column the sheet hasn't sized yet a sensible default width.
    for idx in range(1, 8):
        letter = get_column_letter(idx)
        if letter not in sheet.column_dimensions:
            sheet.column_dimensions[letter].width = 12

    args.output.parent.mkdir(parents=True, exist_ok=True)
    wb.save(args.output)
    print(f"Saved workbook to {args.output}")
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/create_spreadsheet_with_styling.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/read_existing_spreadsheet.py | """Read an existing .xlsx and print a small summary.
If --input is not provided, this script creates a tiny sample workbook in /tmp
and reads that instead.
"""
from __future__ import annotations
import argparse
import tempfile
from pathlib import Path
from openpyxl import Workbook, load_workbook
def create_sample(path: Path) -> Path:
    """Write a tiny grocery workbook (with per-row total formulas) to *path*."""
    wb = Workbook()
    sheet = wb.active
    sheet.title = "Sample"
    for record in (
        ["Item", "Qty", "Price"],
        ["Apples", 3, 1.25],
        ["Oranges", 2, 0.95],
        ["Bananas", 5, 0.75],
    ):
        sheet.append(record)
    sheet["D1"] = "Total"
    for row_num in (2, 3, 4):
        sheet[f"D{row_num}"] = f"=B{row_num}*C{row_num}"
    wb.save(path)
    return path
def main() -> None:
    """Load a workbook (creating a sample when --input is omitted) and summarize it."""
    cli = argparse.ArgumentParser(description="Read an existing spreadsheet.")
    cli.add_argument("--input", type=Path, help="Path to an .xlsx file")
    args = cli.parse_args()

    if args.input:
        input_path = args.input
    else:
        input_path = Path(tempfile.gettempdir()) / "sample_read_existing.xlsx"
        create_sample(input_path)

    # data_only=False keeps formulas as strings instead of cached results.
    wb = load_workbook(input_path, data_only=False)
    print(f"Loaded: {input_path}")
    print("Sheet names:", wb.sheetnames)
    # Show at most a 5x5 preview of each sheet.
    for name in wb.sheetnames:
        sheet = wb[name]
        n_rows = sheet.max_row or 0
        n_cols = sheet.max_column or 0
        print(f"\n== {name} (rows: {n_rows}, cols: {n_cols})")
        for row in sheet.iter_rows(min_row=1, max_row=min(n_rows, 5), max_col=min(n_cols, 5)):
            print([cell.value for cell in row])
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/read_existing_spreadsheet.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/styling_spreadsheet.py | """Create a styled spreadsheet with headers, borders, and a total row.
Usage:
python3 styling_spreadsheet.py --output /tmp/styling_spreadsheet.xlsx
"""
from __future__ import annotations
import argparse
from pathlib import Path
from openpyxl import Workbook
from openpyxl.styles import Alignment, Border, Font, PatternFill, Side
def main() -> None:
    """Create the styled single-sheet score example and save it to --output."""
    cli = argparse.ArgumentParser(description="Create a styled spreadsheet example.")
    cli.add_argument(
        "--output",
        type=Path,
        default=Path("styling_spreadsheet.xlsx"),
        help="Output .xlsx path (default: styling_spreadsheet.xlsx)",
    )
    args = cli.parse_args()

    wb = Workbook()
    ws = wb.active
    ws.title = "FirstGame"

    # Banner header spanning B2:E2.
    ws.merge_cells("B2:E2")
    ws["B2"] = "Name | Game 1 Score | Game 2 Score | Total Score"
    ws["B2"].fill = PatternFill("solid", fgColor="B7E1CD")
    ws["B2"].font = Font(bold=True)
    ws["B2"].alignment = Alignment(horizontal="center", vertical="center")

    # Player rows 3-6; column E sums the two game scores.
    players = (("Vicky", 50, 60), ("John", 40, 50), ("Jane", 30, 40), ("Jim", 20, 30))
    for row, (name, game1, game2) in enumerate(players, start=3):
        ws[f"B{row}"] = name
        ws[f"C{row}"] = game1
        ws[f"D{row}"] = game2
        ws[f"E{row}"] = f"=C{row}+D{row}"

    # Grand-total cell spanning B9:E9, boxed, centered, and bold.
    ws.merge_cells("B9:E9")
    ws["B9"] = "=SUM(E3:E6)"
    edge = Side(style="thin")
    ws["B9"].border = Border(top=edge, bottom=edge, left=edge, right=edge)
    ws["B9"].alignment = Alignment(horizontal="center")
    ws["B9"].font = Font(bold=True)

    for letter in ("B", "C", "D", "E"):
        ws.column_dimensions[letter].width = 18
    ws.row_dimensions[2].height = 24

    args.output.parent.mkdir(parents=True, exist_ok=True)
    wb.save(args.output)
    print(f"Saved workbook to {args.output}")
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/document-processing/spreadsheet/references/examples/openpyxl/styling_spreadsheet.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/media/screenshot/scripts/take_screenshot.py | #!/usr/bin/env python3
"""Cross-platform screenshot helper for Codex skills."""
from __future__ import annotations
import argparse
import datetime as dt
import json
import os
import platform
import shutil
import subprocess
import tempfile
from pathlib import Path
# Locations of the bundled macOS Swift/shell helpers, resolved relative to
# this script so lookups work regardless of the caller's working directory.
SCRIPT_DIR = Path(__file__).resolve().parent
MAC_PERM_SCRIPT = SCRIPT_DIR / "macos_permissions.swift"
MAC_PERM_HELPER = SCRIPT_DIR / "ensure_macos_permissions.sh"
MAC_WINDOW_SCRIPT = SCRIPT_DIR / "macos_window_info.swift"
MAC_DISPLAY_SCRIPT = SCRIPT_DIR / "macos_display_info.swift"
# Environment variables that enable test mode and stub out platform
# detection plus window/display enumeration.
TEST_MODE_ENV = "CODEX_SCREENSHOT_TEST_MODE"
TEST_PLATFORM_ENV = "CODEX_SCREENSHOT_TEST_PLATFORM"
TEST_WINDOWS_ENV = "CODEX_SCREENSHOT_TEST_WINDOWS"
TEST_DISPLAYS_ENV = "CODEX_SCREENSHOT_TEST_DISPLAYS"
# Minimal 1x1 PNG payload written by write_test_png in place of a real capture.
TEST_PNG = (
    b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01"
    b"\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\x0cIDAT\x08\xd7c"
    b"\xf8\xff\xff?\x00\x05\xfe\x02\xfeA\xad\x1c\x1c\x00\x00\x00\x00IEND"
    b"\xaeB`\x82"
)
def parse_region(value: str) -> tuple[int, int, int, int]:
    """Parse an 'x,y,w,h' string into a 4-tuple of ints (argparse type hook).

    Raises argparse.ArgumentTypeError for malformed input or a
    non-positive width/height.
    """
    pieces = [piece.strip() for piece in value.split(",")]
    if len(pieces) != 4:
        raise argparse.ArgumentTypeError("region must be x,y,w,h")
    try:
        x, y, w, h = map(int, pieces)
    except ValueError as exc:
        raise argparse.ArgumentTypeError("region values must be integers") from exc
    if min(w, h) <= 0:
        raise argparse.ArgumentTypeError("region width and height must be positive")
    return x, y, w, h
def test_mode_enabled() -> bool:
    """True when the test-mode env var holds a truthy flag (1/true/yes/on)."""
    flag = os.environ.get(TEST_MODE_ENV, "").lower()
    return flag in {"1", "true", "yes", "on"}
def normalize_platform(value: str) -> str:
    """Map a loose platform name to platform.system() spelling.

    Unrecognized names are returned unchanged (original casing preserved).
    """
    aliases = {
        "darwin": "Darwin", "mac": "Darwin", "macos": "Darwin", "osx": "Darwin",
        "linux": "Linux", "ubuntu": "Linux",
        "windows": "Windows", "win": "Windows",
    }
    return aliases.get(value.strip().lower(), value)
def test_platform_override() -> str | None:
    """Return the normalized test platform from the env, or None when unset/empty."""
    raw = os.environ.get(TEST_PLATFORM_ENV)
    return normalize_platform(raw) if raw else None
def parse_int_list(value: str) -> list[int]:
    """Parse a comma-separated int list, silently dropping blanks and non-ints."""
    out: list[int] = []
    for token in (piece.strip() for piece in value.split(",")):
        if not token:
            continue
        try:
            out.append(int(token))
        except ValueError:
            pass
    return out
def test_window_ids() -> list[int]:
    """Window ids to use in test mode (env-configurable; defaults to 101,102)."""
    ids = parse_int_list(os.environ.get(TEST_WINDOWS_ENV, "101,102"))
    return ids if ids else [101]
def test_display_ids() -> list[int]:
    """Display indexes to use in test mode (env-configurable; defaults to 1,2)."""
    ids = parse_int_list(os.environ.get(TEST_DISPLAYS_ENV, "1,2"))
    return ids if ids else [1]
def write_test_png(path: Path) -> None:
    """Write the stub 1x1 PNG to *path*, creating parent dirs best-effort."""
    ensure_parent(path)
    path.write_bytes(TEST_PNG)
def timestamp() -> str:
    """Current local time as 'YYYY-MM-DD_HH-MM-SS' (filesystem-safe)."""
    return format(dt.datetime.now(), "%Y-%m-%d_%H-%M-%S")
def default_filename(fmt: str, prefix: str = "screenshot") -> str:
    """Build '<prefix>-<timestamp>.<fmt>' for a new capture file."""
    return "{}-{}.{}".format(prefix, timestamp(), fmt)
def mac_default_dir() -> Path:
    """macOS screenshot directory: the system 'screencapture location' if set,
    otherwise ~/Desktop."""
    fallback = Path.home() / "Desktop"
    try:
        result = subprocess.run(
            ["defaults", "read", "com.apple.screencapture", "location"],
            check=False,
            capture_output=True,
            text=True,
        )
    except OSError:
        # `defaults` is unavailable off-macOS; fall back to the Desktop.
        return fallback
    location = result.stdout.strip()
    if location:
        return Path(location).expanduser()
    return fallback
def default_dir(system: str) -> Path:
    """Return the conventional screenshot directory for *system*.

    Darwin defers to the user's configured screencapture location; all other
    systems prefer ~/Pictures/Screenshots, then ~/Pictures, then home.
    (The original had identical copy-pasted branches for Windows and the
    generic fallback; they are collapsed here — behavior is unchanged.)
    """
    if system == "Darwin":
        return mac_default_dir()
    home = Path.home()
    pictures = home / "Pictures"
    screenshots = pictures / "Screenshots"
    if screenshots.exists():
        return screenshots
    if pictures.exists():
        return pictures
    return home
def ensure_parent(path: Path) -> None:
    """Best-effort creation of *path*'s parent directory tree; errors ignored."""
    try:
        path.parent.mkdir(parents=True, exist_ok=True)
    except OSError:
        # Fall back to letting the capture command report a clearer error.
        pass
def resolve_output_path(
    requested_path: str | None, mode: str, fmt: str, system: str
) -> Path:
    """Decide where a capture should be written.

    Precedence: an explicit --path (file or directory), then the temp dir
    when mode == "temp", then the OS default screenshot directory.
    """
    if requested_path:
        target = Path(requested_path).expanduser()
        if target.exists() and target.is_dir():
            # Existing directory: drop a default-named file inside it.
            target = target / default_filename(fmt)
        elif requested_path.endswith(("/", "\\")) and not target.exists():
            # A trailing separator means "treat as a directory", even if new.
            target.mkdir(parents=True, exist_ok=True)
            target = target / default_filename(fmt)
        elif target.suffix == "":
            target = target.with_suffix(f".{fmt}")
        ensure_parent(target)
        return target

    if mode == "temp":
        candidate = Path(tempfile.gettempdir()) / default_filename(fmt, prefix="codex-shot")
    else:
        candidate = default_dir(system) / default_filename(fmt)
    ensure_parent(candidate)
    return candidate
def multi_output_paths(base: Path, suffixes: list[str]) -> list[Path]:
    """Expand *base* into one path per suffix ('stem-suffix.ext').

    A single (or empty) suffix list keeps *base* unchanged.
    """
    if len(suffixes) <= 1:
        return [base]
    results: list[Path] = []
    for tag in suffixes:
        target = base.with_name(f"{base.stem}-{tag}{base.suffix}")
        ensure_parent(target)
        results.append(target)
    return results
def run(cmd: list[str]) -> None:
    """Run *cmd*, converting a missing binary or nonzero exit into SystemExit."""
    try:
        subprocess.run(cmd, check=True)
    except FileNotFoundError as exc:
        raise SystemExit(f"required command not found: {cmd[0]}") from exc
    except subprocess.CalledProcessError as exc:
        joined = " ".join(cmd)
        raise SystemExit(f"command failed ({exc.returncode}): {joined}") from exc
def swift_json(script: Path, extra_args: list[str] | None = None) -> dict:
    """Run a bundled Swift helper script and parse its stdout as JSON.

    Reuses a module-cache directory under the temp folder across runs, and
    maps common failure modes (swift missing, sandboxed module cache,
    non-JSON output) to actionable SystemExit messages.
    """
    module_cache = Path(tempfile.gettempdir()) / "codex-swift-module-cache"
    module_cache.mkdir(parents=True, exist_ok=True)
    cmd = ["swift", "-module-cache-path", str(module_cache), str(script)]
    if extra_args:
        cmd.extend(extra_args)
    try:
        proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
    except FileNotFoundError as exc:
        raise SystemExit("swift not found; install Xcode command line tools") from exc
    except subprocess.CalledProcessError as exc:
        stderr = (exc.stderr or "").strip()
        # A blocked module cache surfaces as "Operation not permitted" in stderr.
        if "ModuleCache" in stderr and "Operation not permitted" in stderr:
            raise SystemExit(
                "swift needs module-cache access; rerun with escalated permissions"
            ) from exc
        msg = stderr or (exc.stdout or "").strip() or "swift helper failed"
        raise SystemExit(msg) from exc
    try:
        return json.loads(proc.stdout)
    except json.JSONDecodeError as exc:
        raise SystemExit(f"swift helper returned invalid JSON: {proc.stdout.strip()}") from exc
def macos_screen_capture_granted(request: bool = False) -> bool:
    """Query the Swift permissions helper for Screen Recording status.

    With request=True, the helper is asked to trigger the permission prompt.
    """
    payload = swift_json(MAC_PERM_SCRIPT, ["--request"] if request else [])
    return bool(payload.get("screenCapture"))
def ensure_macos_permissions() -> None:
    """Exit unless macOS Screen Recording permission is (or becomes) granted.

    Inside the sandbox the check cannot run at all; otherwise an interactive
    shell helper is invoked once, then the permission is re-checked rather
    than trusting the helper's exit code.
    """
    if os.environ.get("CODEX_SANDBOX"):
        raise SystemExit(
            "screen capture checks are blocked in the sandbox; rerun with escalated permissions"
        )
    if macos_screen_capture_granted():
        return
    subprocess.run(["bash", str(MAC_PERM_HELPER)], check=False)
    if not macos_screen_capture_granted():
        raise SystemExit(
            "Screen Recording permission is required; enable it in System Settings and retry"
        )
def activate_app(app: str) -> None:
    """Bring *app* to the foreground via AppleScript; failures are ignored."""
    escaped = app.replace('"', '\\"')
    subprocess.run(
        ["osascript", "-e", f'tell application "{escaped}" to activate'],
        check=False,
        capture_output=True,
        text=True,
    )
def macos_window_payload(args: argparse.Namespace, frontmost: bool, include_list: bool) -> dict:
    """Query the Swift window helper, translating CLI options into helper flags."""
    flags: list[str] = []
    if frontmost:
        flags.append("--frontmost")
    if args.app:
        flags += ["--app", args.app]
    if args.window_name:
        flags += ["--window-name", args.window_name]
    if include_list:
        flags.append("--list")
    return swift_json(MAC_WINDOW_SCRIPT, flags)
def macos_display_indexes() -> list[int]:
    """Positive display indexes from the Swift helper; [1] when none parse."""
    payload = swift_json(MAC_DISPLAY_SCRIPT)
    indexes: list[int] = []
    for raw in payload.get("displays") or []:
        try:
            index = int(raw)
        except (TypeError, ValueError):
            continue
        if index > 0:
            indexes.append(index)
    return indexes or [1]
def macos_window_ids(args: argparse.Namespace, capture_all: bool) -> list[int]:
    """Resolve the window id(s) to capture from the Swift helper payload.

    With capture_all, every listed window id is returned; otherwise (or when
    the list is empty) the helper's selected window is used. Exits when
    nothing matches.
    """
    payload = macos_window_payload(
        args,
        frontmost=args.active_window,
        include_list=capture_all,
    )
    if capture_all:
        ids: list[int] = []
        for entry in payload.get("windows") or []:
            raw_id = entry.get("id")
            if raw_id is None:
                continue
            try:
                ids.append(int(raw_id))
            except (TypeError, ValueError):
                continue
        if ids:
            return ids
    selected_id = (payload.get("selected") or {}).get("id")
    if selected_id is not None:
        try:
            return [int(selected_id)]
        except (TypeError, ValueError):
            pass
    raise SystemExit("no matching macOS window found; try --list-windows to inspect ids")
def list_macos_windows(args: argparse.Namespace) -> None:
    """Print matching windows as 'id<TAB>owner<TAB>name<TAB>WxH+X+Y' lines."""
    payload = macos_window_payload(args, frontmost=args.active_window, include_list=True)
    windows = payload.get("windows") or []
    if not windows:
        print("no matching windows found")
        return
    for entry in windows:
        bounds = entry.get("bounds") or {}
        geometry = "{}x{}+{}+{}".format(
            bounds.get("width", 0),
            bounds.get("height", 0),
            bounds.get("x", 0),
            bounds.get("y", 0),
        )
        label = entry.get("name") or ""
        print(f"{entry.get('id')}\t{entry.get('owner')}\t{label}\t{geometry}")
def list_test_macos_windows(args: argparse.Namespace) -> None:
    """Test-mode stand-in for list_macos_windows using stub ids and geometry."""
    owner = args.app or "TestApp"
    requested_name = args.window_name or ""
    ids = test_window_ids()
    if args.active_window and ids:
        ids = ids[:1]
    for position, win_id in enumerate(ids, start=1):
        label = requested_name or f"Window {position}"
        print(f"{win_id}\t{owner}\t{label}\t800x600+0+0")
def resolve_macos_windows(args: argparse.Namespace) -> list[int]:
    """Pick the window ids to capture, activating --app first when given."""
    if args.app:
        activate_app(args.app)
    # --active-window narrows capture to the helper's selected window only.
    return macos_window_ids(args, capture_all=not args.active_window)
def resolve_test_macos_windows(args: argparse.Namespace) -> list[int]:
    """Test-mode counterpart of resolve_macos_windows (stub ids only)."""
    ids = test_window_ids()
    return ids[:1] if (args.active_window and ids) else ids
def capture_macos(
    args: argparse.Namespace,
    output: Path,
    *,
    window_id: int | None = None,
    display: int | None = None,
) -> None:
    """Invoke macOS `screencapture` with flags derived from the CLI options.

    Target precedence: an explicit window id (argument overrides
    args.window_id), then --region, then the full screen / chosen display.
    """
    cmd = ["screencapture", "-x", f"-t{args.format}"]
    if args.interactive:
        cmd.append("-i")
    if display is not None:
        cmd.append(f"-D{display}")
    target_window = args.window_id if window_id is None else window_id
    if target_window is not None:
        cmd.append(f"-l{target_window}")
    elif args.region is not None:
        left, top, width, height = args.region
        cmd.append(f"-R{left},{top},{width},{height}")
    cmd.append(str(output))
    run(cmd)
def capture_linux(args: argparse.Namespace, output: Path) -> None:
    """Capture a screenshot on Linux using whichever tool is installed.

    Tool preference per capture style:
      - region: scrot, then ImageMagick `import` (root window + crop)
      - window id: ImageMagick `import` only
      - active window: scrot, gnome-screenshot, then import + xdotool
      - full screen: scrot, gnome-screenshot, then import
    Raises SystemExit when no suitable tool is available.
    """
    scrot = shutil.which("scrot")
    gnome = shutil.which("gnome-screenshot")
    imagemagick = shutil.which("import")
    xdotool = shutil.which("xdotool")
    if args.region is not None:
        x, y, w, h = args.region
        if scrot:
            run(["scrot", "-a", f"{x},{y},{w},{h}", str(output)])
            return
        if imagemagick:
            geometry = f"{w}x{h}+{x}+{y}"
            run(["import", "-window", "root", "-crop", geometry, str(output)])
            return
        raise SystemExit("region capture requires scrot or ImageMagick (import)")
    if args.window_id is not None:
        if imagemagick:
            run(["import", "-window", str(args.window_id), str(output)])
            return
        raise SystemExit("window-id capture requires ImageMagick (import)")
    if args.active_window:
        if scrot:
            run(["scrot", "-u", str(output)])
            return
        if gnome:
            run(["gnome-screenshot", "-w", "-f", str(output)])
            return
        if imagemagick and xdotool:
            # xdotool reports the focused window id for import to grab.
            win_id = (
                subprocess.check_output(["xdotool", "getactivewindow"], text=True)
                .strip()
            )
            run(["import", "-window", win_id, str(output)])
            return
        raise SystemExit("active-window capture requires scrot, gnome-screenshot, or import+xdotool")
    if scrot:
        run(["scrot", str(output)])
        return
    if gnome:
        run(["gnome-screenshot", "-f", str(output)])
        return
    if imagemagick:
        run(["import", "-window", "root", str(output)])
        return
    raise SystemExit("no supported screenshot tool found (scrot, gnome-screenshot, or import)")
def main() -> None:
    """CLI entry point: parse flags, validate combinations, and capture.

    macOS supports app/window/display targeting; Linux shells out to the
    first available X11 tool; Windows is delegated to a PowerShell script.
    A test mode (see test_mode_enabled) writes placeholder PNGs instead
    of invoking OS tools.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--path",
        help="output file path or directory; overrides --mode",
    )
    parser.add_argument(
        "--mode",
        choices=("default", "temp"),
        default="default",
        help="default saves to the OS screenshot location; temp saves to the temp dir",
    )
    parser.add_argument(
        "--format",
        default="png",
        help="image format/extension (default: png)",
    )
    parser.add_argument(
        "--app",
        help="macOS only: capture all matching on-screen windows for this app name",
    )
    parser.add_argument(
        "--window-name",
        help="macOS only: substring match for a window title (optionally scoped by --app)",
    )
    parser.add_argument(
        "--list-windows",
        action="store_true",
        help="macOS only: list matching window ids instead of capturing",
    )
    parser.add_argument(
        "--region",
        type=parse_region,
        help="capture region as x,y,w,h (pixel coordinates)",
    )
    parser.add_argument(
        "--window-id",
        type=int,
        help="capture a specific window id when supported",
    )
    parser.add_argument(
        "--active-window",
        action="store_true",
        help="capture the focused/active window only when supported",
    )
    parser.add_argument(
        "--interactive",
        action="store_true",
        help="use interactive selection where the OS tool supports it",
    )
    args = parser.parse_args()
    # Targeting modes are mutually exclusive; each bad pairing is rejected
    # explicitly so the error names the exact conflicting flags.
    if args.region and args.window_id is not None:
        raise SystemExit("choose either --region or --window-id, not both")
    if args.region and args.active_window:
        raise SystemExit("choose either --region or --active-window, not both")
    if args.window_id is not None and args.active_window:
        raise SystemExit("choose either --window-id or --active-window, not both")
    if args.app and args.window_id is not None:
        raise SystemExit("choose either --app or --window-id, not both")
    if args.region and args.app:
        raise SystemExit("choose either --region or --app, not both")
    if args.region and args.window_name:
        raise SystemExit("choose either --region or --window-name, not both")
    if args.interactive and args.app:
        raise SystemExit("choose either --interactive or --app, not both")
    if args.interactive and args.window_name:
        raise SystemExit("choose either --interactive or --window-name, not both")
    if args.interactive and args.window_id is not None:
        raise SystemExit("choose either --interactive or --window-id, not both")
    if args.interactive and args.active_window:
        raise SystemExit("choose either --interactive or --active-window, not both")
    if args.list_windows and (args.region or args.window_id is not None or args.interactive):
        raise SystemExit("--list-windows only supports --app, --window-name, and --active-window")
    test_mode = test_mode_enabled()
    system = platform.system()
    if test_mode:
        # Tests may pretend to run on another OS.
        override = test_platform_override()
        if override:
            system = override
    window_ids: list[int] = []
    display_ids: list[int] = []
    if system != "Darwin" and (args.app or args.window_name or args.list_windows):
        raise SystemExit("--app/--window-name/--list-windows are supported on macOS only")
    if system == "Darwin":
        if test_mode:
            if args.list_windows:
                list_test_macos_windows(args)
                return
            if args.window_id is not None:
                window_ids = [args.window_id]
            elif args.app or args.window_name or args.active_window:
                window_ids = resolve_test_macos_windows(args)
            elif args.region is None and not args.interactive:
                display_ids = test_display_ids()
        else:
            ensure_macos_permissions()
            if args.list_windows:
                list_macos_windows(args)
                return
            if args.window_id is not None:
                window_ids = [args.window_id]
            elif args.app or args.window_name or args.active_window:
                window_ids = resolve_macos_windows(args)
            elif args.region is None and not args.interactive:
                # Full-screen capture: one image per attached display.
                display_ids = macos_display_indexes()
    output = resolve_output_path(args.path, args.mode, args.format, system)
    if test_mode:
        # Test mode writes placeholder PNGs instead of capturing.
        if system == "Darwin":
            if window_ids:
                suffixes = [f"w{wid}" for wid in window_ids]
                paths = multi_output_paths(output, suffixes)
                for path in paths:
                    write_test_png(path)
                for path in paths:
                    print(path)
                return
            if len(display_ids) > 1:
                suffixes = [f"d{did}" for did in display_ids]
                paths = multi_output_paths(output, suffixes)
                for path in paths:
                    write_test_png(path)
                for path in paths:
                    print(path)
                return
        write_test_png(output)
        print(output)
        return
    if system == "Darwin":
        if window_ids:
            # One capture per matched window, output suffixed w<id>.
            suffixes = [f"w{wid}" for wid in window_ids]
            paths = multi_output_paths(output, suffixes)
            for wid, path in zip(window_ids, paths):
                capture_macos(args, path, window_id=wid)
            for path in paths:
                print(path)
            return
        if len(display_ids) > 1:
            # One capture per display, output suffixed d<index>.
            suffixes = [f"d{did}" for did in display_ids]
            paths = multi_output_paths(output, suffixes)
            for did, path in zip(display_ids, paths):
                capture_macos(args, path, display=did)
            for path in paths:
                print(path)
            return
        capture_macos(args, output)
    elif system == "Linux":
        capture_linux(args, output)
    elif system == "Windows":
        raise SystemExit(
            "Windows support lives in scripts/take_screenshot.ps1; run it with PowerShell"
        )
    else:
        raise SystemExit(f"unsupported platform: {system}")
    print(output)
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/media/screenshot/scripts/take_screenshot.py",
"license": "MIT License",
"lines": 506,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/media/speech/scripts/text_to_speech.py | #!/usr/bin/env python3
"""Generate speech audio with the OpenAI Audio API (TTS).
Defaults to gpt-4o-mini-tts-2025-12-15 and a built-in voice (cedar).
"""
from __future__ import annotations
import argparse
import json
import os
from pathlib import Path
import re
import sys
import time
from typing import Any, Dict, List, Optional
# Default model/voice and request-shaping limits for the OpenAI TTS API.
DEFAULT_MODEL = "gpt-4o-mini-tts-2025-12-15"
DEFAULT_VOICE = "cedar"
DEFAULT_RESPONSE_FORMAT = "mp3"
DEFAULT_SPEED = 1.0
# Inputs longer than this are rejected before any API call.
MAX_INPUT_CHARS = 4096
# Hard cap and default for client-side request pacing (requests/minute).
MAX_RPM = 50
DEFAULT_RPM = 50
DEFAULT_ATTEMPTS = 3
# Voice names accepted by --voice; validated in _normalize_voice.
ALLOWED_VOICES = {
    "alloy",
    "ash",
    "ballad",
    "cedar",
    "coral",
    "echo",
    "fable",
    "marin",
    "nova",
    "onyx",
    "sage",
    "shimmer",
    "verse",
}
# Audio container/encoding formats accepted by --response-format.
ALLOWED_FORMATS = {"mp3", "opus", "aac", "flac", "wav", "pcm"}
def _die(message: str, code: int = 1) -> None:
print(f"Error: {message}", file=sys.stderr)
raise SystemExit(code)
def _warn(message: str) -> None:
print(f"Warning: {message}", file=sys.stderr)
def _ensure_api_key(dry_run: bool) -> None:
if os.getenv("OPENAI_API_KEY"):
print("OPENAI_API_KEY is set.", file=sys.stderr)
return
if dry_run:
_warn("OPENAI_API_KEY is not set; dry-run only.")
return
_die("OPENAI_API_KEY is not set. Export it before running.")
def _read_text(text: Optional[str], text_file: Optional[str], label: str) -> str:
if text and text_file:
_die(f"Use --{label} or --{label}-file, not both.")
if text_file:
path = Path(text_file)
if not path.exists():
_die(f"{label} file not found: {path}")
return path.read_text(encoding="utf-8").strip()
if text:
return str(text).strip()
_die(f"Missing {label}. Use --{label} or --{label}-file.")
return "" # unreachable
def _validate_input(text: str) -> None:
    """Reject empty input or input beyond the API's character limit."""
    if not text:
        _die("Input text is empty.")
    if len(text) > MAX_INPUT_CHARS:
        message = f"Input text exceeds {MAX_INPUT_CHARS} characters. Split into smaller chunks."
        _die(message)
def _normalize_voice(voice: Optional[str]) -> str:
    """Lower-case and validate a voice name, defaulting when unset."""
    if not voice:
        return DEFAULT_VOICE
    normalized = str(voice).strip().lower()
    if normalized not in ALLOWED_VOICES:
        _die("voice must be one of: " + ", ".join(sorted(ALLOWED_VOICES)))
    return normalized
def _normalize_format(fmt: Optional[str]) -> str:
    """Lower-case and validate the output format, defaulting when unset."""
    if not fmt:
        return DEFAULT_RESPONSE_FORMAT
    normalized = str(fmt).strip().lower()
    if normalized not in ALLOWED_FORMATS:
        _die("response-format must be one of: " + ", ".join(sorted(ALLOWED_FORMATS)))
    return normalized
def _normalize_speed(speed: Optional[float]) -> Optional[float]:
if speed is None:
return None
try:
value = float(speed)
except ValueError:
_die("speed must be a number")
if value < 0.25 or value > 4.0:
_die("speed must be between 0.25 and 4.0")
return value
def _normalize_output_path(out: Optional[str], response_format: str) -> Path:
if out:
path = Path(out)
if path.exists() and path.is_dir():
return path / f"speech.{response_format}"
if path.suffix == "":
return path.with_suffix("." + response_format)
if path.suffix.lstrip(".").lower() != response_format:
_warn(
f"Output extension {path.suffix} does not match response-format {response_format}."
)
return path
return Path(f"speech.{response_format}")
def _create_client():
    """Build an OpenAI client, failing cleanly when the SDK is absent."""
    try:
        from openai import OpenAI
    except ImportError:
        _die("openai SDK not installed. Install with `uv pip install openai`.")
    else:
        return OpenAI()
def _extract_retry_after_seconds(exc: Exception) -> Optional[float]:
for attr in ("retry_after", "retry_after_seconds"):
val = getattr(exc, attr, None)
if isinstance(val, (int, float)) and val >= 0:
return float(val)
msg = str(exc)
m = re.search(r"retry[- ]after[:= ]+([0-9]+(?:\\.[0-9]+)?)", msg, re.IGNORECASE)
if m:
try:
return float(m.group(1))
except Exception:
return None
return None
def _is_rate_limit_error(exc: Exception) -> bool:
name = exc.__class__.__name__.lower()
if "ratelimit" in name or "rate_limit" in name:
return True
msg = str(exc).lower()
return "429" in msg or "rate limit" in msg or "too many requests" in msg
def _is_transient_error(exc: Exception) -> bool:
if _is_rate_limit_error(exc):
return True
name = exc.__class__.__name__.lower()
if "timeout" in name or "timedout" in name or "tempor" in name:
return True
msg = str(exc).lower()
return "timeout" in msg or "timed out" in msg or "connection reset" in msg
def _maybe_drop_instructions(model: str, instructions: Optional[str]) -> Optional[str]:
if instructions and model in {"tts-1", "tts-1-hd"}:
_warn("instructions are not supported for tts-1 / tts-1-hd; ignoring.")
return None
return instructions
def _print_payload(payload: Dict[str, Any]) -> None:
print(json.dumps(payload, indent=2, sort_keys=True))
def _write_audio(
    client: Any,
    payload: Dict[str, Any],
    out_path: Path,
    *,
    dry_run: bool,
    force: bool,
    attempts: int,
) -> None:
    """Stream one TTS response to disk, retrying transient failures.

    In dry-run mode only prints the payload and intended path. Otherwise
    retries up to `attempts` times, honoring a Retry-After hint when one
    is present and falling back to exponential backoff capped at 60s;
    non-transient errors are re-raised immediately.
    """
    if dry_run:
        _print_payload(payload)
        print(f"Would write {out_path}")
        return
    _ensure_api_key(dry_run)
    if out_path.exists() and not force:
        _die(f"Output already exists: {out_path} (use --force to overwrite)")
    out_path.parent.mkdir(parents=True, exist_ok=True)
    last_exc: Optional[Exception] = None
    for attempt in range(1, attempts + 1):
        try:
            # Streaming keeps memory flat for long audio responses.
            with client.audio.speech.with_streaming_response.create(**payload) as response:
                response.stream_to_file(out_path)
                print(f"Wrote {out_path}")
                return
        except Exception as exc:
            last_exc = exc
            if not _is_transient_error(exc) or attempt >= attempts:
                raise
            sleep_s = _extract_retry_after_seconds(exc)
            if sleep_s is None:
                sleep_s = min(60.0, 2.0 ** attempt)
            print(
                f"Attempt {attempt}/{attempts} failed ({exc.__class__.__name__}); retrying in {sleep_s:.1f}s",
                file=sys.stderr,
            )
            time.sleep(sleep_s)
    if last_exc:
        raise last_exc
def _slugify(value: str) -> str:
value = value.strip().lower()
value = re.sub(r"[^a-z0-9]+", "-", value)
value = re.sub(r"-+", "-", value).strip("-")
return value[:60] if value else "job"
def _read_jobs_jsonl(path: str) -> List[Dict[str, Any]]:
p = Path(path)
if not p.exists():
_die(f"Input file not found: {p}")
jobs: List[Dict[str, Any]] = []
for line_no, raw in enumerate(p.read_text(encoding="utf-8").splitlines(), start=1):
line = raw.strip()
if not line or line.startswith("#"):
continue
if line.startswith("{"):
try:
item = json.loads(line)
except json.JSONDecodeError as exc:
_die(f"Invalid JSON on line {line_no}: {exc}")
if not isinstance(item, dict):
_die(f"Invalid job on line {line_no}: expected object")
jobs.append(item)
else:
jobs.append({"input": line})
if not jobs:
_die("No jobs found in input file.")
return jobs
def _job_input(job: Dict[str, Any]) -> str:
for key in ("input", "text", "prompt"):
if key in job and str(job[key]).strip():
return str(job[key]).strip()
_die("Job missing input text (use 'input').")
return "" # unreachable
def _merge_non_null(base: Dict[str, Any], extra: Dict[str, Any]) -> Dict[str, Any]:
merged = dict(base)
for k, v in extra.items():
if v is not None:
merged[k] = v
return merged
def _enforce_rpm(rpm: int) -> int:
    """Clamp the requested request rate to the allowed ceiling."""
    if rpm <= 0:
        _die("rpm must be > 0")
    if rpm <= MAX_RPM:
        return rpm
    _warn(f"rpm capped at {MAX_RPM} (requested {rpm}).")
    return MAX_RPM
def _sleep_for_rate_limit(last_ts: Optional[float], rpm: int) -> float:
min_interval = 60.0 / float(rpm)
now = time.monotonic()
if last_ts is None:
return now
elapsed = now - last_ts
if elapsed < min_interval:
time.sleep(min_interval - elapsed)
return time.monotonic()
def _list_voices() -> None:
    """Print the supported voice names, one per line, sorted."""
    print("\n".join(sorted(ALLOWED_VOICES)))
def _run_speak(args: argparse.Namespace) -> int:
    """Handle the `speak` subcommand: synthesize one audio file.

    Returns 0 on success; validation failures exit via _die().
    """
    if args.list_voices:
        _list_voices()
        return 0
    input_text = _read_text(args.input, args.input_file, "input")
    _validate_input(input_text)
    instructions = None
    if args.instructions or args.instructions_file:
        instructions = _read_text(args.instructions, args.instructions_file, "instructions")
    model = str(args.model).strip()
    voice = _normalize_voice(args.voice)
    response_format = _normalize_format(args.response_format)
    speed = _normalize_speed(args.speed)
    # Legacy tts-1 models do not accept instructions; dropped with a warning.
    instructions = _maybe_drop_instructions(model, instructions)
    payload: Dict[str, Any] = {
        "model": model,
        "voice": voice,
        "input": input_text,
        "response_format": response_format,
    }
    if instructions:
        payload["instructions"] = instructions
    if speed is not None:
        payload["speed"] = speed
    out_path = _normalize_output_path(args.out, response_format)
    if args.dry_run:
        # Dry-run never touches the network or the SDK.
        _ensure_api_key(True)
        _print_payload(payload)
        print(f"Would write {out_path}")
        return 0
    client = _create_client()
    _write_audio(
        client,
        payload,
        out_path,
        dry_run=args.dry_run,
        force=args.force,
        attempts=args.attempts,
    )
    return 0
def _run_speak_batch(args: argparse.Namespace) -> int:
    """Handle the `speak-batch` subcommand: one TTS request per JSONL job.

    CLI flags supply the base payload; individual jobs may override
    model/voice/format/speed/instructions and the output filename.
    Requests are paced to the rpm cap and written under --out-dir.
    """
    jobs = _read_jobs_jsonl(args.input)
    out_dir = Path(args.out_dir)
    base_instructions = None
    if args.instructions or args.instructions_file:
        base_instructions = _read_text(args.instructions, args.instructions_file, "instructions")
    base_payload = {
        "model": str(args.model).strip(),
        "voice": _normalize_voice(args.voice),
        "response_format": _normalize_format(args.response_format),
        "speed": _normalize_speed(args.speed),
        "instructions": base_instructions,
    }
    rpm = _enforce_rpm(args.rpm)
    last_ts: Optional[float] = None
    if args.dry_run:
        _ensure_api_key(True)
    # Client creation is deferred so --dry-run works without the SDK.
    client = None if args.dry_run else _create_client()
    for idx, job in enumerate(jobs, start=1):
        input_text = _job_input(job)
        _validate_input(input_text)
        job_payload = dict(base_payload)
        job_payload["input"] = input_text
        # Per-job overrides get the same validation as the CLI flags.
        overrides: Dict[str, Any] = {}
        if "model" in job:
            overrides["model"] = str(job["model"]).strip()
        if "voice" in job:
            overrides["voice"] = _normalize_voice(job["voice"])
        if "response_format" in job or "format" in job:
            overrides["response_format"] = _normalize_format(job.get("response_format") or job.get("format"))
        if "speed" in job and job["speed"] is not None:
            overrides["speed"] = _normalize_speed(job["speed"])
        if "instructions" in job and str(job["instructions"]).strip():
            overrides["instructions"] = str(job["instructions"]).strip()
        job_payload = _merge_non_null(job_payload, overrides)
        job_payload["instructions"] = _maybe_drop_instructions(
            job_payload["model"], job_payload.get("instructions")
        )
        # Omit the key entirely rather than sending instructions=None.
        if job_payload.get("instructions") is None:
            job_payload.pop("instructions", None)
        response_format = job_payload["response_format"]
        explicit_out = job.get("out")
        if explicit_out:
            out_path = _normalize_output_path(str(explicit_out), response_format)
            # Job-supplied paths are always re-rooted under --out-dir.
            if out_path.is_absolute():
                out_path = out_dir / out_path.name
            else:
                out_path = out_dir / out_path
        else:
            # Default name: zero-padded index plus a slug of the input text.
            slug = _slugify(input_text[:80])
            out_path = out_dir / f"{idx:03d}-{slug}.{response_format}"
        if args.dry_run:
            _print_payload(job_payload)
            print(f"Would write {out_path}")
            continue
        last_ts = _sleep_for_rate_limit(last_ts, rpm)
        if client is None:
            client = _create_client()
        _write_audio(
            client,
            job_payload,
            out_path,
            dry_run=False,
            force=args.force,
            attempts=args.attempts,
        )
    return 0
def _add_common_args(parser: argparse.ArgumentParser) -> None:
    """Register the flags shared by the speak and speak-batch subcommands."""
    parser.add_argument(
        "--model",
        default=DEFAULT_MODEL,
        help=f"Model to use (default: {DEFAULT_MODEL})",
    )
    parser.add_argument(
        "--voice",
        default=DEFAULT_VOICE,
        help=f"Voice to use (default: {DEFAULT_VOICE})",
    )
    parser.add_argument(
        "--response-format",
        default=DEFAULT_RESPONSE_FORMAT,
        help=f"Output format (default: {DEFAULT_RESPONSE_FORMAT})",
    )
    parser.add_argument(
        "--speed",
        type=float,
        default=DEFAULT_SPEED,
        help=f"Speech speed (0.25-4.0, default: {DEFAULT_SPEED})",
    )
    parser.add_argument(
        "--instructions",
        help="Style directions for the voice",
    )
    parser.add_argument(
        "--instructions-file",
        help="Path to instructions text file",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=DEFAULT_ATTEMPTS,
        help=f"Retries on transient errors (default: {DEFAULT_ATTEMPTS})",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print payload; do not call the API",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite output files if they exist",
    )
def main() -> int:
    """CLI entry point: dispatch to list-voices / speak / speak-batch."""
    parser = argparse.ArgumentParser(
        description="Generate speech audio using the OpenAI Audio API."
    )
    subparsers = parser.add_subparsers(dest="command", required=True)
    list_voices = subparsers.add_parser("list-voices", help="List supported voices")
    # _list_voices() returns None, so `or 0` yields a 0 exit status.
    list_voices.set_defaults(func=lambda _args: (_list_voices() or 0))
    speak = subparsers.add_parser("speak", help="Generate a single audio file")
    speak.add_argument("--input", help="Input text")
    speak.add_argument("--input-file", help="Path to input text file")
    speak.add_argument("--out", help="Output file path")
    speak.add_argument(
        "--list-voices",
        action="store_true",
        help="Print voices and exit",
    )
    _add_common_args(speak)
    speak.set_defaults(func=_run_speak)
    batch = subparsers.add_parser("speak-batch", help="Generate from JSONL jobs")
    batch.add_argument("--input", required=True, help="Path to JSONL file")
    batch.add_argument(
        "--out-dir",
        default="out",
        help="Output directory (default: out)",
    )
    batch.add_argument(
        "--rpm",
        type=int,
        default=DEFAULT_RPM,
        help=f"Requests per minute cap (default: {DEFAULT_RPM}, max: {MAX_RPM})",
    )
    _add_common_args(batch)
    batch.set_defaults(func=_run_speak_batch)
    args = parser.parse_args()
    # Each subcommand handler returns the process exit code.
    return int(args.func(args))
if __name__ == "__main__":
    # Propagate main()'s int return value as the process exit status.
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/media/speech/scripts/text_to_speech.py",
"license": "MIT License",
"lines": 434,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/media/transcribe/scripts/transcribe_diarize.py | #!/usr/bin/env python3
"""Transcribe audio (optionally with speaker diarization) using OpenAI."""
from __future__ import annotations
import argparse
import base64
import json
import mimetypes
import os
from pathlib import Path
import sys
from typing import Any, Dict, List, Optional, Tuple
# Default transcription model and response handling.
DEFAULT_MODEL = "gpt-4o-mini-transcribe"
DEFAULT_RESPONSE_FORMAT = "text"
DEFAULT_CHUNKING_STRATEGY = "auto"
# Upload size limit; larger files only trigger a warning (see _validate_audio).
MAX_AUDIO_BYTES = 25 * 1024 * 1024
# At most this many known-speaker references are accepted for diarization.
MAX_KNOWN_SPEAKERS = 4
ALLOWED_RESPONSE_FORMATS = {"text", "json", "diarized_json"}
def _die(message: str, code: int = 1) -> None:
print(f"Error: {message}", file=sys.stderr)
raise SystemExit(code)
def _warn(message: str) -> None:
print(f"Warning: {message}", file=sys.stderr)
def _ensure_api_key(dry_run: bool) -> None:
if os.getenv("OPENAI_API_KEY"):
print("OPENAI_API_KEY is set.", file=sys.stderr)
return
if dry_run:
_warn("OPENAI_API_KEY is not set; dry-run only.")
return
_die("OPENAI_API_KEY is not set. Export it before running.")
def _normalize_response_format(value: Optional[str]) -> str:
    """Validate and lower-case the response format, defaulting when unset."""
    if not value:
        return DEFAULT_RESPONSE_FORMAT
    fmt = value.strip().lower()
    if fmt not in ALLOWED_RESPONSE_FORMATS:
        _die(
            "response-format must be one of: "
            + ", ".join(sorted(ALLOWED_RESPONSE_FORMATS))
        )
    return fmt
def _normalize_chunking_strategy(value: Optional[str]) -> Any:
if not value:
return DEFAULT_CHUNKING_STRATEGY
raw = str(value).strip()
if raw.startswith("{"):
try:
return json.loads(raw)
except json.JSONDecodeError:
_die("chunking-strategy JSON is invalid")
return raw
def _guess_mime_type(path: Path) -> str:
mime, _ = mimetypes.guess_type(str(path))
if mime:
return mime
return "audio/wav"
def _encode_data_url(path: Path) -> str:
data = path.read_bytes()
mime = _guess_mime_type(path)
encoded = base64.b64encode(data).decode("ascii")
return f"data:{mime};base64,{encoded}"
def _parse_known_speakers(raw_items: List[str]) -> Tuple[List[str], List[str]]:
    """Parse NAME=PATH items into parallel name / data-URL reference lists."""
    names: List[str] = []
    refs: List[str] = []
    for raw in raw_items:
        if "=" not in raw:
            _die("known-speaker must be NAME=PATH")
        name, _, path_str = raw.partition("=")
        name = name.strip()
        path_str = path_str.strip()
        if not name or not path_str:
            _die("known-speaker must be NAME=PATH")
        ref_path = Path(path_str)
        if not ref_path.exists():
            _die(f"Known speaker file not found: {ref_path}")
        names.append(name)
        refs.append(_encode_data_url(ref_path))
    if len(names) > MAX_KNOWN_SPEAKERS:
        _die(f"known speakers must be <= {MAX_KNOWN_SPEAKERS}")
    return names, refs
def _output_extension(response_format: str) -> str:
return "txt" if response_format == "text" else "json"
def _build_output_path(
audio_path: Path,
response_format: str,
out: Optional[str],
out_dir: Optional[str],
) -> Path:
ext = "." + _output_extension(response_format)
if out:
path = Path(out)
if path.exists() and path.is_dir():
return path / f"{audio_path.stem}.transcript{ext}"
if path.suffix == "":
return path.with_suffix(ext)
return path
if out_dir:
base = Path(out_dir)
base.mkdir(parents=True, exist_ok=True)
return base / f"{audio_path.stem}.transcript{ext}"
return Path(f"{audio_path.stem}.transcript{ext}")
def _create_client():
    """Build an OpenAI client, failing cleanly when the SDK is absent."""
    try:
        from openai import OpenAI
    except ImportError:
        _die("openai SDK not installed. Install with `uv pip install openai`.")
    else:
        return OpenAI()
def _format_output(result: Any, response_format: str) -> str:
if response_format == "text":
text = getattr(result, "text", None)
return text if isinstance(text, str) else str(result)
if hasattr(result, "model_dump"):
return json.dumps(result.model_dump(), indent=2)
if isinstance(result, (dict, list)):
return json.dumps(result, indent=2)
return json.dumps({"text": getattr(result, "text", str(result))}, indent=2)
def _validate_audio(path: Path) -> None:
    """Ensure the audio file exists; warn (not fail) above the 25MB limit."""
    if not path.exists():
        _die(f"Audio file not found: {path}")
    size = path.stat().st_size
    if size > MAX_AUDIO_BYTES:
        _warn(f"Audio file exceeds 25MB limit ({size} bytes): {path}")
def _build_payload(
args: argparse.Namespace,
known_speaker_names: List[str],
known_speaker_refs: List[str],
) -> Dict[str, Any]:
payload: Dict[str, Any] = {
"model": args.model,
"response_format": args.response_format,
"chunking_strategy": args.chunking_strategy,
}
if args.language:
payload["language"] = args.language
if args.prompt:
payload["prompt"] = args.prompt
if known_speaker_names:
payload["extra_body"] = {
"known_speaker_names": known_speaker_names,
"known_speaker_references": known_speaker_refs,
}
return payload
def _run_one(
client: Any,
audio_path: Path,
payload: Dict[str, Any],
) -> Any:
with audio_path.open("rb") as audio_file:
return client.audio.transcriptions.create(
file=audio_file,
**payload,
)
def main() -> None:
    """CLI entry point: validate flags, then transcribe each audio file."""
    parser = argparse.ArgumentParser(
        description="Transcribe audio (optionally with speaker diarization) using OpenAI."
    )
    parser.add_argument("audio", nargs="+", help="Audio file(s) to transcribe")
    parser.add_argument(
        "--model",
        default=DEFAULT_MODEL,
        help=f"Model to use (default: {DEFAULT_MODEL})",
    )
    parser.add_argument(
        "--response-format",
        default=DEFAULT_RESPONSE_FORMAT,
        help="Response format: text, json, or diarized_json",
    )
    parser.add_argument(
        "--chunking-strategy",
        default=DEFAULT_CHUNKING_STRATEGY,
        help="Chunking strategy (use 'auto' for long audio)",
    )
    parser.add_argument("--language", help="Optional language hint (e.g. 'en')")
    parser.add_argument("--prompt", help="Optional prompt to guide transcription")
    parser.add_argument(
        "--known-speaker",
        action="append",
        default=[],
        help="Known speaker reference as NAME=PATH (repeatable, max 4)",
    )
    parser.add_argument("--out", help="Output file path (single audio only)")
    parser.add_argument("--out-dir", help="Output directory for transcripts")
    parser.add_argument(
        "--stdout",
        action="store_true",
        help="Write transcript to stdout instead of a file",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Validate inputs and print payload without calling the API",
    )
    args = parser.parse_args()
    # Normalize in place so downstream helpers see canonical values.
    args.response_format = _normalize_response_format(args.response_format)
    args.chunking_strategy = _normalize_chunking_strategy(args.chunking_strategy)
    if args.out and len(args.audio) > 1:
        _die("--out only supports a single audio file")
    if args.stdout and (args.out or args.out_dir):
        _die("--stdout cannot be combined with --out or --out-dir")
    if args.stdout and len(args.audio) > 1:
        _die("--stdout only supports a single audio file")
    # Model-specific constraints enforced client-side before any upload.
    if args.prompt and "transcribe-diarize" in args.model:
        _die("prompt is not supported with gpt-4o-transcribe-diarize")
    if args.response_format == "diarized_json" and "transcribe-diarize" not in args.model:
        _die("diarized_json requires gpt-4o-transcribe-diarize")
    _ensure_api_key(args.dry_run)
    audio_paths = [Path(p) for p in args.audio]
    for path in audio_paths:
        _validate_audio(path)
    known_names, known_refs = _parse_known_speakers(args.known_speaker)
    if known_names and "transcribe-diarize" not in args.model:
        _warn("known-speaker references are only supported for gpt-4o-transcribe-diarize")
    payload = _build_payload(args, known_names, known_refs)
    if args.dry_run:
        print(json.dumps(payload, indent=2))
        return
    client = _create_client()
    # One request per input file; the payload is shared.
    for path in audio_paths:
        result = _run_one(client, path, payload)
        output = _format_output(result, args.response_format)
        if args.stdout:
            print(output)
            continue
        out_path = _build_output_path(path, args.response_format, args.out, args.out_dir)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(output, encoding="utf-8")
        print(f"Wrote {out_path}")
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/media/transcribe/scripts/transcribe_diarize.py",
"license": "MIT License",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/security/security-ownership-map/scripts/build_ownership_map.py | #!/usr/bin/env python3
"""Build a security ownership map from git history."""
from __future__ import annotations
import argparse
import csv
import datetime as dt
import fnmatch
import json
import math
import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Iterable
# Built-in (glob pattern, tag, weight) rules marking security-sensitive
# paths; used when --sensitive-config is not supplied.
DEFAULT_SENSITIVE_RULES: list[tuple[str, str, float]] = [
    ("**/auth/**", "auth", 1.0),
    ("**/oauth/**", "auth", 1.0),
    ("**/rbac/**", "auth", 1.0),
    ("**/session/**", "auth", 1.0),
    ("**/token/**", "auth", 1.0),
    ("**/crypto/**", "crypto", 1.0),
    ("**/tls/**", "crypto", 1.0),
    ("**/ssl/**", "crypto", 1.0),
    ("**/secrets/**", "secrets", 1.0),
    ("**/keys/**", "secrets", 1.0),
    ("**/*.pem", "secrets", 1.0),
    ("**/*.key", "secrets", 1.0),
    ("**/*.p12", "secrets", 1.0),
    ("**/*.pfx", "secrets", 1.0),
    ("**/iam/**", "auth", 1.0),
    ("**/sso/**", "auth", 1.0),
]
# Authors matching these regexes (bots) are excluded unless
# --no-default-author-excludes is passed.
DEFAULT_AUTHOR_EXCLUDE_REGEXES = [
    "dependabot",
]
# Noise paths (lockfiles, CI and editor config) dropped from the
# co-change graph by default.
DEFAULT_COCHANGE_EXCLUDES = [
    "**/Cargo.lock",
    "**/Cargo.toml",
    "**/package-lock.json",
    "**/yarn.lock",
    "**/pnpm-lock.yaml",
    "**/go.sum",
    "**/go.mod",
    "**/Gemfile.lock",
    "**/Pipfile.lock",
    "**/poetry.lock",
    "**/composer.lock",
    "**/.github/**",
    "**/.gitignore",
    "**/.gitattributes",
    "**/.gitmodules",
    "**/.editorconfig",
    "**/.vscode/**",
    "**/.idea/**",
]
def parse_args() -> argparse.Namespace:
    """Define and parse the CLI for the ownership-map builder."""
    parser = argparse.ArgumentParser(
        description="Build ownership graphs and security ownership summaries from git history."
    )
    parser.add_argument("--repo", default=".", help="Path to the git repo (default: .)")
    parser.add_argument(
        "--out",
        default="ownership-map-out",
        help="Output directory for graph artifacts",
    )
    parser.add_argument("--since", default=None, help="Limit git log to commits since date")
    parser.add_argument("--until", default=None, help="Limit git log to commits until date")
    parser.add_argument(
        "--identity",
        choices=("author", "committer"),
        default="author",
        help="Identity to attribute touches to",
    )
    parser.add_argument(
        "--date-field",
        choices=("author", "committer"),
        default="author",
        help="Date field to use for recency and bucketing",
    )
    parser.add_argument(
        "--include-merges",
        action="store_true",
        help="Include merge commits (excluded by default)",
    )
    parser.add_argument(
        "--half-life-days",
        type=float,
        default=180.0,
        help="Half life for recency weighting",
    )
    parser.add_argument(
        "--sensitive-config",
        default=None,
        help="CSV file with pattern,tag,weight for sensitive paths",
    )
    parser.add_argument(
        "--owner-threshold",
        type=float,
        default=0.5,
        help="Share threshold for hidden owner detection",
    )
    parser.add_argument(
        "--bus-factor-threshold",
        type=int,
        default=1,
        help="Bus factor threshold for hotspots",
    )
    parser.add_argument(
        "--stale-days",
        type=int,
        default=365,
        help="Days since last touch to consider stale",
    )
    parser.add_argument(
        "--min-touches",
        type=int,
        default=1,
        help="Minimum touches to keep an edge",
    )
    parser.add_argument(
        "--emit-commits",
        action="store_true",
        help="Write commit list to commits.jsonl",
    )
    parser.add_argument(
        "--author-exclude-regex",
        action="append",
        default=[],
        help="Regex for author name/email to exclude (repeatable)",
    )
    parser.add_argument(
        "--no-default-author-excludes",
        action="store_true",
        help="Disable default author excludes (dependabot)",
    )
    parser.add_argument(
        "--no-cochange",
        action="store_true",
        help="Disable co-change graph output",
    )
    parser.add_argument(
        "--cochange-max-files",
        type=int,
        default=50,
        help="Ignore commits touching more than this many files for co-change graph",
    )
    parser.add_argument(
        "--cochange-min-count",
        type=int,
        default=2,
        help="Minimum co-change count to keep file-file edge",
    )
    parser.add_argument(
        "--cochange-min-jaccard",
        type=float,
        default=0.05,
        help="Minimum Jaccard similarity to keep file-file edge",
    )
    parser.add_argument(
        "--cochange-exclude",
        action="append",
        default=[],
        help="Glob to exclude from co-change graph (repeatable)",
    )
    parser.add_argument(
        "--no-default-cochange-excludes",
        action="store_true",
        help="Disable default co-change excludes (lockfiles, .github, editor config)",
    )
    parser.add_argument(
        "--no-communities",
        dest="communities",
        action="store_false",
        help="Disable community detection (enabled by default, requires networkx)",
    )
    parser.add_argument(
        "--graphml",
        action="store_true",
        help="Emit ownership.graphml (requires networkx)",
    )
    parser.add_argument(
        "--max-community-files",
        type=int,
        default=50,
        help="Max files listed per community",
    )
    parser.add_argument(
        "--community-top-owners",
        type=int,
        default=5,
        help="Top maintainers saved per community",
    )
    # Explicit default for the --no-communities store_false flag.
    parser.set_defaults(communities=True)
    return parser.parse_args()
def load_sensitive_rules(path: str | None) -> list[tuple[str, str, float]]:
    """Load pattern,tag,weight rules from a CSV file, or built-in defaults.

    Missing tag defaults to "sensitive"; missing weight defaults to 1.0.
    Blank lines and '#' comments are skipped.
    """
    if not path:
        return list(DEFAULT_SENSITIVE_RULES)
    rules: list[tuple[str, str, float]] = []
    with open(path, "r", encoding="utf-8") as handle:
        for raw in handle:
            line = raw.strip()
            if not line or line.startswith("#"):
                continue
            fields = [field.strip() for field in line.split(",")]
            if not fields:
                continue
            pattern = fields[0]
            tag = fields[1] if len(fields) > 1 and fields[1] else "sensitive"
            weight = float(fields[2]) if len(fields) > 2 and fields[2] else 1.0
            rules.append((pattern, tag, weight))
    return rules
def parse_date(value: str) -> dt.datetime:
    """Parse ISO-8601 text; naive values are assumed to be UTC."""
    parsed = dt.datetime.fromisoformat(value)
    if parsed.tzinfo is not None:
        return parsed
    return parsed.replace(tzinfo=dt.timezone.utc)
def offset_minutes(timestamp: dt.datetime) -> int | None:
    """UTC offset of an aware datetime in whole minutes; None when naive."""
    delta = timestamp.utcoffset()
    if delta is None:
        return None
    return int(delta.total_seconds() / 60)
def format_offset(minutes: int) -> str:
    """Render a minute offset as a signed ±HH:MM string (zero maps to '+')."""
    prefix = "-" if minutes < 0 else "+"
    hours, mins = divmod(abs(minutes), 60)
    return f"{prefix}{hours:02d}:{mins:02d}"
def recency_weighted(now: dt.datetime, when: dt.datetime, half_life_days: float) -> float:
    """Exponential half-life weight for a commit's age; 1.0 disables nothing,
    but a non-positive half-life disables decay entirely."""
    if half_life_days <= 0:
        return 1.0
    age_days = (now - when).total_seconds() / 86400.0
    if age_days < 0.0:
        age_days = 0.0
    # 0.5 ** (age / half_life) == exp(-ln2 * age / half_life)
    return 0.5 ** (age_days / half_life_days)
def match_sensitive(path: str, rules: Iterable[tuple[str, str, float]]) -> dict[str, float]:
    """Accumulate sensitivity-tag weights for every rule whose glob matches *path*.

    Backslashes are normalized to '/', and a '**/'-prefixed pattern also
    matches files at the repository root (the prefix stripped).
    """
    normalized = path.replace("\\", "/")
    weights: dict[str, float] = defaultdict(float)
    for pattern, tag, weight in rules:
        candidates = [pattern]
        if pattern.startswith("**/"):
            candidates.append(pattern[3:])
        if any(fnmatch.fnmatchcase(normalized, candidate) for candidate in candidates):
            weights[tag] += weight
    return weights
def matches_glob(path: str, pattern: str) -> bool:
    """Case-sensitive glob match; '**/'-prefixed patterns also match at the root."""
    normalized = path.replace("\\", "/")
    if fnmatch.fnmatchcase(normalized, pattern):
        return True
    return pattern.startswith("**/") and fnmatch.fnmatchcase(normalized, pattern[3:])
def is_excluded(path: str, patterns: Iterable[str]) -> bool:
    """True when *path* matches at least one exclusion glob."""
    for pattern in patterns:
        if matches_glob(path, pattern):
            return True
    return False
def author_excluded(name: str, email: str, patterns: Iterable[re.Pattern[str]]) -> bool:
    """True when the combined 'name email' string matches any exclusion regex."""
    if not patterns:
        return False
    candidate = f"{name} {email}".strip()
    for pattern in patterns:
        if pattern.search(candidate):
            return True
    return False
def compute_community_owners(
    community_files: Iterable[str],
    people: dict[str, dict[str, object]],
    file_people_touches: dict[str, dict[str, int]],
    file_people_recency: dict[str, dict[str, float]],
    file_people_sensitive: dict[str, dict[str, float]],
    top_n: int,
) -> dict[str, object]:
    """Aggregate per-person stats over a community's files and rank owners.

    Sums touches, recency weight, and sensitive weight per person across
    *community_files*, then returns the *top_n* people by raw touch count
    together with each person's share of the community totals.
    """
    touches_by_person: dict[str, int] = defaultdict(int)
    recency_by_person: dict[str, float] = defaultdict(float)
    sensitive_by_person: dict[str, float] = defaultdict(float)
    for path in community_files:
        for person, touches in file_people_touches.get(path, {}).items():
            touches_by_person[person] += touches
        for person, recency in file_people_recency.get(path, {}).items():
            recency_by_person[person] += recency
        for person, weight in file_people_sensitive.get(path, {}).items():
            sensitive_by_person[person] += weight
    total_touches = sum(touches_by_person.values())
    total_recency = sum(recency_by_person.values())
    total_sensitive = sum(sensitive_by_person.values())
    # Ranking is by absolute touch count; the *_share fields below are
    # fractions of the community-wide totals.
    ranked = sorted(touches_by_person.items(), key=lambda item: item[1], reverse=True)
    owners = []
    for person_id, touches in ranked[:top_n]:
        recency = recency_by_person.get(person_id, 0.0)
        sensitive = sensitive_by_person.get(person_id, 0.0)
        owners.append(
            {
                "person_id": person_id,
                "name": people.get(person_id, {}).get("name", person_id),
                "touches": touches,
                "touch_share": round(touches / total_touches, 4) if total_touches else 0.0,
                "recency_share": round(recency / total_recency, 4) if total_recency else 0.0,
                "sensitive_share": round(sensitive / total_sensitive, 4)
                if total_sensitive
                else 0.0,
                "primary_tz_offset": people.get(person_id, {}).get("primary_tz_offset", ""),
            }
        )
    return {
        # NOTE(review): bus_factor currently equals owner_count (the number of
        # distinct contributors), not a threshold-based bus factor — confirm intent.
        "bus_factor": len(touches_by_person),
        "owner_count": len(touches_by_person),
        "totals": {
            "touches": total_touches,
            "recency_weight": round(total_recency, 6),
            "sensitive_weight": round(total_sensitive, 2),
        },
        "top_maintainers": owners,
    }
def run_git_log(
    repo: str, since: str | None, until: str | None, include_merges: bool
) -> Iterable[list[str]]:
    """Stream `git log --name-only` output as one line-batch per commit.

    Each yielded batch begins with the "---" record separator, followed by
    8 header lines (hash, parents, author name/email/date, committer
    name/email/date) and then the touched file paths (with blank lines).
    Raises RuntimeError if git exits non-zero.
    """
    cmd = [
        "git",
        "-C",
        repo,
        "log",
        "--name-only",
        "--no-renames",
        "--date=iso-strict",
        "--format=---%n%H%n%P%n%an%n%ae%n%ad%n%cn%n%ce%n%cd",
    ]
    if not include_merges:
        cmd.append("--no-merges")
    if since:
        cmd.extend(["--since", since])
    if until:
        cmd.extend(["--until", until])
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    assert proc.stdout is not None
    batch: list[str] = []
    for line in proc.stdout:
        batch.append(line.rstrip("\n"))
        # A fresh "---" separator means the previous commit's batch is
        # complete: emit everything before it and restart with the separator.
        if line.rstrip("\n") == "---" and len(batch) > 1:
            yield batch[:-1]
            batch = ["---"]
    if batch:
        yield batch
    # Drain stderr and surface git failures only after the stream is consumed.
    stderr = proc.stderr.read() if proc.stderr else ""
    exit_code = proc.wait()
    if exit_code != 0:
        raise RuntimeError(stderr.strip() or "git log failed")
def iter_commits(lines: Iterable[list[str]]) -> Iterable[tuple[dict[str, object], list[str]]]:
    """Turn raw git-log line batches into (commit_dict, touched_files) pairs.

    Batches that lack the "---" sentinel or a complete 8-line header are
    silently skipped.
    """
    for record in lines:
        if not record or record[0] != "---":
            continue
        fields = record[1:9]
        if len(fields) < 8:
            continue
        parent_hashes = [token for token in fields[1].split(" ") if token]
        commit = {
            "hash": fields[0],
            "parents": parent_hashes,
            "is_merge": len(parent_hashes) > 1,
            "author_name": fields[2],
            "author_email": fields[3],
            "author_date": fields[4],
            "committer_name": fields[5],
            "committer_email": fields[6],
            "committer_date": fields[7],
        }
        touched = [entry for entry in record[9:] if entry.strip()]
        yield commit, touched
def ensure_out_dir(path: str) -> Path:
    """Create *path* (with parents) if needed and return it as a Path."""
    target = Path(path)
    target.mkdir(parents=True, exist_ok=True)
    return target
def write_csv(path: Path, header: list[str], rows: Iterable[list[str]]) -> None:
    """Write *header* followed by *rows* to *path* as a UTF-8 CSV file."""
    with path.open("w", encoding="utf-8", newline="") as out:
        writer = csv.writer(out)
        writer.writerows([header, *rows])
def build_ownership_map(args: argparse.Namespace) -> Path:
    """Walk `git log` once and emit the ownership-map artifacts into args.out.

    Writes people.csv, files.csv, edges.csv, optional cochange_edges.csv and
    commits.jsonl, summary.json, and — when requested and networkx is
    available — communities.json plus graph JSON/GraphML files. Returns the
    output directory. Raises RuntimeError when git fails or networkx is
    required but not installed.
    """
    now = dt.datetime.now(dt.timezone.utc)
    rules = load_sensitive_rules(args.sensitive_config)
    out_dir = ensure_out_dir(args.out)
    # Aggregates keyed by identity email, file path, and (email, path) pair.
    people: dict[str, dict[str, object]] = {}
    files: dict[str, dict[str, object]] = {}
    edges: dict[tuple[str, str], dict[str, object]] = {}
    file_people_touches: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
    file_people_recency: dict[str, dict[str, float]] = defaultdict(lambda: defaultdict(float))
    file_people_sensitive: dict[str, dict[str, float]] = defaultdict(lambda: defaultdict(float))
    tag_totals: dict[str, float] = defaultdict(float)
    tag_person_totals: dict[str, dict[str, float]] = defaultdict(lambda: defaultdict(float))
    person_timezone_counts: dict[str, dict[int, int]] = defaultdict(lambda: defaultdict(int))
    # Co-change bookkeeping: pair counts plus per-file commit counts for Jaccard.
    cochange_counts: dict[tuple[str, str], int] = defaultdict(int)
    cochange_file_commits: dict[str, int] = defaultdict(int)
    cochange_commits_used = 0
    cochange_commits_skipped = 0
    cochange_commits_filtered = 0
    cochange_files_excluded = 0
    commits_path = out_dir / "commits.jsonl"
    commit_handle = None
    if args.emit_commits:
        commit_handle = commits_path.open("w", encoding="utf-8")
    total_commits_seen = 0
    total_commits_included = 0
    commits_excluded_identities = 0
    commits_excluded_merges = 0
    total_edges = 0
    author_exclude_regexes = []
    if not args.no_default_author_excludes:
        author_exclude_regexes.extend(DEFAULT_AUTHOR_EXCLUDE_REGEXES)
    author_exclude_regexes.extend(args.author_exclude_regex)
    author_exclude_patterns = [
        re.compile(pattern, re.IGNORECASE) for pattern in author_exclude_regexes
    ]
    cochange_excludes = []
    if not args.no_default_cochange_excludes:
        cochange_excludes.extend(DEFAULT_COCHANGE_EXCLUDES)
    cochange_excludes.extend(args.cochange_exclude)
    # --- Single pass over git history -----------------------------------
    log_lines = run_git_log(args.repo, args.since, args.until, args.include_merges)
    for commit, touched_files in iter_commits(log_lines):
        total_commits_seen += 1
        if commit.get("is_merge") and not args.include_merges:
            commits_excluded_merges += 1
            continue
        identity_name = commit.get(f"{args.identity}_name", "")
        identity_email = commit.get(f"{args.identity}_email", "")
        if author_excluded(
            identity_name,
            identity_email,
            author_exclude_patterns,
        ):
            commits_excluded_identities += 1
            continue
        if not touched_files:
            continue
        total_commits_included += 1
        if commit_handle:
            commit_handle.write(json.dumps({**commit, "files": touched_files}) + "\n")
        # Re-read identity with an email->name fallback so a missing email
        # still yields a usable person key.
        identity_name = commit.get(f"{args.identity}_name", "")
        identity_email = commit.get(f"{args.identity}_email", "") or identity_name
        commit_date = parse_date(commit.get(f"{args.date_field}_date", ""))
        recency = recency_weighted(now, commit_date, args.half_life_days)
        tz_minutes = offset_minutes(commit_date)
        if tz_minutes is not None:
            person_timezone_counts[identity_email][tz_minutes] += 1
        unique_files = sorted(set(touched_files))
        # Co-change pairs: skip oversized commits, drop excluded paths, then
        # count every unordered pair of files changed together. Sorting above
        # keeps (a, b) pair keys canonical.
        if not args.no_cochange and len(unique_files) > 1:
            if len(unique_files) > args.cochange_max_files:
                cochange_commits_skipped += 1
            else:
                filtered_files = [
                    path for path in unique_files if not is_excluded(path, cochange_excludes)
                ]
                excluded = len(unique_files) - len(filtered_files)
                if excluded:
                    cochange_files_excluded += excluded
                if len(filtered_files) < 2:
                    cochange_commits_filtered += 1
                if filtered_files:
                    for path in filtered_files:
                        cochange_file_commits[path] += 1
                if len(filtered_files) >= 2:
                    cochange_commits_used += 1
                    for idx, path in enumerate(filtered_files):
                        for other in filtered_files[idx + 1 :]:
                            cochange_counts[(path, other)] += 1
        person = people.setdefault(
            identity_email,
            {
                "name": identity_name,
                "email": identity_email,
                "first_seen": commit_date,
                "last_seen": commit_date,
                "commit_count": 0,
                "touches": 0,
                "sensitive_touches": 0.0,
            },
        )
        person["commit_count"] = int(person["commit_count"]) + 1
        person["first_seen"] = min(person["first_seen"], commit_date)
        person["last_seen"] = max(person["last_seen"], commit_date)
        # Per-file and per-(person, file) edge accumulation. Note this uses
        # the raw touched_files list, so a path repeated in one commit's
        # output would count multiple touches.
        for path in touched_files:
            file_entry = files.setdefault(
                path,
                {
                    "path": path,
                    "first_seen": commit_date,
                    "last_seen": commit_date,
                    "commit_count": 0,
                    "touches": 0,
                    "authors": set(),
                    "sensitive_tags": {},
                },
            )
            file_entry["commit_count"] = int(file_entry["commit_count"]) + 1
            file_entry["first_seen"] = min(file_entry["first_seen"], commit_date)
            file_entry["last_seen"] = max(file_entry["last_seen"], commit_date)
            file_entry["touches"] = int(file_entry["touches"]) + 1
            file_entry["authors"].add(identity_email)
            edge = edges.setdefault(
                (identity_email, path),
                {
                    "touches": 0,
                    "first_seen": commit_date,
                    "last_seen": commit_date,
                    "recency_weight": 0.0,
                    "sensitive_weight": 0.0,
                },
            )
            edge["touches"] = int(edge["touches"]) + 1
            edge["first_seen"] = min(edge["first_seen"], commit_date)
            edge["last_seen"] = max(edge["last_seen"], commit_date)
            edge["recency_weight"] = float(edge["recency_weight"]) + recency
            tags = match_sensitive(path, rules)
            if tags:
                # Overwrite is fine: tags are a pure function of the path.
                file_entry["sensitive_tags"] = tags
                sensitive_weight = sum(tags.values())
                edge["sensitive_weight"] = float(edge["sensitive_weight"]) + sensitive_weight
                person["sensitive_touches"] = float(person["sensitive_touches"]) + sensitive_weight
                file_people_sensitive[path][identity_email] += sensitive_weight
                for tag, weight in tags.items():
                    tag_totals[tag] += weight
                    tag_person_totals[tag][identity_email] += weight
            person["touches"] = int(person["touches"]) + 1
            file_people_touches[path][identity_email] += 1
            file_people_recency[path][identity_email] += recency
            total_edges += 1
    if commit_handle:
        commit_handle.close()
    # --- people.csv rows -------------------------------------------------
    people_rows = []
    for email, person in sorted(people.items()):
        tz_counts = person_timezone_counts.get(email, {})
        primary_tz_offset = ""
        primary_tz_minutes = ""
        timezone_offsets = ""
        if tz_counts:
            # Most frequent offset wins; ties break toward the larger offset.
            primary_tz_minutes_value = max(tz_counts.items(), key=lambda item: (item[1], item[0]))[
                0
            ]
            primary_tz_offset = format_offset(primary_tz_minutes_value)
            primary_tz_minutes = str(primary_tz_minutes_value)
            timezone_offsets = ";".join(
                f"{format_offset(minutes)}:{count}"
                for minutes, count in sorted(tz_counts.items(), key=lambda item: item[0])
            )
        # Stored back so compute_community_owners can surface it later.
        person["primary_tz_offset"] = primary_tz_offset
        people_rows.append(
            [
                email,
                str(person["name"]),
                email,
                person["first_seen"].isoformat(),
                person["last_seen"].isoformat(),
                str(person["commit_count"]),
                str(person["touches"]),
                f"{person['sensitive_touches']:.2f}",
                primary_tz_offset,
                primary_tz_minutes,
                timezone_offsets,
            ]
        )
    # --- files.csv rows --------------------------------------------------
    file_rows = []
    for path, file_entry in sorted(files.items()):
        authors = file_entry["authors"]
        bus_factor = len(authors)
        tags = file_entry["sensitive_tags"]
        tag_list = ";".join(sorted(tags.keys()))
        sensitivity_score = sum(tags.values()) if tags else 0.0
        file_rows.append(
            [
                path,
                path,
                file_entry["first_seen"].isoformat(),
                file_entry["last_seen"].isoformat(),
                str(file_entry["commit_count"]),
                str(file_entry["touches"]),
                str(bus_factor),
                f"{sensitivity_score:.2f}",
                tag_list,
            ]
        )
    # --- edges.csv rows (filtered by --min-touches) -----------------------
    edge_rows = []
    for (email, path), edge in edges.items():
        if int(edge["touches"]) < args.min_touches:
            continue
        edge_rows.append(
            [
                email,
                path,
                str(edge["touches"]),
                f"{edge['recency_weight']:.6f}",
                edge["first_seen"].isoformat(),
                edge["last_seen"].isoformat(),
                f"{edge['sensitive_weight']:.2f}",
            ]
        )
    # --- co-change rows: keep pairs above count and Jaccard thresholds ----
    cochange_rows: list[list[str]] = []
    if not args.no_cochange:
        for (file_a, file_b), count in cochange_counts.items():
            if count < args.cochange_min_count:
                continue
            commits_a = cochange_file_commits.get(file_a, 0)
            commits_b = cochange_file_commits.get(file_b, 0)
            # Jaccard over commit sets: |A∩B| / |A∪B|.
            denom = commits_a + commits_b - count
            if denom <= 0:
                continue
            jaccard = count / denom
            if jaccard < args.cochange_min_jaccard:
                continue
            cochange_rows.append([file_a, file_b, str(count), f"{jaccard:.6f}"])
    write_csv(
        out_dir / "people.csv",
        [
            "person_id",
            "name",
            "email",
            "first_seen",
            "last_seen",
            "commit_count",
            "touches",
            "sensitive_touches",
            "primary_tz_offset",
            "primary_tz_minutes",
            "timezone_offsets",
        ],
        people_rows,
    )
    write_csv(
        out_dir / "files.csv",
        [
            "file_id",
            "path",
            "first_seen",
            "last_seen",
            "commit_count",
            "touches",
            "bus_factor",
            "sensitivity_score",
            "sensitivity_tags",
        ],
        file_rows,
    )
    write_csv(
        out_dir / "edges.csv",
        [
            "person_id",
            "file_id",
            "touches",
            "recency_weight",
            "first_seen",
            "last_seen",
            "sensitive_weight",
        ],
        edge_rows,
    )
    if not args.no_cochange:
        write_csv(
            out_dir / "cochange_edges.csv",
            [
                "file_a",
                "file_b",
                "cochange_count",
                "jaccard",
            ],
            cochange_rows,
        )
    # --- Risk findings over sensitive files -------------------------------
    orphaned_sensitive_code = []
    bus_factor_hotspots = []
    for path, file_entry in files.items():
        tags = file_entry["sensitive_tags"]
        if not tags:
            continue
        bus_factor = len(file_entry["authors"])
        last_seen = file_entry["last_seen"]
        age_days = (now - last_seen).days
        top_owner = None
        if path in file_people_touches:
            top_owner = max(file_people_touches[path].items(), key=lambda item: item[1])[0]
        hotspot = {
            "path": path,
            "bus_factor": bus_factor,
            "last_touch": last_seen.isoformat(),
            "sensitivity_tags": sorted(tags.keys()),
            "top_owner": top_owner,
        }
        if bus_factor <= args.bus_factor_threshold:
            bus_factor_hotspots.append(hotspot)
        if age_days >= args.stale_days:
            orphaned_sensitive_code.append(
                {
                    **hotspot,
                    "last_security_touch": last_seen.isoformat(),
                }
            )
    # "Hidden owners": one person holding >= owner_threshold of a tag's weight.
    hidden_owners = []
    for tag, total in tag_totals.items():
        if total <= 0:
            continue
        person_totals = tag_person_totals[tag]
        if not person_totals:
            continue
        top_email, top_value = max(person_totals.items(), key=lambda item: item[1])
        share = top_value / total
        if share >= args.owner_threshold:
            person_name = people.get(top_email, {}).get("name", top_email)
            hidden_owners.append(
                {
                    "person": top_email,
                    "name": person_name,
                    "controls": f"{share * 100:.0f}% of {tag} code",
                    "category": tag,
                    "share": round(share, 4),
                }
            )
    # --- summary.json ------------------------------------------------------
    summary = {
        "generated_at": now.isoformat(),
        "repo": os.path.abspath(args.repo),
        "parameters": {
            "since": args.since,
            "until": args.until,
            "half_life_days": args.half_life_days,
            "bus_factor_threshold": args.bus_factor_threshold,
            "stale_days": args.stale_days,
            "owner_threshold": args.owner_threshold,
            "sensitive_config": args.sensitive_config,
            "identity": args.identity,
            "date_field": args.date_field,
            "include_merges": args.include_merges,
            "cochange_enabled": not args.no_cochange,
            "cochange_max_files": args.cochange_max_files,
            "cochange_min_count": args.cochange_min_count,
            "cochange_min_jaccard": args.cochange_min_jaccard,
            "cochange_default_excludes": not args.no_default_cochange_excludes,
            "cochange_excludes": cochange_excludes,
            "author_default_excludes": not args.no_default_author_excludes,
            "author_exclude_regexes": author_exclude_regexes,
            "community_top_owners": args.community_top_owners,
        },
        "orphaned_sensitive_code": orphaned_sensitive_code,
        "hidden_owners": hidden_owners,
        "bus_factor_hotspots": bus_factor_hotspots,
        "stats": {
            "commits": total_commits_included,
            "commits_seen": total_commits_seen,
            "commits_excluded_identities": commits_excluded_identities,
            "commits_excluded_merges": commits_excluded_merges,
            "edges": total_edges,
            "people": len(people),
            "files": len(files),
            "cochange_pairs_total": len(cochange_counts) if not args.no_cochange else 0,
            "cochange_edges": len(cochange_rows) if not args.no_cochange else 0,
            "cochange_commits_used": cochange_commits_used if not args.no_cochange else 0,
            "cochange_commits_skipped": cochange_commits_skipped if not args.no_cochange else 0,
            "cochange_commits_filtered": cochange_commits_filtered if not args.no_cochange else 0,
            "cochange_files_excluded": cochange_files_excluded if not args.no_cochange else 0,
        },
    }
    with (out_dir / "summary.json").open("w", encoding="utf-8") as handle:
        json.dump(summary, handle, indent=2)
    # --- Optional graph / community outputs (need networkx) ---------------
    if args.communities or args.graphml:
        try:
            import networkx as nx
            from networkx.algorithms import bipartite
        except ImportError:
            raise RuntimeError(
                "networkx is required for communities/graphml output. Install with: pip install networkx"
            )
        else:
            graph_bipartite = None
            graph_cochange = None
            person_nodes = set()
            file_nodes = set()
            community_index: dict[str, int] = {}
            community_metadata: list[dict[str, object]] = []
            # Person<->file bipartite graph: needed for GraphML and as the
            # community fallback when no co-change graph is available.
            if args.graphml or (args.communities and (args.no_cochange or not cochange_rows)):
                graph_bipartite = nx.Graph()
                for (email, path), edge in edges.items():
                    if int(edge["touches"]) < args.min_touches:
                        continue
                    graph_bipartite.add_node(email, node_type="person")
                    graph_bipartite.add_node(path, node_type="file")
                    graph_bipartite.add_edge(email, path, weight=float(edge["touches"]))
                    person_nodes.add(email)
                    file_nodes.add(path)
            if not args.no_cochange and cochange_rows:
                graph_cochange = nx.Graph()
                for file_a, file_b, count, jaccard in cochange_rows:
                    graph_cochange.add_edge(
                        file_a,
                        file_b,
                        weight=float(jaccard),
                        count=int(count),
                    )
            if args.communities:
                communities_result = None
                # Prefer the co-change graph; otherwise project the bipartite
                # graph onto files and detect communities there.
                if graph_cochange is not None:
                    communities_result = list(
                        nx.algorithms.community.greedy_modularity_communities(
                            graph_cochange, weight="weight"
                        )
                    )
                elif graph_bipartite is not None and file_nodes:
                    projected = bipartite.weighted_projected_graph(graph_bipartite, file_nodes)
                    communities_result = list(
                        nx.algorithms.community.greedy_modularity_communities(projected)
                    )
                if communities_result is not None:
                    serialized = []
                    for idx, community in enumerate(communities_result, start=1):
                        files_list = sorted(community)
                        owners = compute_community_owners(
                            files_list,
                            people,
                            file_people_touches,
                            file_people_recency,
                            file_people_sensitive,
                            args.community_top_owners,
                        )
                        for path in files_list:
                            community_index[path] = idx
                        entry = {
                            "id": idx,
                            "size": len(files_list),
                            "files": files_list[: args.max_community_files],
                            "maintainers": owners["top_maintainers"],
                            "bus_factor": owners["bus_factor"],
                            "owner_count": owners["owner_count"],
                            "totals": owners["totals"],
                        }
                        serialized.append(entry)
                        # Metadata copy without the (possibly truncated) file list.
                        metadata = dict(entry)
                        metadata.pop("files", None)
                        community_metadata.append(metadata)
                    with (out_dir / "communities.json").open("w", encoding="utf-8") as handle:
                        json.dump(serialized, handle, indent=2)
            if args.communities:
                # Tag graph nodes with their community id for downstream tools.
                for node, community_id in community_index.items():
                    if graph_cochange is not None and node in graph_cochange:
                        graph_cochange.nodes[node]["community_id"] = community_id
                    if graph_bipartite is not None and node in graph_bipartite:
                        graph_bipartite.nodes[node]["community_id"] = community_id
            graph_for_json = graph_cochange or graph_bipartite
            if graph_for_json is not None:
                try:
                    from networkx.readwrite import json_graph
                except ImportError:
                    pass
                else:
                    data = json_graph.node_link_data(graph_for_json, edges="edges")
                    data.setdefault("graph", {})
                    data["graph"]["community_maintainers"] = community_metadata
                    json_name = (
                        "cochange.graph.json"
                        if graph_for_json is graph_cochange
                        else "ownership.graph.json"
                    )
                    with (out_dir / json_name).open("w", encoding="utf-8") as handle:
                        json.dump(data, handle, indent=2)
            if args.graphml:
                if graph_bipartite is not None:
                    nx.write_graphml(graph_bipartite, out_dir / "ownership.graphml")
                if graph_cochange is not None:
                    nx.write_graphml(graph_cochange, out_dir / "cochange.graphml")
    return out_dir
def main() -> int:
    """CLI entry point: build the ownership map, reporting failures on stderr."""
    options = parse_args()
    try:
        destination = build_ownership_map(options)
    except RuntimeError as error:
        print(str(error), file=sys.stderr)
        return 1
    print(f"Ownership map written to {destination}")
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/security/security-ownership-map/scripts/build_ownership_map.py",
"license": "MIT License",
"lines": 871,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
#!/usr/bin/env python3
"""Report monthly maintainers for a file's community."""
from __future__ import annotations
import argparse
import csv
import datetime as dt
import json
import math
import re
import subprocess
import sys
from collections import Counter, defaultdict
from pathlib import Path
from typing import Iterable
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the community-maintainers report.

    Reads the outputs of build_ownership_map.py from --data-dir and locates
    a community either by a file it contains (--file) or directly by id
    (--community-id).
    """
    parser = argparse.ArgumentParser(
        description="Compute maintainers for a file's community over time."
    )
    parser.add_argument(
        "--data-dir",
        default="ownership-map-out",
        help="Directory containing graph outputs",
    )
    parser.add_argument(
        "--repo",
        default=None,
        help="Git repo path (required if commits.jsonl is missing)",
    )
    parser.add_argument(
        "--file",
        default=None,
        help="File path (exact or substring) to locate community",
    )
    parser.add_argument(
        "--community-id",
        type=int,
        default=None,
        help="Community id to analyze",
    )
    parser.add_argument(
        "--since",
        default=None,
        help="Filter commits since date (ISO or 'YYYY-MM-DD')",
    )
    parser.add_argument(
        "--until",
        default=None,
        help="Filter commits until date (ISO or 'YYYY-MM-DD')",
    )
    parser.add_argument(
        "--identity",
        choices=("author", "committer"),
        default="author",
        help="Identity to attribute touches to",
    )
    parser.add_argument(
        "--date-field",
        choices=("author", "committer"),
        default="author",
        help="Date field to use for bucketing",
    )
    parser.add_argument(
        "--include-merges",
        action="store_true",
        help="Include merge commits (excluded by default)",
    )
    parser.add_argument(
        "--top",
        type=int,
        default=5,
        help="Top maintainers per month",
    )
    parser.add_argument(
        "--bucket",
        choices=("month", "quarter"),
        default="month",
        help="Time bucket for grouping",
    )
    parser.add_argument(
        "--touch-mode",
        choices=("commit", "file"),
        default="commit",
        help="Count one touch per commit or one per file touched",
    )
    parser.add_argument(
        "--window-days",
        type=int,
        default=0,
        help="Use a rolling window of N days ending each month (0 = calendar month only)",
    )
    parser.add_argument(
        "--weight",
        choices=("touches", "recency"),
        default="touches",
        help="Weight touches by recency using exponential decay",
    )
    parser.add_argument(
        "--half-life-days",
        type=float,
        default=180.0,
        help="Half-life days for recency weighting",
    )
    parser.add_argument(
        "--min-share",
        type=float,
        default=0.0,
        help="Minimum share within a month to include a maintainer",
    )
    parser.add_argument(
        "--ignore-author-regex",
        default=None,
        help="Regex to skip authors by name or email (e.g., '(bot|dependabot)')",
    )
    parser.add_argument(
        "--min-touches",
        type=int,
        default=1,
        help="Minimum touches per month to include a maintainer",
    )
    return parser.parse_args()
def parse_date(value: str) -> dt.datetime:
    """Parse ISO date/datetime text; bare dates get midnight, naive values UTC."""
    try:
        stamp = dt.datetime.fromisoformat(value)
    except ValueError:
        stamp = dt.datetime.fromisoformat(value + "T00:00:00")
    if stamp.tzinfo is None:
        stamp = stamp.replace(tzinfo=dt.timezone.utc)
    return stamp
def month_key(timestamp: dt.datetime) -> str:
    """Calendar-month bucket label like '2024-03'."""
    return f"{timestamp.year:04d}-{timestamp.month:02d}"
def quarter_key(timestamp: dt.datetime) -> str:
    """Quarter bucket label like '2024-Q2'."""
    return f"{timestamp.year}-Q{(timestamp.month + 2) // 3}"
def month_end(timestamp: dt.datetime) -> dt.datetime:
    """Last second (UTC) of the calendar month containing *timestamp*."""
    year, month = timestamp.year, timestamp.month
    if month == 12:
        year, month = year + 1, 1
    else:
        month += 1
    first_of_next = dt.datetime(year, month, 1, tzinfo=dt.timezone.utc)
    return first_of_next - dt.timedelta(seconds=1)
def quarter_start(timestamp: dt.datetime) -> dt.datetime:
    """First day (UTC) of the quarter containing *timestamp*."""
    first_month = 3 * ((timestamp.month - 1) // 3) + 1
    return dt.datetime(timestamp.year, first_month, 1, tzinfo=dt.timezone.utc)
def quarter_end(timestamp: dt.datetime) -> dt.datetime:
    """Last second (UTC) of the quarter containing *timestamp*."""
    start = quarter_start(timestamp)
    year, month = start.year, start.month + 2
    # start.month is at most 10, so month never exceeds 12; branch kept for safety.
    if month > 12:
        month -= 12
        year += 1
    return month_end(dt.datetime(year, month, 1, tzinfo=dt.timezone.utc))
def add_months(timestamp: dt.datetime, months: int) -> dt.datetime:
    """First-of-month (UTC) *months* after *timestamp*'s month."""
    total = timestamp.year * 12 + (timestamp.month - 1) + months
    return dt.datetime(total // 12, total % 12 + 1, 1, tzinfo=dt.timezone.utc)
def recency_weight(age_days: float, half_life_days: float) -> float:
    """Exponential decay weight that halves every *half_life_days*.

    A non-positive half-life disables decay (weight 1.0). The previous
    implementation omitted the ln(2) factor, so the weight at
    age == half_life_days was e**-1 (~0.368) instead of 0.5 — inconsistent
    with the parameter name and with build_ownership_map's recency_weighted.
    """
    if half_life_days <= 0:
        return 1.0
    return math.exp(-math.log(2) * age_days / half_life_days)
def read_csv(path: Path) -> Iterable[dict[str, str]]:
    """Yield each row of a CSV file as a header-keyed dict."""
    with path.open("r", encoding="utf-8") as fh:
        for row in csv.DictReader(fh):
            yield row
def load_people(data_dir: Path) -> dict[str, dict[str, str]]:
    """Index people.csv rows by person_id, keeping display fields only."""
    registry: dict[str, dict[str, str]] = {}
    for row in read_csv(data_dir / "people.csv"):
        registry[row.get("person_id", "")] = {
            "name": row.get("name", ""),
            "email": row.get("email", ""),
            "primary_tz_offset": row.get("primary_tz_offset", ""),
        }
    return registry
def load_graph_json(data_dir: Path) -> dict[str, object] | None:
cochange_path = data_dir / "cochange.graph.json"
ownership_path = data_dir / "ownership.graph.json"
if cochange_path.exists():
return json.loads(cochange_path.read_text(encoding="utf-8"))
if ownership_path.exists():
return json.loads(ownership_path.read_text(encoding="utf-8"))
return None
def find_file_node(nodes: list[dict[str, object]], query: str) -> dict[str, object]:
    """Resolve *query* to a unique graph node: exact id first, then substring.

    Raises ValueError when nothing matches or the substring is ambiguous.
    """
    for node in nodes:
        if node.get("id") == query:
            return node
    partial = [node for node in nodes if query in str(node.get("id", ""))]
    if len(partial) == 1:
        return partial[0]
    if not partial:
        raise ValueError(f"File not found in graph: {query}")
    candidates = ", ".join(str(node.get("id")) for node in partial[:10])
    raise ValueError(f"Multiple matches for file {query}: {candidates}")
def load_community_files(
    data_dir: Path, file_query: str | None, community_id: int | None
) -> tuple[int, list[str]]:
    """Resolve a community id and its file list from the build outputs.

    Prefers the graph JSON (which carries the full per-community file list);
    falls back to communities.json, whose file lists may be truncated.
    Raises ValueError (bad/ambiguous query) or FileNotFoundError (no inputs).
    """
    graph = load_graph_json(data_dir)
    if graph:
        nodes = graph.get("nodes", [])
        if file_query:
            node = find_file_node(nodes, file_query)
            # A node without community_id resolves to -1 and will select the
            # same "ungrouped" bucket below.
            community_id = int(node.get("community_id", -1))
        if community_id is None:
            raise ValueError("Provide --file or --community-id")
        files = [node.get("id") for node in nodes if node.get("community_id") == community_id]
        files = [entry for entry in files if entry]
        if not files:
            raise ValueError(f"No files found for community {community_id}")
        return community_id, files
    # Fallback: communities.json (file lists capped by --max-community-files).
    communities_path = data_dir / "communities.json"
    if not communities_path.exists():
        raise FileNotFoundError("Missing graph json and communities.json")
    communities = json.loads(communities_path.read_text(encoding="utf-8"))
    if file_query:
        for entry in communities:
            files = entry.get("files", [])
            if any(file_query == f or file_query in f for f in files):
                return int(entry.get("id", -1)), list(files)
        raise ValueError("File not found in communities.json (list may be truncated)")
    if community_id is None:
        raise ValueError("Provide --file or --community-id")
    for entry in communities:
        if int(entry.get("id", -1)) == community_id:
            return community_id, list(entry.get("files", []))
    raise ValueError(f"Community id not found: {community_id}")
def iter_commits_from_json(
    commits_path: Path,
    since: dt.datetime | None,
    until: dt.datetime | None,
    date_field: str,
) -> Iterable[dict[str, object]]:
    """Stream commit records from commits.jsonl, filtered to [since, until].

    The filter uses *date_field* ("author" or "committer"), falling back to
    the other timestamp when the preferred one is absent; records with no
    usable timestamp are skipped.
    """
    with commits_path.open("r", encoding="utf-8") as handle:
        for raw in handle:
            record = json.loads(raw)
            author_raw = record.get("author_date") or record.get("date")
            committer_raw = record.get("committer_date")
            author_dt = parse_date(author_raw) if author_raw else None
            committer_dt = parse_date(committer_raw) if committer_raw else None
            if date_field == "committer":
                stamp = committer_dt or author_dt
            else:
                stamp = author_dt or committer_dt
            if stamp is None:
                continue
            if since and stamp < since:
                continue
            if until and stamp > until:
                continue
            yield {
                "hash": record.get("hash", ""),
                "parents": record.get("parents", []),
                "is_merge": record.get("is_merge", False),
                "author_name": record.get("author_name", ""),
                "author_email": record.get("author_email", ""),
                "author_date": author_raw,
                "committer_name": record.get("committer_name", ""),
                "committer_email": record.get("committer_email", ""),
                "committer_date": committer_raw,
                "files": record.get("files", []),
            }
def iter_commits_from_git(
    repo: str, since: str | None, until: str | None, include_merges: bool
) -> Iterable[dict[str, object]]:
    """Stream commit dicts straight from `git log --name-only`.

    Produces the same record shape as iter_commits_from_json. Raises
    RuntimeError when git exits non-zero.
    """
    cmd = [
        "git",
        "-C",
        repo,
        "log",
        "--name-only",
        "--no-renames",
        "--date=iso-strict",
        "--format=---%n%H%n%P%n%an%n%ae%n%ad%n%cn%n%ce%n%cd",
    ]
    if not include_merges:
        cmd.append("--no-merges")
    if since:
        cmd.extend(["--since", since])
    if until:
        cmd.extend(["--until", until])
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    assert proc.stdout is not None
    block: list[str] = []
    for line in proc.stdout:
        line = line.rstrip("\n")
        # "---" separates commits; flush the accumulated block (if any).
        if line == "---":
            if block:
                yield from parse_git_block(block)
            block = []
        else:
            block.append(line)
    if block:
        yield from parse_git_block(block)
    # Surface git failures only after the stream has been drained.
    stderr = proc.stderr.read() if proc.stderr else ""
    exit_code = proc.wait()
    if exit_code != 0:
        raise RuntimeError(stderr.strip() or "git log failed")
def parse_git_block(block: list[str]) -> Iterable[dict[str, object]]:
    """Convert one raw git-log record (8 header lines + file list) into a
    single-element list of commit dicts; malformed blocks yield []."""
    if len(block) < 8:
        return []
    parents = [token for token in block[1].split(" ") if token]
    commit = {
        "hash": block[0],
        "parents": parents,
        "is_merge": len(parents) > 1,
        "author_name": block[2],
        "author_email": block[3],
        "author_date": block[4],
        "committer_name": block[5],
        "committer_email": block[6],
        "committer_date": block[7],
        "files": [entry for entry in block[8:] if entry],
    }
    return [commit]
def main() -> int:
    """Report per-period top maintainers for one community, as CSV on stdout.

    Exit codes: 0 success (or no matching commits), 1 missing data dir,
    2 unresolvable community / missing inputs.
    """
    args = parse_args()
    data_dir = Path(args.data_dir)
    if not data_dir.exists():
        print(f"Data directory not found: {data_dir}", file=sys.stderr)
        return 1
    since = parse_date(args.since) if args.since else None
    until = parse_date(args.until) if args.until else None
    try:
        community_id, community_files = load_community_files(data_dir, args.file, args.community_id)
    except (ValueError, FileNotFoundError) as exc:
        print(str(exc), file=sys.stderr)
        return 2
    people = load_people(data_dir)
    ignore_re = re.compile(args.ignore_author_regex) if args.ignore_author_regex else None
    # Prefer the cached commits.jsonl; fall back to running git directly.
    commits_path = data_dir / "commits.jsonl"
    if commits_path.exists():
        commit_iter = iter_commits_from_json(commits_path, since, until, args.date_field)
    else:
        if not args.repo:
            print("--repo is required when commits.jsonl is missing", file=sys.stderr)
            return 2
        commit_iter = iter_commits_from_git(args.repo, args.since, args.until, args.include_merges)
    # Collect (date, person, touches, name, email) rows for commits that
    # touch at least one community file.
    commit_rows: list[tuple[dt.datetime, str, int, str, str]] = []
    for commit in commit_iter:
        if commit.get("is_merge") and not args.include_merges:
            continue
        files = commit.get("files", [])
        # NOTE(review): community_files is a list, so this membership test is
        # O(len(community_files)) per path — fine for typical sizes.
        in_community = sum(1 for path in files if path in community_files)
        if in_community == 0:
            continue
        identity_name = commit.get(f"{args.identity}_name", "")
        identity_email = commit.get(f"{args.identity}_email", "")
        date_value = commit.get(f"{args.date_field}_date")
        if not date_value:
            print(
                "Missing committer fields in commits.jsonl. Re-run build or pass --repo.",
                file=sys.stderr,
            )
            return 2
        commit_date = parse_date(date_value)
        person_id = identity_email or identity_name
        if ignore_re and ignore_re.search(identity_name or ""):
            continue
        if ignore_re and ignore_re.search(identity_email or ""):
            continue
        # touch-mode "commit": one touch per commit; "file": one per file hit.
        touches = 1 if args.touch_mode == "commit" else in_community
        commit_rows.append((commit_date, person_id, touches, identity_name, identity_email))
        if person_id not in people:
            people[person_id] = {
                "name": identity_name,
                "email": identity_email,
                "primary_tz_offset": "",
            }
    if not commit_rows:
        print("No commits touching community files for the selected window.", file=sys.stderr)
        return 0
    commit_rows.sort(key=lambda row: row[0])
    # Per-bucket accumulators: contribution per person and bucket total.
    period_counts: dict[str, Counter[str]] = defaultdict(Counter)
    period_totals: dict[str, float] = defaultdict(float)
    min_date = commit_rows[0][0]
    max_date = commit_rows[-1][0]
    if args.bucket == "quarter":
        period_cursor = quarter_start(min_date)
        period_end_anchor = quarter_start(max_date)
        step_months = 3
        key_func = quarter_key
        end_func = quarter_end
    else:
        period_cursor = dt.datetime(min_date.year, min_date.month, 1, tzinfo=dt.timezone.utc)
        period_end_anchor = dt.datetime(max_date.year, max_date.month, 1, tzinfo=dt.timezone.utc)
        step_months = 1
        key_func = month_key
        end_func = month_end
    # Walk bucket-by-bucket; each iteration defines its own in_bucket()
    # predicate (used immediately below, so closure late-binding is safe).
    while period_cursor <= period_end_anchor:
        bucket_end = end_func(period_cursor)
        bucket_key = key_func(bucket_end)
        if args.window_days > 0:
            # Rolling window of N days ending at the bucket boundary.
            window_start = bucket_end - dt.timedelta(days=args.window_days)
            def in_bucket(commit_date: dt.datetime) -> bool:
                return window_start <= commit_date <= bucket_end
        else:
            if args.bucket == "quarter":
                bucket_start = quarter_start(period_cursor)
                def in_bucket(commit_date: dt.datetime) -> bool:
                    return bucket_start <= commit_date <= bucket_end
            else:
                def in_bucket(commit_date: dt.datetime) -> bool:
                    return (
                        commit_date.year == bucket_end.year
                        and commit_date.month == bucket_end.month
                    )
        for commit_date, person_id, touches, _name, _email in commit_rows:
            if not in_bucket(commit_date):
                continue
            weight = 1.0
            if args.weight == "recency":
                age_days = (bucket_end - commit_date).total_seconds() / 86400.0
                weight = recency_weight(age_days, args.half_life_days)
            contribution = touches * weight
            period_counts[bucket_key][person_id] += contribution
            period_totals[bucket_key] += contribution
        period_cursor = add_months(period_cursor, step_months)
    # --- CSV output -------------------------------------------------------
    writer = csv.writer(sys.stdout)
    writer.writerow(
        [
            "period",
            "rank",
            "name",
            "email",
            "primary_tz_offset",
            "community_touches",
            "touch_share",
        ]
    )
    for period in sorted(period_counts.keys()):
        total = period_totals[period]
        ranked = sorted(period_counts[period].items(), key=lambda item: item[1], reverse=True)
        rank = 0
        for person_id, touches in ranked:
            # NOTE(review): with --weight recency, touches is a float compared
            # against the integer --min-touches threshold — confirm intent.
            if touches < args.min_touches:
                continue
            share = touches / total if total else 0.0
            if share < args.min_share:
                continue
            rank += 1
            if rank > args.top:
                break
            person = people.get(person_id, {})
            if args.weight == "recency":
                touches_value = f"{touches:.4f}"
            else:
                touches_value = f"{touches:.0f}"
            writer.writerow(
                [
                    period,
                    rank,
                    person.get("name", ""),
                    person.get("email", person_id),
                    person.get("primary_tz_offset", ""),
                    touches_value,
                    f"{share:.4f}",
                ]
            )
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s integer exit code to the shell.
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/security/security-ownership-map/scripts/community_maintainers.py",
"license": "MIT License",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/security/security-ownership-map/scripts/query_ownership.py | #!/usr/bin/env python3
"""Query ownership-map outputs without loading everything into an LLM context."""
from __future__ import annotations
import argparse
import csv
import json
import sys
from collections import defaultdict
from pathlib import Path
from typing import Iterable
def parse_args() -> argparse.Namespace:
    """Build the CLI: one subcommand per query type, each with its own filters."""
    parser = argparse.ArgumentParser(
        description="Query ownership-map outputs with bounded JSON results."
    )
    parser.add_argument(
        "--data-dir",
        default="ownership-map-out",
        help="Directory containing people.csv, files.csv, edges.csv",
    )
    subparsers = parser.add_subparsers(dest="command", required=True)
    # Table listings: people / files with filter + sort + limit.
    people = subparsers.add_parser("people", help="List people")
    people.add_argument("--limit", type=int, default=20)
    people.add_argument("--sort", default="touches")
    people.add_argument("--email-contains", default=None)
    people.add_argument("--min-touches", type=int, default=0)
    people.add_argument("--min-sensitive", type=float, default=0.0)
    files = subparsers.add_parser("files", help="List files")
    files.add_argument("--limit", type=int, default=20)
    files.add_argument("--sort", default="sensitivity_score")
    files.add_argument("--path-contains", default=None)
    files.add_argument("--tag", default=None)
    files.add_argument("--bus-factor-max", type=int, default=None)
    files.add_argument("--sensitivity-min", type=float, default=0.0)
    # Single-entity drill-downs: person / file plus their top edges.
    person = subparsers.add_parser("person", help="Show person details and top files")
    person.add_argument("--person", required=True, help="Exact email or substring")
    person.add_argument("--limit", type=int, default=20)
    person.add_argument("--sort", default="touches")
    file_cmd = subparsers.add_parser("file", help="Show file details and top people")
    file_cmd.add_argument("--file", required=True, help="Exact path or substring")
    file_cmd.add_argument("--limit", type=int, default=20)
    file_cmd.add_argument("--sort", default="touches")
    # Graph-derived views: co-change neighbors, tags, communities, summary.
    cochange = subparsers.add_parser("cochange", help="List co-change neighbors for a file")
    cochange.add_argument("--file", required=True, help="Exact path or substring")
    cochange.add_argument("--limit", type=int, default=20)
    cochange.add_argument("--sort", default="jaccard")
    cochange.add_argument("--min-jaccard", type=float, default=0.0)
    cochange.add_argument("--min-count", type=int, default=1)
    tag = subparsers.add_parser("tag", help="Show top people/files for a sensitive tag")
    tag.add_argument("--tag", required=True)
    tag.add_argument("--limit", type=int, default=20)
    summary = subparsers.add_parser("summary", help="Show summary.json sections")
    summary.add_argument("--section", default=None)
    communities = subparsers.add_parser("communities", help="List communities")
    communities.add_argument("--limit", type=int, default=10)
    communities.add_argument("--id", type=int, default=None)
    community = subparsers.add_parser("community", help="Show community maintainers")
    community.add_argument("--id", type=int, required=True)
    community.add_argument("--include-files", action="store_true")
    community.add_argument("--file-limit", type=int, default=50)
    return parser.parse_args()
def to_int(value: str) -> int:
    """Best-effort int conversion; unparsable or missing values become 0."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = 0
    return result
def to_float(value: str) -> float:
    """Best-effort float conversion; unparsable or missing values become 0.0."""
    try:
        result = float(value)
    except (TypeError, ValueError):
        result = 0.0
    return result
def read_csv(path: Path) -> Iterable[dict[str, str]]:
    """Yield each row of *path* as a dict keyed by the CSV header line."""
    with path.open("r", encoding="utf-8") as handle:
        for row in csv.DictReader(handle):
            yield row
def load_people(data_dir: Path) -> list[dict[str, object]]:
    """Read people.csv, coercing the numeric columns to int/float."""
    rows: list[dict[str, object]] = []
    for raw in read_csv(data_dir / "people.csv"):
        entry: dict[str, object] = dict(raw)
        entry["touches"] = to_int(raw.get("touches", "0"))
        entry["commit_count"] = to_int(raw.get("commit_count", "0"))
        entry["sensitive_touches"] = to_float(raw.get("sensitive_touches", "0"))
        rows.append(entry)
    return rows
def load_files(data_dir: Path) -> list[dict[str, object]]:
    """Read files.csv, coercing numeric columns and splitting sensitivity tags."""
    rows: list[dict[str, object]] = []
    for raw in read_csv(data_dir / "files.csv"):
        entry: dict[str, object] = dict(raw)
        entry["touches"] = to_int(raw.get("touches", "0"))
        entry["commit_count"] = to_int(raw.get("commit_count", "0"))
        entry["bus_factor"] = to_int(raw.get("bus_factor", "0"))
        entry["sensitivity_score"] = to_float(raw.get("sensitivity_score", "0"))
        # Tags are stored semicolon-joined; drop empty segments.
        raw_tags = raw.get("sensitivity_tags", "")
        entry["sensitivity_tags"] = [t for t in raw_tags.split(";") if t]
        rows.append(entry)
    return rows
def load_summary(data_dir: Path) -> dict[str, object]:
    """Load and return the parsed summary.json from *data_dir*."""
    with (data_dir / "summary.json").open("r", encoding="utf-8") as handle:
        return json.load(handle)
def load_communities(data_dir: Path) -> list[dict[str, object]]:
    """Load communities.json, raising FileNotFoundError with guidance if absent."""
    path = data_dir / "communities.json"
    if not path.exists():
        raise FileNotFoundError("communities.json not found; rerun build with --communities")
    with path.open("r", encoding="utf-8") as handle:
        return json.load(handle)
def load_cochange_edges(data_dir: Path) -> Iterable[dict[str, object]]:
    """Stream file-file co-change edges; raises if the CSV was never built."""
    edges_path = data_dir / "cochange_edges.csv"
    if not edges_path.exists():
        raise FileNotFoundError("cochange_edges.csv not found; rerun build without --no-cochange")
    for raw in read_csv(edges_path):
        edge: dict[str, object] = {
            "file_a": raw.get("file_a"),
            "file_b": raw.get("file_b"),
            "cochange_count": to_int(raw.get("cochange_count", "0")),
            "jaccard": to_float(raw.get("jaccard", "0")),
        }
        yield edge
def select_single(records: list[dict[str, object]], key: str, query: str) -> dict[str, object]:
    """Resolve *query* against *records* by exact *key* match, else a unique substring.

    Raises ValueError when nothing matches or the substring is ambiguous.
    """
    for record in records:
        if str(record.get(key, "")) == query:
            return record
    partial = [record for record in records if query in str(record.get(key, ""))]
    if len(partial) == 1:
        return partial[0]
    if not partial:
        raise ValueError(f"No match for {query}")
    # Ambiguous: show up to ten candidates so the caller can refine the query.
    candidates = [str(record.get(key, "")) for record in partial[:10]]
    raise ValueError(f"Multiple matches for {query}: {', '.join(candidates)}")
def top_edges_for_person(data_dir: Path, person_id: str) -> list[dict[str, object]]:
    """Collect this person's person->file edges from edges.csv."""
    return [
        {
            "file_id": row.get("file_id"),
            "touches": to_int(row.get("touches", "0")),
            "recency_weight": to_float(row.get("recency_weight", "0")),
            "sensitive_weight": to_float(row.get("sensitive_weight", "0")),
            "last_seen": row.get("last_seen"),
        }
        for row in read_csv(data_dir / "edges.csv")
        if row.get("person_id") == person_id
    ]
def top_edges_for_file(data_dir: Path, file_id: str) -> list[dict[str, object]]:
    """Collect this file's person->file edges from edges.csv."""
    return [
        {
            "person_id": row.get("person_id"),
            "touches": to_int(row.get("touches", "0")),
            "recency_weight": to_float(row.get("recency_weight", "0")),
            "sensitive_weight": to_float(row.get("sensitive_weight", "0")),
            "last_seen": row.get("last_seen"),
        }
        for row in read_csv(data_dir / "edges.csv")
        if row.get("file_id") == file_id
    ]
def sort_records(records: list[dict[str, object]], key: str) -> list[dict[str, object]]:
    """Return *records* ordered descending by *key* (missing values rank as 0)."""
    def sort_value(item: dict[str, object]) -> object:
        return item.get(key, 0)
    return sorted(records, key=sort_value, reverse=True)
def handle_people(args: argparse.Namespace, data_dir: Path) -> None:
    """Print a bounded JSON list of people matching the CLI filters."""
    candidates = load_people(data_dir)
    if args.email_contains:
        candidates = [p for p in candidates if args.email_contains in p.get("email", "")]
    candidates = [
        p
        for p in candidates
        if p["touches"] >= args.min_touches and p["sensitive_touches"] >= args.min_sensitive
    ]
    selected = sort_records(candidates, args.sort)[: args.limit]
    # Emit a fixed subset of columns, in a stable key order.
    fields = (
        "person_id",
        "name",
        "email",
        "touches",
        "commit_count",
        "sensitive_touches",
        "primary_tz_offset",
    )
    payload = [{field: p.get(field) for field in fields} for p in selected]
    print(json.dumps(payload, indent=2))
def handle_files(args: argparse.Namespace, data_dir: Path) -> None:
    """Print a bounded JSON list of files matching the CLI filters."""
    entries = load_files(data_dir)
    if args.path_contains:
        entries = [f for f in entries if args.path_contains in f.get("path", "")]
    if args.tag:
        entries = [f for f in entries if args.tag in f.get("sensitivity_tags", [])]
    if args.bus_factor_max is not None:
        entries = [f for f in entries if f["bus_factor"] <= args.bus_factor_max]
    entries = [f for f in entries if f["sensitivity_score"] >= args.sensitivity_min]
    selected = sort_records(entries, args.sort)[: args.limit]
    # Emit a fixed subset of columns, in a stable key order.
    fields = (
        "file_id",
        "path",
        "touches",
        "bus_factor",
        "sensitivity_score",
        "sensitivity_tags",
        "last_seen",
    )
    payload = [{field: f.get(field) for field in fields} for f in selected]
    print(json.dumps(payload, indent=2))
def handle_person(args: argparse.Namespace, data_dir: Path) -> None:
    """Print one person's profile plus their top files, as bounded JSON."""
    people = load_people(data_dir)
    # --person accepts an exact person_id or a unique substring of one.
    person = select_single(people, "person_id", args.person)
    files = load_files(data_dir)
    file_map = {f["file_id"]: f for f in files}
    edges = top_edges_for_person(data_dir, person["person_id"])
    edges = sort_records(edges, args.sort)[: args.limit]
    payload = {
        "person": {
            "person_id": person.get("person_id"),
            "name": person.get("name"),
            "email": person.get("email"),
            "touches": person.get("touches"),
            "commit_count": person.get("commit_count"),
            "sensitive_touches": person.get("sensitive_touches"),
            "primary_tz_offset": person.get("primary_tz_offset"),
            "timezone_offsets": person.get("timezone_offsets"),
        },
        "top_files": [
            {
                "file_id": edge.get("file_id"),
                # Resolve path/tags through files.csv; None when the id is unknown.
                "path": file_map.get(edge.get("file_id"), {}).get("path"),
                "touches": edge.get("touches"),
                "recency_weight": edge.get("recency_weight"),
                "sensitive_weight": edge.get("sensitive_weight"),
                "last_seen": edge.get("last_seen"),
                "sensitivity_tags": file_map.get(edge.get("file_id"), {}).get("sensitivity_tags"),
            }
            for edge in edges
        ],
    }
    print(json.dumps(payload, indent=2))
def handle_file(args: argparse.Namespace, data_dir: Path) -> None:
    """Print one file's details plus its top contributors, as bounded JSON."""
    files = load_files(data_dir)
    # --file accepts an exact file_id or a unique substring of one.
    file_entry = select_single(files, "file_id", args.file)
    people = load_people(data_dir)
    people_map = {p["person_id"]: p for p in people}
    edges = top_edges_for_file(data_dir, file_entry["file_id"])
    edges = sort_records(edges, args.sort)[: args.limit]
    payload = {
        "file": {
            "file_id": file_entry.get("file_id"),
            "path": file_entry.get("path"),
            "touches": file_entry.get("touches"),
            "bus_factor": file_entry.get("bus_factor"),
            "sensitivity_score": file_entry.get("sensitivity_score"),
            "sensitivity_tags": file_entry.get("sensitivity_tags"),
            "last_seen": file_entry.get("last_seen"),
        },
        "top_people": [
            {
                "person_id": edge.get("person_id"),
                # Resolve name/email through people.csv; None when the id is unknown.
                "name": people_map.get(edge.get("person_id"), {}).get("name"),
                "email": people_map.get(edge.get("person_id"), {}).get("email"),
                "touches": edge.get("touches"),
                "recency_weight": edge.get("recency_weight"),
                "sensitive_weight": edge.get("sensitive_weight"),
                "primary_tz_offset": people_map.get(edge.get("person_id"), {}).get(
                    "primary_tz_offset"
                ),
            }
            for edge in edges
        ],
    }
    print(json.dumps(payload, indent=2))
def handle_cochange(args: argparse.Namespace, data_dir: Path) -> None:
    """Print co-change neighbors of one file, filtered and ranked."""
    entries = load_files(data_dir)
    target = select_single(entries, "file_id", args.file)
    target_id = target["file_id"]
    neighbors = []
    for edge in load_cochange_edges(data_dir):
        endpoint_a = edge.get("file_a")
        endpoint_b = edge.get("file_b")
        # Keep only edges touching the target; the neighbor is the other endpoint.
        if endpoint_a == target_id:
            other = endpoint_b
        elif endpoint_b == target_id:
            other = endpoint_a
        else:
            continue
        if edge["cochange_count"] < args.min_count or edge["jaccard"] < args.min_jaccard:
            continue
        neighbors.append(
            {
                "file_id": other,
                # Edge rows carry no path column, so "path" mirrors the id here.
                "path": other,
                "cochange_count": edge["cochange_count"],
                "jaccard": edge["jaccard"],
            }
        )
    ranked = sort_records(neighbors, args.sort)[: args.limit]
    payload = {
        "file": {
            "file_id": target.get("file_id"),
            "path": target.get("path"),
        },
        "neighbors": ranked,
    }
    print(json.dumps(payload, indent=2))
def handle_tag(args: argparse.Namespace, data_dir: Path) -> None:
    """Print the top people and top files for one sensitivity tag."""
    all_files = load_files(data_dir)
    tagged = [f for f in all_files if args.tag in f.get("sensitivity_tags", [])]
    tagged_ids = {f["file_id"] for f in tagged}
    # Sum per-person touches over edges that hit any tagged file.
    touch_totals = defaultdict(int)
    for row in read_csv(data_dir / "edges.csv"):
        if row.get("file_id") in tagged_ids:
            touch_totals[row.get("person_id")] += to_int(row.get("touches", "0"))
    people_map = {p["person_id"]: p for p in load_people(data_dir)}
    roster = [
        {
            "person_id": person_id,
            "name": people_map.get(person_id, {}).get("name"),
            "email": people_map.get(person_id, {}).get("email"),
            "touches": total,
        }
        for person_id, total in touch_totals.items()
    ]
    top_people = sorted(roster, key=lambda item: item.get("touches", 0), reverse=True)[
        : args.limit
    ]
    top_files = sorted(tagged, key=lambda item: item.get("touches", 0), reverse=True)[
        : args.limit
    ]
    payload = {
        "tag": args.tag,
        "top_people": top_people,
        "top_files": [
            {
                "file_id": entry.get("file_id"),
                "path": entry.get("path"),
                "touches": entry.get("touches"),
                "bus_factor": entry.get("bus_factor"),
            }
            for entry in top_files
        ],
    }
    print(json.dumps(payload, indent=2))
def handle_summary(args: argparse.Namespace, data_dir: Path) -> None:
    """Print summary.json, or a single named section of it."""
    summary = load_summary(data_dir)
    if not args.section:
        print(json.dumps(summary, indent=2))
        return
    if args.section not in summary:
        raise ValueError(f"Section not found: {args.section}")
    print(json.dumps(summary[args.section], indent=2))
def handle_communities(args: argparse.Namespace, data_dir: Path) -> None:
    """Print one community by id, or the largest communities up to --limit."""
    communities = load_communities(data_dir)
    if args.id is None:
        ranked = sorted(communities, key=lambda item: item.get("size", 0), reverse=True)
        payload = ranked[: args.limit]
    else:
        matches = [entry for entry in communities if entry.get("id") == args.id]
        if not matches:
            raise ValueError(f"Community id not found: {args.id}")
        payload = matches[0]
    print(json.dumps(payload, indent=2))
def handle_community(args: argparse.Namespace, data_dir: Path) -> None:
    """Print one community's maintainers, optionally with a bounded file list."""
    for candidate in load_communities(data_dir):
        if candidate.get("id") == args.id:
            payload = dict(candidate)
            break
    else:
        raise ValueError(f"Community id not found: {args.id}")
    # Files are stripped by default to keep the payload small.
    members = payload.pop("files", [])
    if args.include_files:
        payload["files"] = members[: args.file_limit]
        payload["files_truncated"] = len(members) > args.file_limit
    print(json.dumps(payload, indent=2))
def main() -> int:
    """CLI entry point: dispatch the chosen subcommand against --data-dir."""
    args = parse_args()
    data_dir = Path(args.data_dir)
    if not data_dir.exists():
        print(f"Data directory not found: {data_dir}", file=sys.stderr)
        return 1
    handlers = {
        "people": handle_people,
        "files": handle_files,
        "person": handle_person,
        "file": handle_file,
        "cochange": handle_cochange,
        "tag": handle_tag,
        "summary": handle_summary,
        "communities": handle_communities,
        "community": handle_community,
    }
    try:
        handler = handlers.get(args.command)
        if handler is None:
            raise ValueError(f"Unknown command: {args.command}")
        handler(args, data_dir)
    except (FileNotFoundError, ValueError) as exc:
        # Missing artifacts and bad queries exit 2 with the message on stderr.
        print(str(exc), file=sys.stderr)
        return 2
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s integer exit code to the shell.
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/security/security-ownership-map/scripts/query_ownership.py",
"license": "MIT License",
"lines": 412,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/security/security-ownership-map/scripts/run_ownership_map.py | #!/usr/bin/env python3
"""One-shot runner for building the security ownership map."""
from __future__ import annotations
import argparse
import subprocess
import sys
from pathlib import Path
def parse_args() -> argparse.Namespace:
    """Parse runner flags; each maps onto a build_ownership_map.py option."""
    parser = argparse.ArgumentParser(
        description="Run build_ownership_map.py with sensible defaults."
    )
    # Input/output locations.
    parser.add_argument("--repo", default=".", help="Path to the git repo (default: .)")
    parser.add_argument(
        "--out",
        default="ownership-map-out",
        help="Output directory for graph artifacts",
    )
    # Git-log windowing and attribution.
    parser.add_argument("--since", default=None, help="Limit git log to commits since date")
    parser.add_argument("--until", default=None, help="Limit git log to commits until date")
    parser.add_argument(
        "--identity",
        choices=("author", "committer"),
        default="author",
        help="Identity to attribute touches to",
    )
    parser.add_argument(
        "--date-field",
        choices=("author", "committer"),
        default="author",
        help="Date field to use for recency and bucketing",
    )
    parser.add_argument(
        "--include-merges",
        action="store_true",
        help="Include merge commits (excluded by default)",
    )
    parser.add_argument(
        "--emit-commits",
        action="store_true",
        help="Write commit list to commits.jsonl",
    )
    parser.add_argument(
        "--author-exclude-regex",
        action="append",
        default=[],
        help="Regex for author name/email to exclude (repeatable)",
    )
    parser.add_argument(
        "--no-default-author-excludes",
        action="store_true",
        help="Disable default author excludes (dependabot)",
    )
    parser.add_argument(
        "--graphml",
        action="store_true",
        help="Emit GraphML outputs",
    )
    parser.add_argument(
        "--sensitive-config",
        default=None,
        help="CSV file with pattern,tag,weight for sensitive paths",
    )
    # Co-change graph tuning.
    parser.add_argument(
        "--cochange-max-files",
        type=int,
        default=50,
        help="Ignore commits touching more than this many files for co-change graph",
    )
    parser.add_argument(
        "--cochange-min-count",
        type=int,
        default=2,
        help="Minimum co-change count to keep file-file edge",
    )
    parser.add_argument(
        "--cochange-min-jaccard",
        type=float,
        default=0.05,
        help="Minimum Jaccard similarity to keep file-file edge",
    )
    parser.add_argument(
        "--cochange-exclude",
        action="append",
        default=[],
        help="Glob to exclude from co-change graph (repeatable)",
    )
    parser.add_argument(
        "--no-default-cochange-excludes",
        action="store_true",
        help="Disable default co-change excludes (lockfiles, .github, editor config)",
    )
    # Community/hotspot thresholds.
    parser.add_argument(
        "--community-top-owners",
        type=int,
        default=5,
        help="Top maintainers saved per community",
    )
    parser.add_argument(
        "--bus-factor-threshold",
        type=int,
        default=1,
        help="Bus factor threshold for hotspots",
    )
    parser.add_argument(
        "--stale-days",
        type=int,
        default=365,
        help="Days since last touch to consider stale",
    )
    parser.add_argument(
        "--owner-threshold",
        type=float,
        default=0.5,
        help="Share threshold for hidden owner detection",
    )
    parser.add_argument(
        "--no-cochange",
        action="store_true",
        help="Disable co-change graph output",
    )
    parser.add_argument(
        "--no-communities",
        action="store_true",
        help="Disable community detection (not recommended)",
    )
    return parser.parse_args()
def main() -> int:
    """Translate runner flags into a build_ownership_map.py subprocess call."""
    args = parse_args()
    # Fail fast with guidance if the required graph library is missing.
    try:
        import networkx  # noqa: F401
    except ImportError:
        print("networkx is required. Install with: pip install networkx", file=sys.stderr)
        return 2
    # The builder script lives next to this runner.
    script_path = Path(__file__).resolve().parent / "build_ownership_map.py"
    # Always-forwarded value options.
    cmd = [
        sys.executable,
        str(script_path),
        "--repo",
        args.repo,
        "--out",
        args.out,
        "--identity",
        args.identity,
        "--date-field",
        args.date_field,
        "--cochange-max-files",
        str(args.cochange_max_files),
        "--cochange-min-count",
        str(args.cochange_min_count),
        "--cochange-min-jaccard",
        str(args.cochange_min_jaccard),
        "--community-top-owners",
        str(args.community_top_owners),
        "--bus-factor-threshold",
        str(args.bus_factor_threshold),
        "--stale-days",
        str(args.stale_days),
        "--owner-threshold",
        str(args.owner_threshold),
    ]
    # Optional and boolean flags are appended only when set.
    if args.since:
        cmd.extend(["--since", args.since])
    if args.until:
        cmd.extend(["--until", args.until])
    if args.include_merges:
        cmd.append("--include-merges")
    if args.emit_commits:
        cmd.append("--emit-commits")
    if args.graphml:
        cmd.append("--graphml")
    if args.sensitive_config:
        cmd.extend(["--sensitive-config", args.sensitive_config])
    if args.no_cochange:
        cmd.append("--no-cochange")
    if args.no_communities:
        cmd.append("--no-communities")
    if args.no_default_cochange_excludes:
        cmd.append("--no-default-cochange-excludes")
    for pattern in args.cochange_exclude:
        cmd.extend(["--cochange-exclude", pattern])
    if args.no_default_author_excludes:
        cmd.append("--no-default-author-excludes")
    for pattern in args.author_exclude_regex:
        cmd.extend(["--author-exclude-regex", pattern])
    # check=False: propagate the child's exit code instead of raising.
    result = subprocess.run(cmd, check=False)
    return result.returncode
if __name__ == "__main__":
    # Script entry point: propagate main()'s integer exit code to the shell.
    raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/security/security-ownership-map/scripts/run_ownership_map.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/video/sora/scripts/sora.py | #!/usr/bin/env python3
"""Create and manage Sora videos with the OpenAI Video API.
Defaults to sora-2 and a structured prompt augmentation workflow.
"""
from __future__ import annotations
import argparse
import asyncio
import json
import os
from pathlib import Path
import re
import sys
import time
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
# --- Request defaults --------------------------------------------------------
DEFAULT_MODEL = "sora-2"
DEFAULT_SIZE = "1280x720"
DEFAULT_SECONDS = "4"
DEFAULT_POLL_INTERVAL = 10.0
DEFAULT_VARIANT = "video"
DEFAULT_CONCURRENCY = 3
DEFAULT_MAX_ATTEMPTS = 3
# --- Accepted values used by the _normalize_* validators ---------------------
ALLOWED_MODELS = {"sora-2", "sora-2-pro"}
ALLOWED_SIZES_SORA2 = {"1280x720", "720x1280"}
# sora-2-pro accepts the base sizes plus the 1024x1792 / 1792x1024 pair.
ALLOWED_SIZES_SORA2_PRO = {"1280x720", "720x1280", "1024x1792", "1792x1024"}
ALLOWED_SECONDS = {"4", "8", "12"}
ALLOWED_VARIANTS = {"video", "thumbnail", "spritesheet"}
ALLOWED_ORDERS = {"asc", "desc"}
ALLOWED_INPUT_EXTS = {".jpg", ".jpeg", ".png", ".webp"}
# Job statuses at which polling stops.
TERMINAL_STATUSES = {"completed", "failed", "canceled"}
# Download file extension for each variant.
VARIANT_EXTENSIONS = {"video": ".mp4", "thumbnail": ".webp", "spritesheet": ".jpg"}
# Hard cap on jobs accepted from a batch input file.
MAX_BATCH_JOBS = 200
def _die(message: str, code: int = 1) -> None:
print(f"Error: {message}", file=sys.stderr)
raise SystemExit(code)
def _warn(message: str) -> None:
print(f"Warning: {message}", file=sys.stderr)
def _ensure_api_key(dry_run: bool) -> None:
if os.getenv("OPENAI_API_KEY"):
print("OPENAI_API_KEY is set.", file=sys.stderr)
return
if dry_run:
_warn("OPENAI_API_KEY is not set; dry-run only.")
return
_die("OPENAI_API_KEY is not set. Export it before running.")
def _read_prompt(prompt: Optional[str], prompt_file: Optional[str]) -> str:
if prompt and prompt_file:
_die("Use --prompt or --prompt-file, not both.")
if prompt_file:
path = Path(prompt_file)
if not path.exists():
_die(f"Prompt file not found: {path}")
return path.read_text(encoding="utf-8").strip()
if prompt:
return prompt.strip()
_die("Missing prompt. Use --prompt or --prompt-file.")
return "" # unreachable
def _normalize_model(model: Optional[str]) -> str:
    """Lower-case and validate the model name, defaulting to sora-2."""
    chosen = (model or DEFAULT_MODEL).strip().lower()
    if chosen not in ALLOWED_MODELS:
        _die("model must be one of: sora-2, sora-2-pro")
    return chosen
def _normalize_size(size: Optional[str], model: str) -> str:
    """Validate the resolution against the sizes the chosen model supports."""
    chosen = (size or DEFAULT_SIZE).strip().lower()
    supported = ALLOWED_SIZES_SORA2 if model == "sora-2" else ALLOWED_SIZES_SORA2_PRO
    if chosen not in supported:
        _die(f"size must be one of: {', '.join(sorted(supported))} for model {model}")
    return chosen
def _normalize_seconds(seconds: Optional[Union[int, str]]) -> str:
    """Coerce the clip duration to a validated string of seconds."""
    if seconds is None:
        text = DEFAULT_SECONDS
    elif isinstance(seconds, int):
        text = str(seconds)
    else:
        text = str(seconds).strip()
    if text not in ALLOWED_SECONDS:
        _die("seconds must be one of: 4, 8, 12")
    return text
def _normalize_variant(variant: Optional[str]) -> str:
    """Validate the download variant name, defaulting to video."""
    chosen = (variant or DEFAULT_VARIANT).strip().lower()
    if chosen not in ALLOWED_VARIANTS:
        _die("variant must be one of: video, thumbnail, spritesheet")
    return chosen
def _normalize_order(order: Optional[str]) -> Optional[str]:
if order is None:
return None
value = order.strip().lower()
if value not in ALLOWED_ORDERS:
_die("order must be one of: asc, desc")
return value
def _normalize_poll_interval(interval: Optional[float]) -> float:
value = float(interval if interval is not None else DEFAULT_POLL_INTERVAL)
if value <= 0:
_die("poll-interval must be > 0")
return value
def _normalize_timeout(timeout: Optional[float]) -> Optional[float]:
if timeout is None:
return None
value = float(timeout)
if value <= 0:
_die("timeout must be > 0")
return value
def _default_out_path(variant: str) -> Path:
if variant == "video":
return Path("video.mp4")
if variant == "thumbnail":
return Path("thumbnail.webp")
return Path("spritesheet.jpg")
def _normalize_out_path(out: Optional[str], variant: str) -> Path:
    """Resolve the output path, filling in the variant's extension when missing."""
    wanted = VARIANT_EXTENSIONS[variant]
    if not out:
        return _default_out_path(variant)
    path = Path(out)
    if not path.suffix:
        return path.with_suffix(wanted)
    # A mismatched extension is allowed but flagged.
    if path.suffix.lower() != wanted:
        _warn(f"Output extension {path.suffix} does not match {wanted} for {variant}.")
    return path
def _normalize_json_out(out: Optional[str], default_name: str) -> Optional[Path]:
if not out:
return None
raw = str(out)
if raw.endswith("/") or raw.endswith(os.sep):
return Path(raw) / default_name
path = Path(out)
if path.exists() and path.is_dir():
return path / default_name
if path.suffix == "":
path = path.with_suffix(".json")
return path
def _open_input_reference(path: Optional[str]):
    """Open a reference image for upload; returns a no-op context when absent."""
    if not path:
        return _NullContext()
    ref = Path(path)
    if not ref.exists():
        _die(f"Input reference not found: {ref}")
    # Other extensions are allowed but flagged as likely unsupported.
    if ref.suffix.lower() not in ALLOWED_INPUT_EXTS:
        _warn("Input reference should be jpeg, png, or webp.")
    return _SingleFile(ref)
def _create_client():
    """Build a synchronous OpenAI client, exiting with install guidance if the SDK is missing."""
    try:
        from openai import OpenAI
    except ImportError:
        _die("openai SDK not installed. Run with `uv run --with openai` or install with `uv pip install openai`.")
    return OpenAI()
def _create_async_client():
    """Build an AsyncOpenAI client, distinguishing a missing SDK from an outdated one."""
    try:
        from openai import AsyncOpenAI
    except ImportError:
        # If even the package import fails, the SDK is absent entirely.
        try:
            import openai as _openai  # noqa: F401
        except ImportError:
            _die("openai SDK not installed. Run with `uv run --with openai` or install with `uv pip install openai`.")
        # Package exists but lacks AsyncOpenAI: the installed version is too old.
        _die(
            "AsyncOpenAI not available in this openai SDK version. Upgrade with `uv pip install -U openai`."
        )
    return AsyncOpenAI()
def _to_dict(obj: Any) -> Any:
if isinstance(obj, dict):
return obj
if hasattr(obj, "model_dump"):
return obj.model_dump()
if hasattr(obj, "dict"):
return obj.dict()
if hasattr(obj, "__dict__"):
return obj.__dict__
return obj
def _print_json(obj: Any) -> None:
    """Pretty-print *obj* as sorted JSON after dict conversion."""
    payload = _to_dict(obj)
    print(json.dumps(payload, indent=2, sort_keys=True))
def _print_request(payload: Dict[str, Any]) -> None:
print(json.dumps(payload, indent=2, sort_keys=True))
def _slugify(value: str) -> str:
value = value.strip().lower()
value = re.sub(r"[^a-z0-9]+", "-", value)
value = re.sub(r"-{2,}", "-", value).strip("-")
return value[:60] if value else "job"
def _normalize_job(job: Any, idx: int) -> Dict[str, Any]:
if isinstance(job, str):
prompt = job.strip()
if not prompt:
_die(f"Empty prompt at job {idx}")
return {"prompt": prompt}
if isinstance(job, dict):
if "prompt" not in job or not str(job["prompt"]).strip():
_die(f"Missing prompt for job {idx}")
return job
_die(f"Invalid job at index {idx}: expected string or object.")
return {} # unreachable
def _read_jobs_jsonl(path: str) -> List[Dict[str, Any]]:
    """Parse a batch file: one prompt or JSON job object per line; '#' lines are comments."""
    source = Path(path)
    if not source.exists():
        _die(f"Input file not found: {source}")
    jobs: List[Dict[str, Any]] = []
    for line_no, raw in enumerate(source.read_text(encoding="utf-8").splitlines(), start=1):
        stripped = raw.strip()
        if not stripped or stripped.startswith("#"):
            continue
        # Lines opening with "{" are JSON objects; anything else is a bare prompt.
        if stripped.startswith("{"):
            try:
                parsed: Any = json.loads(stripped)
            except json.JSONDecodeError as exc:
                _die(f"Invalid JSON on line {line_no}: {exc}")
        else:
            parsed = stripped
        jobs.append(_normalize_job(parsed, idx=line_no))
    if not jobs:
        _die("No jobs found in input file.")
    if len(jobs) > MAX_BATCH_JOBS:
        _die(f"Too many jobs ({len(jobs)}). Max is {MAX_BATCH_JOBS}.")
    return jobs
def _merge_non_null(dst: Dict[str, Any], src: Dict[str, Any]) -> Dict[str, Any]:
merged = dict(dst)
for k, v in src.items():
if v is not None:
merged[k] = v
return merged
def _job_output_path(out_dir: Path, idx: int, prompt: str, explicit_out: Optional[str]) -> Path:
out_dir.mkdir(parents=True, exist_ok=True)
if explicit_out:
path = Path(explicit_out)
if path.suffix == "":
path = path.with_suffix(".json")
return out_dir / path.name
slug = _slugify(prompt[:80])
return out_dir / f"{idx:03d}-{slug}.json"
def _extract_retry_after_seconds(exc: Exception) -> Optional[float]:
for attr in ("retry_after", "retry_after_seconds"):
val = getattr(exc, attr, None)
if isinstance(val, (int, float)) and val >= 0:
return float(val)
msg = str(exc)
m = re.search(r"retry[- ]after[:= ]+([0-9]+(?:\\.[0-9]+)?)", msg, re.IGNORECASE)
if m:
try:
return float(m.group(1))
except Exception:
return None
return None
def _is_rate_limit_error(exc: Exception) -> bool:
name = exc.__class__.__name__.lower()
if "ratelimit" in name or "rate_limit" in name:
return True
msg = str(exc).lower()
return "429" in msg or "rate limit" in msg or "too many requests" in msg
def _is_transient_error(exc: Exception) -> bool:
    """Detect errors worth retrying: rate limits, timeouts, flaky connections."""
    if _is_rate_limit_error(exc):
        return True
    cls = exc.__class__.__name__.lower()
    if any(marker in cls for marker in ("timeout", "timedout", "tempor")):
        return True
    text = str(exc).lower()
    return any(marker in text for marker in ("timeout", "timed out", "connection reset"))
def _fields_from_args(args: argparse.Namespace) -> Dict[str, Optional[str]]:
return {
"use_case": getattr(args, "use_case", None),
"scene": getattr(args, "scene", None),
"subject": getattr(args, "subject", None),
"action": getattr(args, "action", None),
"camera": getattr(args, "camera", None),
"style": getattr(args, "style", None),
"lighting": getattr(args, "lighting", None),
"palette": getattr(args, "palette", None),
"audio": getattr(args, "audio", None),
"dialogue": getattr(args, "dialogue", None),
"text": getattr(args, "text", None),
"timing": getattr(args, "timing", None),
"constraints": getattr(args, "constraints", None),
"negative": getattr(args, "negative", None),
}
def _augment_prompt_fields(augment: bool, prompt: str, fields: Dict[str, Optional[str]]) -> str:
if not augment:
return prompt
sections: List[str] = []
if fields.get("use_case"):
sections.append(f"Use case: {fields['use_case']}")
sections.append(f"Primary request: {prompt}")
if fields.get("scene"):
sections.append(f"Scene/background: {fields['scene']}")
if fields.get("subject"):
sections.append(f"Subject: {fields['subject']}")
if fields.get("action"):
sections.append(f"Action: {fields['action']}")
if fields.get("camera"):
sections.append(f"Camera: {fields['camera']}")
if fields.get("lighting"):
sections.append(f"Lighting/mood: {fields['lighting']}")
if fields.get("palette"):
sections.append(f"Color palette: {fields['palette']}")
if fields.get("style"):
sections.append(f"Style/format: {fields['style']}")
if fields.get("timing"):
sections.append(f"Timing/beats: {fields['timing']}")
if fields.get("audio"):
sections.append(f"Audio: {fields['audio']}")
if fields.get("text"):
sections.append(f"Text (verbatim): \"{fields['text']}\"")
if fields.get("dialogue"):
dialogue = fields["dialogue"].strip()
sections.append("Dialogue:\n<dialogue>\n" + dialogue + "\n</dialogue>")
if fields.get("constraints"):
sections.append(f"Constraints: {fields['constraints']}")
if fields.get("negative"):
sections.append(f"Avoid: {fields['negative']}")
return "\n".join(sections)
def _augment_prompt(args: argparse.Namespace, prompt: str) -> str:
    """Apply the CLI augmentation fields to *prompt* (no-op under --no-augment)."""
    fields = _fields_from_args(args)
    return _augment_prompt_fields(args.augment, prompt, fields)
def _get_status(video: Any) -> Optional[str]:
if isinstance(video, dict):
for key in ("status", "state"):
if key in video and isinstance(video[key], str):
return video[key]
data = video.get("data") if isinstance(video.get("data"), dict) else None
if data:
for key in ("status", "state"):
if key in data and isinstance(data[key], str):
return data[key]
return None
for key in ("status", "state"):
val = getattr(video, key, None)
if isinstance(val, str):
return val
return None
def _get_video_id(video: Any) -> Optional[str]:
if isinstance(video, dict):
if isinstance(video.get("id"), str):
return video["id"]
data = video.get("data") if isinstance(video.get("data"), dict) else None
if data and isinstance(data.get("id"), str):
return data["id"]
return None
vid = getattr(video, "id", None)
return vid if isinstance(vid, str) else None
def _poll_video(
    client: Any,
    video_id: str,
    *,
    poll_interval: float,
    timeout: Optional[float],
) -> Any:
    """Poll the API until the video job reaches a terminal status.

    Prints each status transition to stderr and returns the final response.
    Exits the process (via _die) if *timeout* seconds elapse first; a
    timeout of None waits forever.
    """
    start = time.time()
    last_status: Optional[str] = None
    while True:
        video = client.videos.retrieve(video_id)
        status = _get_status(video) or "unknown"
        # Only log transitions, not every poll, to keep output readable.
        if status != last_status:
            print(f"Status: {status}", file=sys.stderr)
            last_status = status
        if status in TERMINAL_STATUSES:
            return video
        if timeout is not None and (time.time() - start) > timeout:
            _die(f"Timed out after {timeout:.1f}s waiting for {video_id}")
        time.sleep(poll_interval)
def _download_content(client: Any, video_id: str, variant: str) -> Any:
    """Fetch downloadable content for a video and normalize the SDK's return.

    The order of checks matters: a streaming response exposing
    write_to_file is passed through untouched so it can persist itself
    lazily; otherwise fall back to file-like objects, raw bytes, or a
    wrapper carrying a .content attribute.
    """
    content = client.videos.download_content(video_id, variant=variant)
    if hasattr(content, "write_to_file"):
        return content
    if hasattr(content, "read"):
        return content.read()
    if isinstance(content, (bytes, bytearray)):
        return bytes(content)
    if hasattr(content, "content"):
        return content.content
    # Unknown shape: hand it back unchanged and let the writer decide.
    return content
def _write_download(data: Any, out_path: Path, *, force: bool) -> None:
    """Persist downloaded content to *out_path*.

    Refuses to overwrite an existing file unless *force* is set. Accepts
    the three shapes _download_content can return: a response object with
    write_to_file, a file-like object, or raw bytes.
    """
    if out_path.exists() and not force:
        _die(f"Output exists: {out_path} (use --force to overwrite)")
    if hasattr(data, "write_to_file"):
        data.write_to_file(out_path)
        print(f"Wrote {out_path}")
        return
    if hasattr(data, "read"):
        out_path.write_bytes(data.read())
        print(f"Wrote {out_path}")
        return
    out_path.write_bytes(data)
    print(f"Wrote {out_path}")
def _build_create_payload(args: argparse.Namespace, prompt: str) -> Dict[str, Any]:
    """Assemble the request body for POST /v1/videos from CLI args.

    Model, size and seconds are validated/normalized by the _normalize_*
    helpers before being placed in the payload.
    """
    model = _normalize_model(args.model)
    size = _normalize_size(args.size, model)
    seconds = _normalize_seconds(args.seconds)
    return {
        "model": model,
        "prompt": prompt,
        "size": size,
        "seconds": seconds,
    }
def _prepare_job_payload(
    args: argparse.Namespace,
    job: Dict[str, Any],
    base_fields: Dict[str, Optional[str]],
    base_payload: Dict[str, Any],
) -> Tuple[Dict[str, Any], Optional[str], str]:
    """Build the create-request payload for one JSONL batch job.

    Per-job values override CLI-level defaults; augmentation fields may
    appear under job["fields"] or as top-level job keys, with top-level
    keys taking final precedence. Returns (payload, input_reference,
    raw prompt text).
    """
    prompt = str(job["prompt"]).strip()
    # Merge precedence: CLI defaults < job["fields"] < top-level job keys.
    fields = _merge_non_null(base_fields, job.get("fields", {}))
    fields = _merge_non_null(fields, {k: job.get(k) for k in base_fields.keys()})
    augmented = _augment_prompt_fields(args.augment, prompt, fields)
    payload = dict(base_payload)
    payload["prompt"] = augmented
    # Job-level model/size/seconds override the CLI-wide values.
    payload = _merge_non_null(payload, {k: job.get(k) for k in base_payload.keys()})
    payload = {k: v for k, v in payload.items() if v is not None}
    model = _normalize_model(payload.get("model"))
    size = _normalize_size(payload.get("size"), model)
    seconds = _normalize_seconds(payload.get("seconds"))
    payload["model"] = model
    payload["size"] = size
    payload["seconds"] = seconds
    # Accept any of the historical key spellings for the reference file.
    input_ref = (
        job.get("input_reference")
        or job.get("input_reference_path")
        or job.get("input_reference_file")
    )
    return payload, input_ref, prompt
def _write_json(path: Path, obj: Any) -> None:
    """Serialize *obj* (via _to_dict) as pretty-printed JSON at *path*,
    creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(_to_dict(obj), indent=2, sort_keys=True), encoding="utf-8")
    print(f"Wrote {path}")
def _write_json_out(out_path: Optional[Path], obj: Any) -> None:
    """Write *obj* as JSON when an output path was requested; no-op otherwise."""
    if out_path is not None:
        _write_json(out_path, obj)
async def _create_one_with_retries(
    client: Any,
    payload: Dict[str, Any],
    *,
    attempts: int,
    job_label: str,
) -> Any:
    """Call client.videos.create with retry/backoff for transient failures.

    Non-transient errors are re-raised immediately. Transient ones (rate
    limits, timeouts) are retried up to *attempts* times, sleeping for the
    server-suggested Retry-After when available, otherwise exponential
    backoff capped at 60s.
    """
    last_exc: Optional[Exception] = None
    for attempt in range(1, attempts + 1):
        try:
            return await client.videos.create(**payload)
        except Exception as exc:
            last_exc = exc
            if not _is_transient_error(exc):
                raise
            if attempt == attempts:
                raise
            sleep_s = _extract_retry_after_seconds(exc)
            if sleep_s is None:
                sleep_s = min(60.0, 2.0**attempt)
            print(
                f"{job_label} attempt {attempt}/{attempts} failed ({exc.__class__.__name__}); retrying in {sleep_s:.1f}s",
                file=sys.stderr,
            )
            await asyncio.sleep(sleep_s)
    # Unreachable in practice (the loop returns or raises); kept as a
    # safety net for static analysis.
    raise last_exc or RuntimeError("unknown error")
async def _run_create_batch(args: argparse.Namespace) -> int:
    """Create many video jobs concurrently from a JSONL job file.

    Each line of --input describes one job; each result is written as a
    JSON file under --out-dir. Concurrency is bounded by a semaphore.
    Returns 0 on full success, 1 when any job failed (unless --fail-fast
    escalated the failure).
    """
    jobs = _read_jobs_jsonl(args.input)
    out_dir = Path(args.out_dir)
    base_fields = _fields_from_args(args)
    base_payload = {
        "model": args.model,
        "size": args.size,
        "seconds": args.seconds,
    }
    if args.dry_run:
        # Preview every request without touching the API.
        for i, job in enumerate(jobs, start=1):
            payload, input_ref, prompt = _prepare_job_payload(args, job, base_fields, base_payload)
            out_path = _job_output_path(out_dir, i, prompt, job.get("out"))
            preview = dict(payload)
            if input_ref:
                preview["input_reference"] = input_ref
            _print_request(
                {
                    "endpoint": "/v1/videos",
                    "job": i,
                    "output": str(out_path),
                    **preview,
                }
            )
        return 0
    client = _create_async_client()
    sem = asyncio.Semaphore(args.concurrency)
    any_failed = False
    async def run_job(i: int, job: Dict[str, Any]) -> Tuple[int, Optional[str]]:
        # Runs one job under the concurrency semaphore; returns (index, error).
        nonlocal any_failed
        payload, input_ref, prompt = _prepare_job_payload(args, job, base_fields, base_payload)
        job_label = f"[job {i}/{len(jobs)}]"
        out_path = _job_output_path(out_dir, i, prompt, job.get("out"))
        try:
            async with sem:
                print(f"{job_label} starting", file=sys.stderr)
                started = time.time()
                # Keep the reference file open only for the API call itself.
                with _open_input_reference(input_ref) as ref:
                    request = dict(payload)
                    if ref is not None:
                        request["input_reference"] = ref
                    result = await _create_one_with_retries(
                        client,
                        request,
                        attempts=args.max_attempts,
                        job_label=job_label,
                    )
                elapsed = time.time() - started
                print(f"{job_label} completed in {elapsed:.1f}s", file=sys.stderr)
                _write_json(out_path, result)
                return i, None
        except Exception as exc:
            any_failed = True
            print(f"{job_label} failed: {exc}", file=sys.stderr)
            if args.fail_fast:
                # Propagate so the gather below cancels remaining jobs.
                raise
            return i, str(exc)
    tasks = [asyncio.create_task(run_job(i, job)) for i, job in enumerate(jobs, start=1)]
    try:
        await asyncio.gather(*tasks)
    except Exception:
        # --fail-fast path: cancel everything still in flight, then re-raise.
        for t in tasks:
            if not t.done():
                t.cancel()
        raise
    return 1 if any_failed else 0
def _create_batch(args: argparse.Namespace) -> None:
    """Synchronous entry point for the create-batch subcommand; exits
    non-zero when any job failed."""
    code = asyncio.run(_run_create_batch(args))
    if code != 0:
        raise SystemExit(code)
def _cmd_create(args: argparse.Namespace) -> int:
    """Handle the `create` subcommand: submit one video job (or preview it)."""
    prompt = _read_prompt(args.prompt, args.prompt_file)
    prompt = _augment_prompt(args, prompt)
    payload = _build_create_payload(args, prompt)
    json_out = _normalize_json_out(args.json_out, "create.json")
    if args.dry_run:
        # Show the request that would be sent; never hits the network.
        preview = dict(payload)
        if args.input_reference:
            preview["input_reference"] = args.input_reference
        _print_request({"endpoint": "/v1/videos", **preview})
        _write_json_out(json_out, {"dry_run": True, "request": {"endpoint": "/v1/videos", **preview}})
        return 0
    client = _create_client()
    # Keep the reference file open only for the duration of the API call.
    with _open_input_reference(args.input_reference) as input_ref:
        if input_ref is not None:
            payload["input_reference"] = input_ref
        video = client.videos.create(**payload)
    _print_json(video)
    _write_json_out(json_out, video)
    return 0
def _cmd_create_and_poll(args: argparse.Namespace) -> int:
    """Handle `create-and-poll`: submit a job, wait for a terminal status,
    then optionally download the finished content.
    """
    prompt = _read_prompt(args.prompt, args.prompt_file)
    prompt = _augment_prompt(args, prompt)
    payload = _build_create_payload(args, prompt)
    json_out = _normalize_json_out(args.json_out, "create-and-poll.json")
    if args.dry_run:
        # Preview the request and the planned follow-up actions.
        preview = dict(payload)
        if args.input_reference:
            preview["input_reference"] = args.input_reference
        _print_request({"endpoint": "/v1/videos", **preview})
        print("Would poll for completion.")
        if args.download:
            variant = _normalize_variant(args.variant)
            out_path = _normalize_out_path(args.out, variant)
            print(f"Would download variant={variant} to {out_path}")
        if json_out:
            dry_bundle: Dict[str, Any] = {
                "dry_run": True,
                "request": {"endpoint": "/v1/videos", **preview},
                "poll": True,
            }
            if args.download:
                dry_bundle["download"] = {
                    "variant": variant,
                    "out": str(out_path),
                }
            _write_json_out(json_out, dry_bundle)
        return 0
    client = _create_client()
    with _open_input_reference(args.input_reference) as input_ref:
        if input_ref is not None:
            payload["input_reference"] = input_ref
        video = client.videos.create(**payload)
    _print_json(video)
    video_id = _get_video_id(video)
    if not video_id:
        _die("Could not determine video id from create response.")
    poll_interval = _normalize_poll_interval(args.poll_interval)
    timeout = _normalize_timeout(args.timeout)
    final_video = _poll_video(
        client,
        video_id,
        poll_interval=poll_interval,
        timeout=timeout,
    )
    _print_json(final_video)
    if args.download:
        # Content endpoints only serve completed jobs.
        status = _get_status(final_video) or "unknown"
        if status != "completed":
            _die(f"Video status is {status}; download is available only after completion.")
        variant = _normalize_variant(args.variant)
        out_path = _normalize_out_path(args.out, variant)
        data = _download_content(client, video_id, variant)
        _write_download(data, out_path, force=args.force)
    if json_out:
        _write_json_out(
            json_out,
            {"create": _to_dict(video), "final": _to_dict(final_video)},
        )
    return 0
def _cmd_poll(args: argparse.Namespace) -> int:
    """Handle `poll`: wait for an existing job to finish, optionally download."""
    poll_interval = _normalize_poll_interval(args.poll_interval)
    timeout = _normalize_timeout(args.timeout)
    json_out = _normalize_json_out(args.json_out, "poll.json")
    client = _create_client()
    final_video = _poll_video(
        client,
        args.id,
        poll_interval=poll_interval,
        timeout=timeout,
    )
    _print_json(final_video)
    _write_json_out(json_out, final_video)
    if args.download:
        # Content endpoints only serve completed jobs.
        status = _get_status(final_video) or "unknown"
        if status != "completed":
            _die(f"Video status is {status}; download is available only after completion.")
        variant = _normalize_variant(args.variant)
        out_path = _normalize_out_path(args.out, variant)
        data = _download_content(client, args.id, variant)
        _write_download(data, out_path, force=args.force)
    return 0
def _cmd_status(args: argparse.Namespace) -> int:
    """Handle `status`: fetch and print a single job's current state."""
    json_out = _normalize_json_out(args.json_out, "status.json")
    client = _create_client()
    video = client.videos.retrieve(args.id)
    _print_json(video)
    _write_json_out(json_out, video)
    return 0
def _cmd_list(args: argparse.Namespace) -> int:
    """Handle `list`: show recent jobs with optional pagination filters."""
    params: Dict[str, Any] = {
        "limit": args.limit,
        "order": _normalize_order(args.order),
        "after": args.after,
        "before": args.before,
    }
    # Drop unset filters so the API applies its own defaults.
    params = {k: v for k, v in params.items() if v is not None}
    json_out = _normalize_json_out(args.json_out, "list.json")
    client = _create_client()
    videos = client.videos.list(**params)
    _print_json(videos)
    _write_json_out(json_out, videos)
    return 0
def _cmd_delete(args: argparse.Namespace) -> int:
    """Handle `delete`: remove a video job and print the API's response."""
    json_out = _normalize_json_out(args.json_out, "delete.json")
    client = _create_client()
    result = client.videos.delete(args.id)
    _print_json(result)
    _write_json_out(json_out, result)
    return 0
def _cmd_remix(args: argparse.Namespace) -> int:
    """Handle `remix`: re-generate an existing video with a new prompt."""
    prompt = _read_prompt(args.prompt, args.prompt_file)
    prompt = _augment_prompt(args, prompt)
    json_out = _normalize_json_out(args.json_out, "remix.json")
    if args.dry_run:
        # Preview the request without calling the API.
        preview = {"endpoint": f"/v1/videos/{args.id}/remix", "prompt": prompt}
        _print_request(preview)
        _write_json_out(json_out, {"dry_run": True, "request": preview})
        return 0
    client = _create_client()
    result = client.videos.remix(video_id=args.id, prompt=prompt)
    _print_json(result)
    _write_json_out(json_out, result)
    return 0
def _cmd_download(args: argparse.Namespace) -> int:
    """Handle `download`: fetch a job's video/thumbnail/spritesheet to disk."""
    variant = _normalize_variant(args.variant)
    out_path = _normalize_out_path(args.out, variant)
    client = _create_client()
    data = _download_content(client, args.id, variant)
    _write_download(data, out_path, force=args.force)
    return 0
class _NullContext:
def __enter__(self):
return None
def __exit__(self, exc_type, exc, tb):
return False
class _SingleFile:
def __init__(self, path: Path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = self._path.open("rb")
return self._handle
def __exit__(self, exc_type, exc, tb):
if self._handle:
try:
self._handle.close()
except Exception:
pass
return False
def _add_prompt_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument("--prompt")
parser.add_argument("--prompt-file")
parser.add_argument("--augment", dest="augment", action="store_true")
parser.add_argument("--no-augment", dest="augment", action="store_false")
parser.set_defaults(augment=True)
parser.add_argument("--use-case")
parser.add_argument("--scene")
parser.add_argument("--subject")
parser.add_argument("--action")
parser.add_argument("--camera")
parser.add_argument("--style")
parser.add_argument("--lighting")
parser.add_argument("--palette")
parser.add_argument("--audio")
parser.add_argument("--dialogue")
parser.add_argument("--text")
parser.add_argument("--timing")
parser.add_argument("--constraints")
parser.add_argument("--negative")
def _add_create_args(parser: argparse.ArgumentParser) -> None:
    """Register options shared by the create-style subcommands."""
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--size", default=DEFAULT_SIZE)
    parser.add_argument("--seconds", default=DEFAULT_SECONDS)
    parser.add_argument("--input-reference")
    parser.add_argument("--dry-run", action="store_true")
    _add_prompt_args(parser)
def _add_poll_args(parser: argparse.ArgumentParser) -> None:
    """Register polling cadence and timeout options."""
    parser.add_argument("--poll-interval", type=float, default=DEFAULT_POLL_INTERVAL)
    parser.add_argument("--timeout", type=float)
def _add_download_args(parser: argparse.ArgumentParser) -> None:
    """Register download destination/variant options."""
    parser.add_argument("--download", action="store_true")
    parser.add_argument("--variant", default=DEFAULT_VARIANT)
    parser.add_argument("--out")
    parser.add_argument("--force", action="store_true")
def _add_json_out(parser: argparse.ArgumentParser) -> None:
    """Register the optional --json-out destination flag."""
    parser.add_argument("--json-out")
def main() -> int:
    """CLI entry point: build the subcommand parser, validate shared
    numeric options, and dispatch to the selected command handler.

    Returns the process exit code.
    """
    parser = argparse.ArgumentParser(description="Create and manage videos via the Sora Video API")
    subparsers = parser.add_subparsers(dest="command", required=True)
    create_parser = subparsers.add_parser("create", help="Create a new video job")
    _add_create_args(create_parser)
    _add_json_out(create_parser)
    create_parser.set_defaults(func=_cmd_create)
    create_poll_parser = subparsers.add_parser(
        "create-and-poll",
        help="Create a job, poll until complete, optionally download",
    )
    _add_create_args(create_poll_parser)
    _add_poll_args(create_poll_parser)
    _add_download_args(create_poll_parser)
    _add_json_out(create_poll_parser)
    create_poll_parser.set_defaults(func=_cmd_create_and_poll)
    poll_parser = subparsers.add_parser("poll", help="Poll a job until it completes")
    poll_parser.add_argument("--id", required=True)
    _add_poll_args(poll_parser)
    _add_download_args(poll_parser)
    _add_json_out(poll_parser)
    poll_parser.set_defaults(func=_cmd_poll)
    status_parser = subparsers.add_parser("status", help="Retrieve a job status")
    status_parser.add_argument("--id", required=True)
    _add_json_out(status_parser)
    status_parser.set_defaults(func=_cmd_status)
    list_parser = subparsers.add_parser("list", help="List recent video jobs")
    list_parser.add_argument("--limit", type=int)
    list_parser.add_argument("--order")
    list_parser.add_argument("--after")
    list_parser.add_argument("--before")
    _add_json_out(list_parser)
    list_parser.set_defaults(func=_cmd_list)
    delete_parser = subparsers.add_parser("delete", help="Delete a video job")
    delete_parser.add_argument("--id", required=True)
    _add_json_out(delete_parser)
    delete_parser.set_defaults(func=_cmd_delete)
    remix_parser = subparsers.add_parser("remix", help="Remix a completed video job")
    remix_parser.add_argument("--id", required=True)
    remix_parser.add_argument("--dry-run", action="store_true")
    _add_prompt_args(remix_parser)
    _add_json_out(remix_parser)
    remix_parser.set_defaults(func=_cmd_remix)
    download_parser = subparsers.add_parser("download", help="Download video/thumbnail/spritesheet")
    download_parser.add_argument("--id", required=True)
    download_parser.add_argument("--variant", default=DEFAULT_VARIANT)
    download_parser.add_argument("--out")
    download_parser.add_argument("--force", action="store_true")
    download_parser.set_defaults(func=_cmd_download)
    batch_parser = subparsers.add_parser("create-batch", help="Create multiple video jobs (JSONL input)")
    _add_create_args(batch_parser)
    batch_parser.add_argument("--input", required=True, help="Path to JSONL file (one job per line)")
    batch_parser.add_argument("--out-dir", required=True)
    batch_parser.add_argument("--concurrency", type=int, default=DEFAULT_CONCURRENCY)
    batch_parser.add_argument("--max-attempts", type=int, default=DEFAULT_MAX_ATTEMPTS)
    batch_parser.add_argument("--fail-fast", action="store_true")
    batch_parser.set_defaults(func=_create_batch)
    args = parser.parse_args()
    # Validate shared numeric bounds before dispatching.
    if getattr(args, "concurrency", 1) < 1 or getattr(args, "concurrency", 1) > 10:
        _die("--concurrency must be between 1 and 10")
    if getattr(args, "max_attempts", DEFAULT_MAX_ATTEMPTS) < 1 or getattr(args, "max_attempts", DEFAULT_MAX_ATTEMPTS) > 10:
        _die("--max-attempts must be between 1 and 10")
    dry_run = bool(getattr(args, "dry_run", False))
    _ensure_api_key(dry_run)
    # Bug fix: the handler's return code was previously discarded, so a
    # command returning non-zero could never propagate its exit status.
    # All current handlers return 0 or raise, so behavior is unchanged
    # today, but future non-zero returns now surface correctly.
    result = args.func(args)
    return int(result) if isinstance(result, int) else 0
if __name__ == "__main__":
raise SystemExit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/video/sora/scripts/sora.py",
"license": "MIT License",
"lines": 793,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:.claude/hooks/telegram-pr-webhook.py | #!/usr/bin/env python3
"""
Telegram PR Webhook Hook
Sends a Telegram notification when a new PR is created via `gh pr create`.
Includes the PR URL and the Vercel preview URL.
Required environment variables:
TELEGRAM_BOT_TOKEN - Bot token from @BotFather
TELEGRAM_CHAT_ID - Chat ID for notifications
Optional environment variables:
VERCEL_PROJECT_NAME - Vercel project name (for preview URL)
VERCEL_TEAM_SLUG - Vercel team slug (for preview URL)
"""
import json
import os
import re
import subprocess
import sys
import urllib.request
import urllib.parse
def get_input():
    """Parse the JSON hook payload from stdin; return {} on malformed input."""
    try:
        return json.load(sys.stdin)
    except (json.JSONDecodeError, ValueError):
        return {}
def extract_pr_url(text):
    """Return the first GitHub pull-request URL found in *text*, or None."""
    found = re.findall(r"https://github\.com/[^\s]+/pull/\d+", text)
    return found[0] if found else None
def get_branch_name():
    """Get the current git branch name, or None when git is unavailable
    or the cwd is not a repository (callers treat None as "unknown")."""
    try:
        return subprocess.check_output(
            ["git", "branch", "--show-current"],
            stderr=subprocess.DEVNULL,
            text=True,
        ).strip()
    except Exception:
        return None
def build_vercel_preview_url(branch):
    """Construct the Vercel preview deployment URL for *branch*.

    Requires VERCEL_PROJECT_NAME and VERCEL_TEAM_SLUG in the environment;
    returns None when either is missing.
    """
    project = os.environ.get("VERCEL_PROJECT_NAME", "")
    team = os.environ.get("VERCEL_TEAM_SLUG", "")
    if not (project and team):
        return None
    # Mirror Vercel's branch slugging: lowercase, collapse runs of
    # non-alphanumerics to '-', trim leading/trailing dashes.
    slug = re.sub(r"[^a-z0-9]+", "-", branch.lower()).strip("-")
    return f"https://{project}-git-{slug}-{team}.vercel.app"
def send_telegram(message):
    """Send a message via Telegram Bot API.

    Reads TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID from the environment.
    Returns True on success, False (with a stderr note) when configuration
    is missing or the HTTP call fails — notification is best-effort and
    never raises.
    """
    token = os.environ.get("TELEGRAM_BOT_TOKEN", "")
    chat_id = os.environ.get("TELEGRAM_CHAT_ID", "")
    if not token or not chat_id:
        print(
            "Telegram notification skipped: "
            "Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID",
            file=sys.stderr,
        )
        return False
    url = f"https://api.telegram.org/bot{token}/sendMessage"
    # parse_mode=HTML lets the caller use <b>/<a>/<code> markup.
    data = urllib.parse.urlencode(
        {"chat_id": chat_id, "text": message, "parse_mode": "HTML"}
    ).encode("utf-8")
    try:
        req = urllib.request.Request(url, data=data, method="POST")
        urllib.request.urlopen(req, timeout=10)
        return True
    except Exception as e:
        print(f"Failed to send Telegram notification: {e}", file=sys.stderr)
        return False
def main():
    """PostToolUse hook entry: notify Telegram when `gh pr create` succeeds."""
    input_data = get_input()
    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    command = tool_input.get("command", "")
    # Only act on gh pr create commands
    if tool_name != "Bash" or "gh pr create" not in command:
        sys.exit(0)
    # Extract PR URL from tool response
    tool_response = input_data.get("tool_response", "")
    if isinstance(tool_response, dict):
        tool_response = tool_response.get("stdout", "") or json.dumps(tool_response)
    pr_url = extract_pr_url(str(tool_response))
    if not pr_url:
        # No PR URL found — the command may have failed
        sys.exit(0)
    # Build Vercel preview URL
    branch = get_branch_name()
    vercel_url = build_vercel_preview_url(branch) if branch else None
    # Compose Telegram message
    lines = [
        "<b>New Pull Request Created</b>",
        "",
        f"<b>PR:</b> <a href=\"{pr_url}\">{pr_url}</a>",
    ]
    if vercel_url:
        lines.append(f"<b>Preview:</b> <a href=\"{vercel_url}\">{vercel_url}</a>")
    else:
        lines.append(
            "<b>Preview:</b> Check the PR checks tab for the Vercel deployment URL"
        )
    if branch:
        lines.append(f"<b>Branch:</b> <code>{branch}</code>")
    message = "\n".join(lines)
    send_telegram(message)
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": ".claude/hooks/telegram-pr-webhook.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:scripts/generate_claude_prs.py | #!/usr/bin/env python3
"""
Generate Claude Code PRs data.
Fetches all PRs from the GitHub API that were created from branches
starting with 'claude/' and writes a static JSON file for the dashboard.
Usage:
python scripts/generate_claude_prs.py
Optionally set GITHUB_TOKEN env var to avoid rate limits.
"""
import json
import os
import requests
import time
from datetime import datetime, timezone
REPO = "davila7/claude-code-templates"
API_URL = f"https://api.github.com/repos/{REPO}/pulls"
OUTPUT_FILE = os.path.join(os.path.dirname(__file__), "..", "docs", "claude-prs", "data.json")
def fetch_prs(state="all", token=None):
    """Fetch all PRs with pagination.

    Pages through the GitHub REST API 100 PRs at a time, newest first.
    Each page request is retried up to 3 times with exponential backoff on
    network errors. Stops early on rate limiting (403) or any other
    non-200 response. Returns the accumulated list of PR payloads.
    """
    headers = {"Accept": "application/vnd.github+json"}
    if token:
        headers["Authorization"] = f"Bearer {token}"
    all_prs = []
    page = 1
    while True:
        url = f"{API_URL}?state={state}&per_page=100&page={page}&sort=created&direction=desc"
        print(f" Fetching page {page}...")
        # Up to 3 attempts per page for transient network failures.
        for attempt in range(3):
            try:
                resp = requests.get(url, headers=headers, timeout=30)
                break
            except requests.exceptions.RequestException as e:
                if attempt < 2:
                    wait = 2 ** (attempt + 1)
                    print(f" Retry in {wait}s: {e}")
                    time.sleep(wait)
                else:
                    raise
        if resp.status_code == 403:
            print(f" Rate limited. Set GITHUB_TOKEN env var to increase limits.")
            break
        if resp.status_code != 200:
            print(f" Error {resp.status_code}: {resp.text[:200]}")
            break
        data = resp.json()
        if not data:
            break
        all_prs.extend(data)
        page += 1
        # A short page means we've reached the last page.
        if len(data) < 100:
            break
    return all_prs
def is_claude_pr(pr):
    """Check if a PR was created by Claude Code (branch starts with claude/)."""
    branch = pr.get("head", {}).get("ref", "")
    return branch.startswith("claude/")
def extract_pr_data(pr):
    """Reduce a full GitHub PR payload to the fields the dashboard needs."""
    merged_at = pr.get("merged_at")
    return {
        "number": pr["number"],
        "title": pr["title"],
        "state": pr["state"],
        # A PR counts as merged when GitHub recorded a merge timestamp.
        "merged": merged_at is not None,
        "branch": pr["head"]["ref"],
        "created_at": pr["created_at"],
        "merged_at": merged_at,
        "closed_at": pr.get("closed_at"),
        "url": pr["html_url"],
        "user": pr.get("user", {}).get("login", "unknown"),
    }
def main():
    """Fetch all repo PRs, keep Claude-created ones, write dashboard JSON."""
    print("Generating Claude Code PRs data...")
    token = os.environ.get("GITHUB_TOKEN")
    if token:
        print(" Using GITHUB_TOKEN for authentication")
    else:
        print(" No GITHUB_TOKEN set (60 req/hr limit). Set it to avoid rate limits.")
    all_prs = fetch_prs(token=token)
    print(f" Total PRs fetched: {len(all_prs)}")
    claude_prs = [extract_pr_data(pr) for pr in all_prs if is_claude_pr(pr)]
    print(f" Claude Code PRs found: {len(claude_prs)}")
    # Sort by creation date descending
    claude_prs.sort(key=lambda p: p["created_at"], reverse=True)
    output = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "repo": REPO,
        "total": len(claude_prs),
        "prs": claude_prs,
    }
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    with open(OUTPUT_FILE, "w") as f:
        json.dump(output, f, indent=2)
    print(f" Written to {OUTPUT_FILE}")
    print("Done.")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "scripts/generate_claude_prs.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/0.py | from typing import List
def has_close_elements(numbers: List[float], threshold: float) -> bool:
    """ Check if in given list of numbers, are any two numbers closer to each other than
    given threshold.
    >>> has_close_elements([1.0, 2.0, 3.0], 0.5)
    False
    >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)
    True
    """
    # After sorting, the closest pair is always adjacent, so one linear scan
    # replaces the original O(n^2) all-pairs comparison.
    ordered = sorted(numbers)
    return any(b - a < threshold for a, b in zip(ordered, ordered[1:]))
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/0.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/1.py | from typing import List
def separate_paren_groups(paren_string: str) -> List[str]:
    """ Input to this function is a string containing multiple groups of nested parentheses. Your goal is to
    separate those group into separate strings and return the list of those.
    Separate groups are balanced (each open brace is properly closed) and not nested within each other
    Ignore any spaces in the input string.
    >>> separate_paren_groups('( ) (( )) (( )( ))')
    ['()', '(())', '(()())']
    """
    groups: List[str] = []
    buffer: List[str] = []
    depth = 0
    for ch in paren_string:
        if ch == '(':
            depth += 1
            buffer.append(ch)
        elif ch == ')':
            depth -= 1
            buffer.append(ch)
            # Depth back to zero closes a top-level group.
            if depth == 0:
                groups.append(''.join(buffer))
                buffer.clear()
    return groups
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/1.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/10.py | def is_palindrome(string: str) -> bool:
""" Test if given string is a palindrome """
return string == string[::-1]
def make_palindrome(string: str) -> str:
    """ Find the shortest palindrome that begins with a supplied string.
    Algorithm idea is simple:
    - Find the longest postfix of supplied string that is a palindrome.
    - Append to the end of the string reverse of a string prefix that comes before the palindromic suffix.
    >>> make_palindrome('')
    ''
    >>> make_palindrome('cat')
    'catac'
    >>> make_palindrome('cata')
    'catac'
    """
    if not string:
        return ''
    for start in range(len(string)):
        suffix = string[start:]
        # Longest palindromic suffix found: mirror the prefix before it.
        if suffix == suffix[::-1]:
            return string + string[:start][::-1]
    return string
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/10.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/100.py | def make_a_pile(n):
"""
Given a positive integer n, you have to make a pile of n levels of stones.
The first level has n stones.
The number of stones in the next level is:
- the next odd number if n is odd.
- the next even number if n is even.
Return the number of stones in each level in a list, where element at index
i represents the number of stones in the level (i+1).
Examples:
>>> make_a_pile(3)
[3, 5, 7]
"""
result = []
current = n
for _ in range(n):
result.append(current)
current += 2
return result | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/100.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/101.py | def words_string(s):
"""
You will be given a string of words separated by commas or spaces. Your task is
to split the string into words and return an array of the words.
For example:
words_string("Hi, my name is John") == ["Hi", "my", "name", "is", "John"]
words_string("One, two, three, four, five, six") == ["One", "two", "three", "four", "five", "six"]
"""
if not s:
return []
# Replace commas with spaces, then split on whitespace
s = s.replace(',', ' ')
return s.split() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/101.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/102.py | def choose_num(x, y):
"""This function takes two positive numbers x and y and returns the
biggest even integer number that is in the range [x, y] inclusive. If
there's no such number, then the function should return -1.
For example:
choose_num(12, 15) = 14
choose_num(13, 12) = -1
"""
if x > y:
return -1
if y % 2 == 0:
return y
if y - 1 >= x:
return y - 1
return -1 | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/102.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/103.py | def rounded_avg(n, m):
"""You are given two positive integers n and m, and your task is to compute the
average of the integers from n through m (including n and m).
Round the answer to the nearest integer and convert that to binary.
If n is greater than m, return -1.
Example:
rounded_avg(1, 5) => "0b11"
rounded_avg(7, 5) => -1
rounded_avg(10, 20) => "0b1111"
rounded_avg(20, 33) => "0b11010"
"""
if n > m:
return -1
avg = round((n + m) / 2)
return bin(avg) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/103.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/104.py | def unique_digits(x):
"""Given a list of positive integers x. return a sorted list of all
elements that hasn't any even digit.
Note: Returned list should be sorted in increasing order.
For example:
>>> unique_digits([15, 33, 1422, 1])
[1, 15, 33]
>>> unique_digits([152, 323, 1422, 10])
[]
"""
def has_even_digit(n):
while n > 0:
digit = n % 10
if digit % 2 == 0:
return True
n //= 10
return False
result = [num for num in x if not has_even_digit(num)]
return sorted(result) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/104.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/105.py | def by_length(arr):
"""
Given an array of integers, sort the integers that are between 1 and 9 inclusive,
reverse the resulting array, and then replace each digit by its corresponding name from
"One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine".
For example:
arr = [2, 1, 1, 4, 5, 8, 2, 3]
-> sort arr -> [1, 1, 2, 2, 3, 4, 5, 8]
-> reverse arr -> [8, 5, 4, 3, 2, 2, 1, 1]
return ["Eight", "Five", "Four", "Three", "Two", "Two", "One", "One"]
If the array is empty, return an empty array:
arr = []
return []
If the array has any strange number ignore it:
arr = [1, -1 , 55]
-> sort arr -> [-1, 1, 55]
-> reverse arr -> [55, 1, -1]
return = ['One']
"""
names = {
1: "One",
2: "Two",
3: "Three",
4: "Four",
5: "Five",
6: "Six",
7: "Seven",
8: "Eight",
9: "Nine"
}
filtered = [x for x in arr if 1 <= x <= 9]
filtered.sort()
filtered.reverse()
return [names[x] for x in filtered] | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/105.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/106.py | def f(n):
""" Implement the function f that takes n as a parameter,
and returns a list of size n, such that the value of the element at index i is the factorial of i if i is even
or the sum of numbers from 1 to i otherwise.
i starts from 1.
the factorial of i is the multiplication of the numbers from 1 to i (1 * 2 * ... * i).
Example:
f(5) == [1, 2, 6, 24, 15]
"""
result = []
for i in range(1, n + 1):
if i % 2 == 0:
# factorial of i
factorial = 1
for j in range(1, i + 1):
factorial *= j
result.append(factorial)
else:
# sum of numbers from 1 to i
result.append(sum(range(1, i + 1)))
return result | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/106.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/107.py | def even_odd_palindrome(n):
"""
Given a positive integer n, return a tuple that has the number of even and odd
integer palindromes that fall within the range(1, n), inclusive.
Example 1:
Input: 3
Output: (1, 2)
Explanation:
Integer palindrome are 1, 2, 3. one of them is even, and two of them are odd.
Example 2:
Input: 12
Output: (4, 6)
Explanation:
Integer palindrome are 1, 2, 3, 4, 5, 6, 7, 8, 9, 11. four of them are even, and 6 of them are odd.
Note:
1. 1 <= n <= 10^3
2. returned tuple has the number of even and odd integer palindromes respectively.
"""
even_count = 0
odd_count = 0
for i in range(1, n + 1):
s = str(i)
if s == s[::-1]:
if i % 2 == 0:
even_count += 1
else:
odd_count += 1
return (even_count, odd_count) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/107.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/108.py | def count_nums(arr):
"""
Write a function count_nums which takes an array of integers and returns
the number of elements which has a sum of digits > 0.
If a number is negative, then its first signed digit will be negative:
e.g. -123 has signed digits -1, 2, and 3.
>>> count_nums([]) == 0
>>> count_nums([-1, 11, -11]) == 1
>>> count_nums([1, 1, 2]) == 3
"""
count = 0
for num in arr:
if num == 0:
continue
elif num > 0:
digit_sum = sum(int(d) for d in str(num))
else:
s = str(abs(num))
digit_sum = -int(s[0]) + sum(int(d) for d in s[1:])
if digit_sum > 0:
count += 1
return count | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/108.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/109.py | def move_one_ball(arr):
"""We have an array 'arr' of N integers arr[1], arr[2], ..., arr[N].The
numbers in the array will be randomly ordered. Your task is to determine if
it is possible to get an array sorted in non-decreasing order by performing
the following operation on the given array:
You are allowed to perform right shift operation any number of times.
One right shift operation means shifting all elements of the array by one
position in the right direction. The last element of the array will be moved to
the starting position in the array i.e. 0th index.
If it is possible to obtain the sorted array by performing the above operation
then return True else return False.
If the given array is empty then return True.
Note: The given list is guaranteed to have unique elements.
For Example:
move_one_ball([3, 4, 5, 1, 2])==>True
Explanation: By performin 2 right shift operations, non-decreasing order can
be achieved for the given array.
move_one_ball([3, 5, 4, 1, 2])==>False
Explanation:It is not possible to get non-decreasing order for the given
array by performing any number of right shift operations.
"""
if len(arr) == 0:
return True
# Count the number of "breaks" where arr[i] > arr[i+1]
# For a rotated sorted array, there should be at most one such break
breaks = 0
n = len(arr)
for i in range(n):
if arr[i] > arr[(i + 1) % n]:
breaks += 1
# If there's at most one break, the array can be sorted by rotation
return breaks <= 1 | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/109.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/11.py | from typing import List
def string_xor(a: str, b: str) -> str:
""" Input are two strings a and b consisting only of 1s and 0s.
Perform binary XOR on these inputs and return result also as a string.
>>> string_xor('010', '110')
'100'
"""
result = []
for char_a, char_b in zip(a, b):
if char_a == char_b:
result.append('0')
else:
result.append('1')
return ''.join(result) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/11.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/110.py | def exchange(lst1, lst2):
"""In this problem, you will implement a function that takes two lists of numbers,
and determines whether it is possible to perform an exchange of elements
between them to make lst1 a list of only even numbers.
There is no limit on the number of exchanged elements between lst1 and lst2.
If it is possible to exchange elements between the lst1 and lst2 to make
all the elements of lst1 to be even, return "YES".
Otherwise, return "NO".
For example:
exchange([1, 2, 3, 4], [1, 2, 3, 4]) => "YES"
exchange([1, 2, 3, 4], [1, 5, 3, 4]) => "NO"
It is assumed that the input lists will be non-empty.
"""
odd_count_lst1 = sum(1 for x in lst1 if x % 2 != 0)
even_count_lst2 = sum(1 for x in lst2 if x % 2 == 0)
if even_count_lst2 >= odd_count_lst1:
return "YES"
else:
return "NO" | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/110.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/111.py | def histogram(test):
"""Given a string representing a space separated lowercase letters, return a dictionary
of the letter with the most repetition and containing the corresponding count.
If several letters have the same occurrence, return all of them.
Example:
histogram('a b c') == {'a': 1, 'b': 1, 'c': 1}
histogram('a b b a') == {'a': 2, 'b': 2}
histogram('a b c a b') == {'a': 2, 'b': 2}
histogram('b b b b a') == {'b': 4}
histogram('') == {}
"""
if not test or test.strip() == '':
return {}
letters = test.split()
counts = {}
for letter in letters:
counts[letter] = counts.get(letter, 0) + 1
if not counts:
return {}
max_count = max(counts.values())
return {letter: count for letter, count in counts.items() if count == max_count} | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/111.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/112.py | def reverse_delete(s,c):
"""Task
We are given two strings s and c, you have to deleted all the characters in s that are equal to any character in c
then check if the result string is palindrome.
A string is called palindrome if it reads the same backward as forward.
You should return a tuple containing the result string and True/False for the check.
Example
For s = "abcde", c = "ae", the result should be ('bcd',False)
For s = "abcdef", c = "b" the result should be ('acdef',False)
For s = "abcdedcba", c = "ab", the result should be ('cdedc',True)
"""
result = ''.join(char for char in s if char not in c)
is_palindrome = result == result[::-1]
return (result, is_palindrome) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/112.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/113.py | def odd_count(lst):
"""Given a list of strings, where each string consists of only digits, return a list.
Each element i of the output should be "the number of odd elements in the
string i of the input." where all the i's should be replaced by the number
of odd digits in the i'th string of the input.
>>> odd_count(['1234567'])
["the number of odd elements 4n the str4ng 4 of the 4nput."]
>>> odd_count(['3',"11111111"])
["the number of odd elements 1n the str1ng 1 of the 1nput.",
"the number of odd elements 8n the str8ng 8 of the 8nput."]
"""
result = []
for s in lst:
count = sum(1 for c in s if c in '13579')
template = "the number of odd elements in the string i of the input."
replaced = template.replace('i', str(count))
result.append(replaced)
return result | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/113.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/114.py | def minSubArraySum(nums):
"""
Given an array of integers nums, find the minimum sum of any non-empty sub-array
of nums.
Example
minSubArraySum([2, 3, 4, 1, 2, 4]) == 1
minSubArraySum([-1, -2, -3]) == -6
"""
min_sum = nums[0]
current_sum = nums[0]
for i in range(1, len(nums)):
current_sum = min(nums[i], current_sum + nums[i])
min_sum = min(min_sum, current_sum)
return min_sum | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/114.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/115.py | def max_fill(grid, capacity):
import math
"""
You are given a rectangular grid of wells. Each row represents a single well,
and each 1 in a row represents a single unit of water.
Each well has a corresponding bucket that can be used to extract water from it,
and all buckets have the same capacity.
Your task is to use the buckets to empty the wells.
Output the number of times you need to lower the buckets.
Example 1:
Input:
grid : [[0,0,1,0], [0,1,0,0], [1,1,1,1]]
bucket_capacity : 1
Output: 6
Example 2:
Input:
grid : [[0,0,1,1], [0,0,0,0], [1,1,1,1], [0,1,1,1]]
bucket_capacity : 2
Output: 5
Example 3:
Input:
grid : [[0,0,0], [0,0,0]]
bucket_capacity : 5
Output: 0
Constraints:
* all wells have the same length
* 1 <= grid.length <= 10^2
* 1 <= grid[:,1].length <= 10^2
* grid[i][j] -> 0 | 1
* 1 <= capacity <= 10
"""
total_lowerings = 0
for row in grid:
water_units = sum(row)
if water_units > 0:
total_lowerings += math.ceil(water_units / capacity)
return total_lowerings | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/115.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/116.py | def sort_array(arr):
"""
In this Kata, you have to sort an array of non-negative integers according to
number of ones in their binary representation in ascending order.
For similar number of ones, sort based on decimal value.
It must be implemented like this:
>>> sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5]
>>> sort_array([-2, -3, -4, -5, -6]) == [-6, -5, -4, -3, -2]
>>> sort_array([1, 0, 2, 3, 4]) [0, 1, 2, 3, 4]
"""
return sorted(arr, key=lambda x: (bin(x).count('1'), x)) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/116.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/117.py | def select_words(s, n):
"""Given a string s and a natural number n, you have been tasked to implement
a function that returns a list of all words from string s that contain exactly
n consonants, in order these words appear in the string s.
If the string s is empty then the function should return an empty list.
Note: you may assume the input string contains only letters and spaces.
Examples:
select_words("Mary had a little lamb", 4) ==> ["little"]
select_words("Mary had a little lamb", 3) ==> ["Mary", "lamb"]
select_words("simple white space", 2) ==> []
select_words("Hello world", 4) ==> ["world"]
select_words("Uncle sam", 3) ==> ["Uncle"]
"""
if not s:
return []
vowels = set('aeiouAEIOU')
result = []
for word in s.split():
consonant_count = sum(1 for char in word if char.isalpha() and char not in vowels)
if consonant_count == n:
result.append(word)
return result | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/117.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/118.py | def get_closest_vowel(word):
"""You are given a word. Your task is to find the closest vowel that stands between
two consonants from the right side of the word (case sensitive).
Vowels in the beginning and ending doesn't count. Return empty string if you didn't
find any vowel met the above condition.
You may assume that the given string contains English letter only.
Example:
get_closest_vowel("yogurt") ==> "u"
get_closest_vowel("FULL") ==> "U"
get_closest_vowel("quick") ==> ""
get_closest_vowel("ab") ==> ""
"""
if len(word) < 3:
return ""
vowels = "aeiouAEIOU"
for i in range(len(word) - 2, 0, -1):
if word[i] in vowels:
if word[i-1] not in vowels and word[i+1] not in vowels:
return word[i]
return "" | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/118.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/119.py | def match_parens(lst):
'''
You are given a list of two strings, both strings consist of open
parentheses '(' or close parentheses ')' only.
Your job is to check if it is possible to concatenate the two strings in
some order, that the resulting string will be good.
A string S is considered to be good if and only if all parentheses in S
are balanced. For example: the string '(())()' is good, while the string
'())' is not.
Return 'Yes' if there's a way to make a good string, and return 'No' otherwise.
Examples:
match_parens(['()(', ')']) == 'Yes'
match_parens([')', ')']) == 'No'
'''
def is_balanced(s):
count = 0
for c in s:
if c == '(':
count += 1
else:
count -= 1
if count < 0:
return False
return count == 0
s1, s2 = lst[0], lst[1]
if is_balanced(s1 + s2) or is_balanced(s2 + s1):
return 'Yes'
return 'No' | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/119.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/12.py | from typing import List, Optional
def longest(strings: List[str]) -> Optional[str]:
""" Out of list of strings, return the longest one. Return the first one in case of multiple
strings of the same length. Return None in case the input list is empty.
>>> longest([])
>>> longest(['a', 'b', 'c'])
'a'
>>> longest(['a', 'bb', 'ccc'])
'ccc'
"""
if not strings:
return None
longest_str = strings[0]
for s in strings[1:]:
if len(s) > len(longest_str):
longest_str = s
return longest_str | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/12.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/120.py | def maximum(arr, k):
"""
Given an array arr of integers and a positive integer k, return a sorted list
of length k with the maximum k numbers in arr.
Example 1:
Input: arr = [-3, -4, 5], k = 3
Output: [-4, -3, 5]
Example 2:
Input: arr = [4, -4, 4], k = 2
Output: [4, 4]
Example 3:
Input: arr = [-3, 2, 1, 2, -1, -2, 1], k = 1
Output: [2]
Note:
1. The length of the array will be in the range of [1, 1000].
2. The elements in the array will be in the range of [-1000, 1000].
3. 0 <= k <= len(arr)
"""
sorted_arr = sorted(arr, reverse=True)
top_k = sorted_arr[:k]
return sorted(top_k) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/120.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/121.py | def solution(lst):
"""Given a non-empty list of integers, return the sum of all of the odd elements that are in even positions.
Examples
solution([5, 8, 7, 1]) ==> 12
solution([3, 3, 3, 3, 3]) ==> 9
solution([30, 13, 24, 321]) ==>0
"""
return sum(x for i, x in enumerate(lst) if i % 2 == 0 and x % 2 == 1) | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/121.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/122.py | def add_elements(arr, k):
"""
Given a non-empty array of integers arr and an integer k, return
the sum of the elements with at most two digits from the first k elements of arr.
Example:
Input: arr = [111,21,3,4000,5,6,7,8,9], k = 4
Output: 24 # sum of 21 + 3
Constraints:
1. 1 <= len(arr) <= 100
2. 1 <= k <= len(arr)
"""
total = 0
for i in range(k):
if -99 <= arr[i] <= 99:
total += arr[i]
return total | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/2026-01-05-00-49-17/humaneval-solutions/122.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.